
"""Configuration for run evaluators."""

from collections.abc import Callable, Sequence
from typing import Any

from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langsmith import RunEvaluator
from langsmith.evaluation.evaluator import EvaluationResult, EvaluationResults
from langsmith.schemas import Example, Run
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import override

from langchain_classic.evaluation.criteria.eval_chain import CRITERIA_TYPE
from langchain_classic.evaluation.embedding_distance.base import (
    EmbeddingDistance as EmbeddingDistanceEnum,
)
from langchain_classic.evaluation.schema import EvaluatorType, StringEvaluator
from langchain_classic.evaluation.string_distance.base import (
    StringDistance as StringDistanceEnum,
)

RUN_EVALUATOR_LIKE = Callable[
    [Run, Example | None], EvaluationResult | EvaluationResults | dict
]
BATCH_EVALUATOR_LIKE = Callable[
    [Sequence[Run], Sequence[Example] | None],
    EvaluationResult | EvaluationResults | dict,
]


class EvalConfig(BaseModel):
    """Configuration for a given run evaluator.

    Attributes:
        evaluator_type: The type of evaluator to use.
    """

    evaluator_type: EvaluatorType

    def get_kwargs(self) -> dict[str, Any]:
        """Get the keyword arguments for the `load_evaluator` call.

        Returns:
            The keyword arguments for the `load_evaluator` call.
        """
        kwargs = {}
        for field, val in self:
            # The evaluator type is passed separately; unset fields are omitted.
            if field == "evaluator_type" or val is None:
                continue
            kwargs[field] = val
        return kwargs


class SingleKeyEvalConfig(EvalConfig):
    """Configuration for a run evaluator that only requires a single key."""

    reference_key: str | None = None
    """The dataset key to use as the reference string; inferred if not provided."""
    prediction_key: str | None = None
    """The run output key to use as the prediction; inferred if not provided."""
    input_key: str | None = None
    """The run input key to use as the input; inferred if not provided."""

    @override
    def get_kwargs(self) -> dict[str, Any]:
        kwargs = super().get_kwargs()
        # The key mappings are applied when preparing run data, not by the evaluator.
        for key in ["reference_key", "prediction_key", "input_key"]:
            kwargs.pop(key, None)
        return kwargs


CUSTOM_EVALUATOR_TYPE = RUN_EVALUATOR_LIKE | RunEvaluator | StringEvaluator
SINGLE_EVAL_CONFIG_TYPE = EvaluatorType | str | EvalConfig


class RunEvalConfig(BaseModel):
    """Configuration for a run evaluation."""

    evaluators: list[SINGLE_EVAL_CONFIG_TYPE | CUSTOM_EVALUATOR_TYPE] = Field(
        default_factory=list
    )
    """Which evaluators to apply to the dataset run."""
    custom_evaluators: list[CUSTOM_EVALUATOR_TYPE] | None = None
    """Custom evaluators to apply to the dataset run."""
    batch_evaluators: list[BATCH_EVALUATOR_LIKE] | None = None
    """Evaluators that run over the whole batch of runs rather than each run."""
    reference_key: str | None = None
    prediction_key: str | None = None
    input_key: str | None = None
    eval_llm: BaseLanguageModel | None = None
    """The language model to pass to any evaluators that require one."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    class Criteria(SingleKeyEvalConfig):
        """Configuration for a reference-free criteria evaluator.

        Attributes:
            criteria: The criteria to evaluate.
            llm: The language model to use for the evaluation chain.
        """

        criteria: CRITERIA_TYPE | None = None
        llm: BaseLanguageModel | None = None
        evaluator_type: EvaluatorType = EvaluatorType.CRITERIA

    class LabeledCriteria(SingleKeyEvalConfig):
        """Configuration for a labeled (with references) criteria evaluator.

        Attributes:
            criteria: The criteria to evaluate.
            llm: The language model to use for the evaluation chain.
        """

        criteria: CRITERIA_TYPE | None = None
        llm: BaseLanguageModel | None = None
        evaluator_type: EvaluatorType = EvaluatorType.LABELED_CRITERIA

    class EmbeddingDistance(SingleKeyEvalConfig):
        """Configuration for an embedding distance evaluator.

        Attributes:
            embeddings: The embeddings to use for computing the distance.
            distance_metric: The distance metric to use for computing the distance.
        """

        evaluator_type: EvaluatorType = EvaluatorType.EMBEDDING_DISTANCE
        embeddings: Embeddings | None = None
        distance_metric: EmbeddingDistanceEnum | None = None

        model_config = ConfigDict(arbitrary_types_allowed=True)

    class StringDistance(SingleKeyEvalConfig):
        """Configuration for a string distance evaluator.

        Attributes:
            distance: The string distance metric to use (`damerau_levenshtein`,
                `levenshtein`, `jaro`, or `jaro_winkler`).
            normalize_score: Whether to normalize the distance to between 0 and 1.
                Applies only to the Levenshtein and Damerau-Levenshtein distances.
        """

        evaluator_type: EvaluatorType = EvaluatorType.STRING_DISTANCE
        distance: StringDistanceEnum | None = None
        normalize_score: bool = True

    class QA(SingleKeyEvalConfig):
        """Configuration for a QA evaluator.

        Attributes:
            prompt: The prompt template to use for generating the question.
            llm: The language model to use for the evaluation chain.
        """

        evaluator_type: EvaluatorType = EvaluatorType.QA
        llm: BaseLanguageModel | None = None
        prompt: BasePromptTemplate | None = None

    class ContextQA(SingleKeyEvalConfig):
        """Configuration for a context-based QA evaluator.

        Attributes:
            prompt: The prompt template to use for generating the question.
            llm: The language model to use for the evaluation chain.
        """

        evaluator_type: EvaluatorType = EvaluatorType.CONTEXT_QA
        llm: BaseLanguageModel | None = None
        prompt: BasePromptTemplate | None = None

    class CoTQA(SingleKeyEvalConfig):
        """Configuration for a context-based QA evaluator.

        Attributes:
            prompt: The prompt template to use for generating the question.
            llm: The language model to use for the evaluation chain.
        """

        evaluator_type: EvaluatorType = EvaluatorType.CONTEXT_QA
        llm: BaseLanguageModel | None = None
        prompt: BasePromptTemplate | None = None

    class JsonValidity(SingleKeyEvalConfig):
        """Configuration for a json validity evaluator."""

        evaluator_type: EvaluatorType = EvaluatorType.JSON_VALIDITY

    class JsonEqualityEvaluator(EvalConfig):
        """Configuration for a json equality evaluator."""

        evaluator_type: EvaluatorType = EvaluatorType.JSON_EQUALITY

    class ExactMatch(SingleKeyEvalConfig):
        """Configuration for an exact match string evaluator.

        Attributes:
            ignore_case: Whether to ignore case when comparing strings.
            ignore_punctuation: Whether to ignore punctuation when comparing strings.
            ignore_numbers: Whether to ignore numbers when comparing strings.
        """

        evaluator_type: EvaluatorType = EvaluatorType.EXACT_MATCH
        ignore_case: bool = False
        ignore_punctuation: bool = False
        ignore_numbers: bool = False

    class RegexMatch(SingleKeyEvalConfig):
        """Configuration for a regex match string evaluator.

        Attributes:
            flags: The flags to pass to the regex. Example: `re.IGNORECASE`.
        """

        evaluator_type: EvaluatorType = EvaluatorType.REGEX_MATCH
        flags: int = 0

    class ScoreString(SingleKeyEvalConfig):
        """Configuration for a score string evaluator.

        This is like the criteria evaluator but it is configured by
        default to return a score on the scale from 1-10.

        It is recommended to normalize these scores
        by setting `normalize_by` to 10.

        Attributes:
            criteria: The criteria to evaluate.
            llm: The language model to use for the evaluation chain.
            normalize_by: If you want to normalize the score, the denominator to use.
                If not provided, the score will be between 1 and 10.
            prompt: The prompt template to use for evaluation.
        """

        evaluator_type: EvaluatorType = EvaluatorType.SCORE_STRING
        criteria: CRITERIA_TYPE | None = None
        llm: BaseLanguageModel | None = None
        normalize_by: float | None = None
        prompt: BasePromptTemplate | None = None

    class LabeledScoreString(ScoreString):
        """Configuration for a labeled score string evaluator."""

        evaluator_type: EvaluatorType = EvaluatorType.LABELED_SCORE_STRING