from __future__ import annotations

import logging
import typing as t
from dataclasses import dataclass, field
from typing import List

import pysbd

from ragas.llms.prompt import Prompt
from ragas.metrics.base import EvaluationMode, MetricWithLLM

if t.TYPE_CHECKING:
    from langchain_core.callbacks.base import Callbacks

logger = logging.getLogger(__name__)

CONTEXT_RELEVANCE = Prompt(
    name="context_relevancy",
    instruction="""Please extract relevant sentences from the provided context that are absolutely required to answer the following question. If no relevant sentences are found, or if you believe the question cannot be answered from the given context, return the phrase "Insufficient Information". While extracting candidate sentences you're not allowed to make any changes to sentences from the given context.""",
    input_keys=["question", "context"],
    output_key="candidate sentences",
    output_type="json",
)
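
# The prompt is rendered per row in `ContextRelevancy._ascore` below via
# `self.context_relevancy_prompt.format(question=..., context=...)`; the model is
# expected to return candidate sentences copied verbatim from the context, or the
# literal phrase "Insufficient Information" when the question cannot be answered.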


seg = pysbd.Segmenter(language="en", clean=False)


def sent_tokenize(text: str) -> List[str]:
    """
    Tokenize text into sentences.
    """
    sentences = seg.segment(text)
    assert isinstance(sentences, list)
    return sentences


@dataclass
class ContextRelevancy(MetricWithLLM):
    """
    Extracts sentences from the context that are relevant to the question with
    self-consistency checks. The number of relevant sentences is used as the score.

    Attributes
    ----------
    name : str
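    evaluation_mode : EvaluationMode
    context_relevancy_prompt : Prompt
    show_deprecation_warning : bool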
    """

    name: str = "context_relevancy"
    evaluation_mode: EvaluationMode = EvaluationMode.qc
    context_relevancy_prompt: Prompt = field(default_factory=lambda: CONTEXT_RELEVANCE)
    show_deprecation_warning: bool = False

    def _compute_score(self, response: str, row: t.Dict) -> float:
        context = "\n".join(row["contexts"])
        context_sents = sent_tokenize(context)
        # Sentences the LLM judged relevant; "Insufficient Information" counts as none.
        indices = (
            sent_tokenize(response.strip())
            if response.lower() != "insufficient information."
            else []
        )
        if len(context_sents) == 0:
            return 0.0
        return min(len(indices) / len(context_sents), 1)

    async def _ascore(self, row: t.Dict, callbacks: Callbacks, is_async: bool) -> float:
        assert self.llm is not None, "LLM is not initialized"

        if self.show_deprecation_warning:
            logger.warning(
                "The 'context_relevancy' metric is going to be deprecated soon! "
                "Please use the 'context_precision' metric instead. It is a "
                "drop-in replacement, just a simple search and replace should work."
            )
        question, contexts = row["question"], row["contexts"]
        result = await self.llm.generate(
            self.context_relevancy_prompt.format(
                question=question, context="\n".join(contexts)
            ),
            callbacks=callbacks,
            is_async=is_async,
        )
        return self._compute_score(result.generations[0][0].text, row)

    def adapt(self, language: str, cache_dir: str | None = None) -> None:
        assert self.llm is not None, "set LLM before use"

        logger.info(f"Adapting Context Relevancy to {language}")
        self.context_relevancy_prompt = self.context_relevancy_prompt.adapt(
            language, self.llm, cache_dir
        )

    def save(self, cache_dir: str | None = None) -> None:
        self.context_relevancy_prompt.save(cache_dir)
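

# Illustrative sketch of how the score falls out, assuming `context_relevancy.llm`
# (on the module-level instance defined below) has already been set to a configured
# ragas LLM wrapper and that the call is made from an async context:
#
#   row = {
#       "question": "Where is the Eiffel Tower?",
#       "contexts": ["The Eiffel Tower is in Paris. It was built in 1889."],
#   }
#   score = await context_relevancy._ascore(row, callbacks=[], is_async=True)
#
# With a two-sentence context, an LLM reply containing one extracted sentence
# scores min(1 / 2, 1) = 0.5, while an "Insufficient Information" reply scores 0.0.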


context_relevancy = ContextRelevancy()