from __future__ import annotations

import asyncio
import logging
import typing as t
from abc import ABC, abstractmethod
from dataclasses import dataclass
from functools import partial

from langchain_community.chat_models.vertexai import ChatVertexAI
from langchain_community.llms import VertexAI
from langchain_core.language_models import BaseLanguageModel
from langchain_core.outputs import Generation, LLMResult
from langchain_openai.chat_models import AzureChatOpenAI, ChatOpenAI
from langchain_openai.llms import AzureOpenAI, OpenAI
from langchain_openai.llms.base import BaseOpenAI

from ragas.run_config import RunConfig, add_async_retry, add_retry

if t.TYPE_CHECKING:
    from langchain_core.callbacks import Callbacks
    from llama_index.core.base.llms.base import BaseLLM

    from ragas.llms.prompt import PromptValue

logger = logging.getLogger(__name__)

# LLM classes that natively support the `n` parameter for requesting
# multiple completions in a single call.
MULTIPLE_COMPLETION_SUPPORTED = [
    OpenAI,
    ChatOpenAI,
    AzureOpenAI,
    AzureChatOpenAI,
    ChatVertexAI,
    VertexAI,
]
}t| |r dS qdS )z3Return whether the given LLM supports n-completion.TF)MULTIPLE_COMPLETION_SUPPORTED
isinstance)r   llm_type r   H/var/www/html/corbot_env/lib/python3.10/site-packages/ragas/llms/base.py is_multiple_completion_supported&   s
   
r   c                   @  sp   e Zd ZU ded< dddZd d	d
Ze				d!d"ddZe				d!d"ddZ					d#d$ddZ	dS )%BaseRagasLLMr   
run_configc                 C  s
   || _ d S N)r!   )selfr!   r   r   r   set_run_config2   s   
zBaseRagasLLM.set_run_confignintr   floatc                 C  s   |dkrdS dS )z8Return the temperature to use for completion based on n.   g333333?:0yE>r   )r#   r%   r   r   r   get_temperature5   s   zBaseRagasLLM.get_temperaturer(   r)   Npromptr   temperaturestopt.Optional[t.List[str]]	callbacksr   r   c                 C  s   d S r"   r   r#   r+   r%   r,   r-   r/   r   r   r   generate_text9   s   	zBaseRagasLLM.generate_textc                   s   d S r"   r   r0   r   r   r   agenerate_textD   s   	zBaseRagasLLM.agenerate_textTis_asyncr   c                   sh   |rt | j| j}||||||dI dH S t }t| j| j}	t|	|||||d}
|d|
I dH S )z)Generate text using the given event loop.)r+   r%   r,   r-   r/   N)	r   r2   r!   asyncioget_event_loopr   r1   r   run_in_executor)r#   r+   r%   r,   r-   r/   r3   agenerate_text_with_retryloopgenerate_text_with_retryr1   r   r   r   generateO   s.   
zBaseRagasLLM.generater!   r   )r%   r&   r   r'   r(   r)   NNr+   r   r%   r&   r,   r'   r-   r.   r/   r   r   r   )r(   r)   NNT)r+   r   r%   r&   r,   r'   r-   r.   r/   r   r3   r   r   r   )
class LangchainLLMWrapper(BaseRagasLLM):
    """
    A simple base class for RagasLLMs that is based on Langchain's
    BaseLanguageModel interface. It implements two functions:
    - generate_text: for generating text from a given PromptValue
    - agenerate_text: for generating text from a given PromptValue asynchronously
    """

    def __init__(
        self,
        langchain_llm: BaseLanguageModel,
        run_config: t.Optional[RunConfig] = None,
    ):
        self.langchain_llm = langchain_llm
        if run_config is None:
            run_config = RunConfig()
        self.set_run_config(run_config)

    def generate_text(
        self,
        prompt: PromptValue,
        n: int = 1,
        temperature: float = 1e-8,
        stop: t.Optional[t.List[str]] = None,
        callbacks: Callbacks = None,
    ) -> LLMResult:
        # the temperature is derived from n; the passed-in value is ignored
        temperature = self.get_temperature(n=n)
        if is_multiple_completion_supported(self.langchain_llm):
            return self.langchain_llm.generate_prompt(
                prompts=[prompt],
                n=n,
                temperature=temperature,
                stop=stop,
                callbacks=callbacks,
            )
        else:
            # emulate n-completion by sending the same prompt n times and
            # reshaping the result to look like one n-completion run
            result = self.langchain_llm.generate_prompt(
                prompts=[prompt] * n,
                temperature=temperature,
                stop=stop,
                callbacks=callbacks,
            )
            generations = [[g[0] for g in result.generations]]
            result.generations = generations
            return result

    async def agenerate_text(
        self,
        prompt: PromptValue,
        n: int = 1,
        temperature: float = 1e-8,
        stop: t.Optional[t.List[str]] = None,
        callbacks: Callbacks = None,
    ) -> LLMResult:
        temperature = self.get_temperature(n=n)
        if is_multiple_completion_supported(self.langchain_llm):
            return await self.langchain_llm.agenerate_prompt(
                prompts=[prompt],
                n=n,
                temperature=temperature,
                stop=stop,
                callbacks=callbacks,
            )
        else:
            result = await self.langchain_llm.agenerate_prompt(
                prompts=[prompt] * n,
                temperature=temperature,
                stop=stop,
                callbacks=callbacks,
            )
            generations = [[g[0] for g in result.generations]]
            result.generations = generations
            return result

    def set_run_config(self, run_config: RunConfig):
        self.run_config = run_config

        # when using an OpenAI-backed LLM, propagate the timeout and retry
        # on rate-limit errors
        if isinstance(self.langchain_llm, BaseOpenAI) or isinstance(
            self.langchain_llm, ChatOpenAI
        ):
            try:
                from openai import RateLimitError
            except ImportError:
                raise ImportError(
                    "openai.error.RateLimitError not found. "
                    "Please install openai package as `pip install openai`"
                )
            self.langchain_llm.request_timeout = run_config.timeout
            self.run_config.exception_types = RateLimitError
z"LangchainLLMWrapper.set_run_configr"   )rC   r	   r!   rD   r<   r=   r;   )r>   r?   r@   __doc__rE   r1   r2   r$   r   r   r   r   rB   r   s    !rB   c                   @  sP   e Zd ZdZ	ddddZd ddZ				d!d"ddZ				d!d"ddZdS )#LlamaIndexLLMWrapperz'
    A Adaptor for LlamaIndex LLMs
    Nr   r   r!   rD   c                 C  sB   || _ d| _t| j j dkrd| _|d u rt }| | d S )N bedrock)r   
_signaturetyper>   lowerr   r$   )r#   r   r!   r   r   r   rE      s   zLlamaIndexLLMWrapper.__init__r%   r&   r,   r'   r-   r.   r/   r   r   dict[str, t.Any]c                 C  sf   |dkr	t d |dkrt d |d urt d |d ur$t d | jdkr-d|iS |||d	S )
Nr(   z7n values greater than 1 not support for LlamaIndex LLMsr)   z*temperature kwarg passed to LlamaIndex LLMz#stop kwarg passed to LlamaIndex LLMz?callbacks not supported for LlamaIndex LLMs, ignoring callbacksr_   r,   )r%   r,   r-   )loggerwarninginfor`   )r#   r%   r,   r-   r/   r   r   r   
check_args   s    



zLlamaIndexLLMWrapper.check_argsr(   r)   r+   r   r   c                 C  s>   |  ||||}| jj| fi |}tt|jdggdS N)text)rR   )rg   r   complete	to_stringr   r
   ri   r#   r+   r%   r,   r-   r/   kwargsli_responser   r   r   r1      s   z"LlamaIndexLLMWrapper.generate_textc                   sF   |  ||||}| jj| fi |I d H }tt|jdggdS rh   )rg   r   	acompleterk   r   r
   ri   rl   r   r   r   r2   	  s   z#LlamaIndexLLMWrapper.agenerate_textr"   )r   r   r!   rD   )
r%   r&   r,   r'   r-   r.   r/   r   r   rc   r<   r=   )r>   r?   r@   r\   rE   rg   r1   r2   r   r   r   r   r]      s    
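# Usage sketch (the LlamaIndex OpenAI import path below is for illustration
# and may differ between llama-index versions):
#
#     from llama_index.llms.openai import OpenAI as LIOpenAI
#     wrapped = LlamaIndexLLMWrapper(LIOpenAI(model="gpt-3.5-turbo"))
#     result = wrapped.generate_text(prompt)  # one Generation in the LLMResult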
def llm_factory(
    model: str = "gpt-3.5-turbo", run_config: t.Optional[RunConfig] = None
) -> BaseRagasLLM:
    timeout = None
    if run_config is not None:
        timeout = run_config.timeout

    openai_model = ChatOpenAI(model=model, timeout=timeout)
    return LangchainLLMWrapper(openai_model)
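# e.g. building the default evaluation LLM (a sketch; RunConfig fields other
# than `timeout` are not consulted here):
#
#     llm = llm_factory()                                # gpt-3.5-turbo
#     llm = llm_factory("gpt-4", RunConfig(timeout=60))  # with a 60s timeout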