import logging
from typing import Any, List, Mapping, Optional

import requests
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.llms import LLM

from langchain_community.llms.utils import enforce_stop_tokens

logger = logging.getLogger(__name__)


class ChatGLM(LLM):
    """ChatGLM LLM service.

    Example:
        .. code-block:: python

            from langchain_community.llms import ChatGLM
            endpoint_url = (
                "http://127.0.0.1:8000"
            )
            ChatGLM_llm = ChatGLM(
                endpoint_url=endpoint_url
            )
    """

    endpoint_url: str = "http://127.0.0.1:8000/"
    """Endpoint URL to use."""
    model_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model."""
    max_token: int = 20000
    """Max token allowed to pass to the model."""
    temperature: float = 0.1
    """LLM model temperature from 0 to 10."""
    history: List[List] = []
    """History of the conversation."""
    top_p: float = 0.7
    """Top P for nucleus sampling from 0 to 1."""
    with_history: bool = False
    """Whether to use history or not."""

    @property
    def _llm_type(self) -> str:
        return "chat_glm"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        _model_kwargs = self.model_kwargs or {}
        return {
            **{"endpoint_url": self.endpoint_url},
            **{"model_kwargs": _model_kwargs},
        }

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to a ChatGLM LLM inference endpoint.

        Args:
            prompt: The prompt to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = chatglm_llm.invoke("Who are you?")
        """
        _model_kwargs = self.model_kwargs or {}

        headers = {"Content-Type": "application/json"}

        payload = {
            "prompt": prompt,
            "temperature": self.temperature,
            "history": self.history,
            "max_length": self.max_token,
            "top_p": self.top_p,
        }
        # Merge configured model kwargs and per-call kwargs into the payload.
        payload.update(_model_kwargs)
        payload.update(kwargs)

        logger.debug(f"ChatGLM payload: {payload}")

        # Call the inference endpoint.
        try:
            response = requests.post(self.endpoint_url, headers=headers, json=payload)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Error raised by inference endpoint: {e}")

        logger.debug(f"ChatGLM response: {response}")

        if response.status_code != 200:
            raise ValueError(f"Failed with response: {response}")

        try:
            parsed_response = response.json()

            # Check that the parsed response actually carries generated text.
            if isinstance(parsed_response, dict):
                content_keys = "response"
                if content_keys in parsed_response:
                    text = parsed_response[content_keys]
                else:
                    raise ValueError(f"No content in response : {parsed_response}")
            else:
                raise ValueError(f"Unexpected response type: {parsed_response}")

        except requests.exceptions.JSONDecodeError as e:
            raise ValueError(
                f"Error raised during decoding response from inference endpoint: {e}."
                f"\nResponse: {response.text}"
            )

        if stop is not None:
            text = enforce_stop_tokens(text, stop)
        if self.with_history:
            # Persist the server-returned conversation history for later calls.
            self.history = parsed_response["history"]
        return text
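
# Usage sketch (illustrative; not part of the original module). It assumes a
# locally hosted ChatGLM HTTP server, e.g. the api.py script shipped with
# ChatGLM-6B, which accepts the JSON payload built in _call above and returns
# a body shaped like {"response": "...", "history": [...], "status": 200}.
# The endpoint URL and prompts below are placeholders for your own deployment.
if __name__ == "__main__":
    llm = ChatGLM(
        endpoint_url="http://127.0.0.1:8000",
        top_p=0.9,
        with_history=True,  # keep the server-returned conversation history
    )
    print(llm.invoke("What is the capital of France?"))
    # With with_history=True, the history returned by the first call is sent
    # with the second, so the model can resolve "its" from the prior turn.
    print(llm.invoke("What is its population?"))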