"""Callback Handler streams to stdout on new llm token."""
import sys
from typing import Any, Dict, List, Optional

from langchain_core.callbacks import StreamingStdOutCallbackHandler

DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]


class FinalStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
    """Callback handler for streaming in agents.
    Only works with agents using LLMs that support streaming.

    Only the final output of the agent will be streamed.
    """

    def append_to_last_tokens(self, token: str) -> None:
        self.last_tokens.append(token)
        self.last_tokens_stripped.append(token.strip())
        if len(self.last_tokens) > len(self.answer_prefix_tokens):
            self.last_tokens.pop(0)
            self.last_tokens_stripped.pop(0)

    def check_if_answer_reached(self) -> bool:
        if self.strip_tokens:
            return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
        else:
            return self.last_tokens == self.answer_prefix_tokens

    def __init__(
        self,
        *,
        answer_prefix_tokens: Optional[List[str]] = None,
        strip_tokens: bool = True,
        stream_prefix: bool = False,
    ) -> None:
        """Instantiate FinalStreamingStdOutCallbackHandler.

        Args:
            answer_prefix_tokens: Token sequence that prefixes the answer.
                Default is ["Final", "Answer", ":"]
            strip_tokens: Ignore white spaces and new lines when comparing
                answer_prefix_tokens to last tokens? (to determine if answer has been
                reached)
            stream_prefix: Should answer prefix itself also be streamed?
        """
        super().__init__()
        if answer_prefix_tokens is None:
            self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
        else:
            self.answer_prefix_tokens = answer_prefix_tokens
        if strip_tokens:
            self.answer_prefix_tokens_stripped = [
                token.strip() for token in self.answer_prefix_tokens
            ]
        else:
            self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
        self.last_tokens = [""] * len(self.answer_prefix_tokens)
        self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
        self.strip_tokens = strip_tokens
        self.stream_prefix = stream_prefix
        self.answer_reached = False

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts running."""
        self.answer_reached = False

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        # Remember the last n tokens, where n = len(answer_prefix_tokens)
        self.append_to_last_tokens(token)

        # Check if the last n tokens match the answer_prefix_tokens list ...
        if self.check_if_answer_reached():
            self.answer_reached = True
            if self.stream_prefix:
                for t in self.last_tokens:
                    sys.stdout.write(t)
                sys.stdout.flush()
            return

        # ... if yes, then print tokens from now on
        if self.answer_reached:
            sys.stdout.write(token)
            sys.stdout.flush()