import json
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Optional

import requests
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.language_models.llms import LLM
from langchain_core.outputs import GenerationChunk
from pydantic import Field

logger = logging.getLogger(__name__)


class TextGen(LLM):
    """Text generation models from WebUI.

    To use, you should have the text-generation-webui installed, a model loaded,
    and --api added as a command-line option.

    Suggested installation, use one-click installer for your OS:
    https://github.com/oobabooga/text-generation-webui#one-click-installers

    Parameters below taken from text-generation-webui api example:
    https://github.com/oobabooga/text-generation-webui/blob/main/api-examples/api-example.py

    Example:
        .. code-block:: python

            from langchain_community.llms import TextGen
            llm = TextGen(model_url="http://localhost:8500")
    """

    model_url: str
    """The full URL to the textgen webui, including http[s]://host:port."""

    preset: Optional[str] = None
    """The preset to use in the textgen webui."""

    max_new_tokens: Optional[int] = 250
    """The maximum number of tokens to generate."""

    do_sample: bool = Field(True, alias="do_sample")
    """Whether to use sampling."""

    temperature: Optional[float] = 1.3
    """Primary factor to control randomness of outputs. 0 = deterministic
    (only the most likely token is used). Higher value = more randomness."""

    top_p: Optional[float] = 0.1
    """If not set to 1, select tokens with probabilities adding up to less than
    this number. Higher value = higher range of possible random results."""

    typical_p: Optional[float] = 1
    """If not set to 1, select only tokens that are at least this much more
    likely to appear than random tokens, given the prior text."""

    epsilon_cutoff: Optional[float] = 0  # In units of 1e-4
    """Epsilon cutoff."""

    eta_cutoff: Optional[float] = 0  # In units of 1e-4
    """ETA cutoff."""

    repetition_penalty: Optional[float] = 1.18
    """Exponential penalty factor for repeating prior tokens. 1 means no
    penalty; higher value = less repetition."""

    top_k: Optional[float] = 40
    """Similar to top_p, but select instead only the top_k most likely tokens.
    Higher value = higher range of possible random results."""

    min_length: Optional[int] = 0
    """Minimum generation length in tokens."""

    no_repeat_ngram_size: Optional[int] = 0
    """If not set to 0, specifies the length of token sets that are completely
    blocked from repeating at all."""

    num_beams: Optional[int] = 1
    """Number of beams for beam search."""

    penalty_alpha: Optional[float] = 0
    """Penalty alpha for contrastive search."""

    length_penalty: Optional[float] = 1
    """Length penalty applied during beam search."""

    early_stopping: bool = Field(False, alias="early_stopping")
    """Whether to stop beam search early."""

    seed: int = Field(-1, alias="seed")
    """Seed for the random number generator (-1 for random)."""

    add_bos_token: bool = Field(True, alias="add_bos_token")
    """Add the bos_token to the beginning of prompts. Disabling this can make
    the replies more creative."""

    truncation_length: Optional[int] = 2048
    """Truncate the prompt up to this length. The leftmost tokens are removed
    if the prompt exceeds this length. Most models require this to be at most
    2048."""

    ban_eos_token: bool = Field(False, alias="ban_eos_token")
    """Ban the eos_token. Forces the model to never end the generation
    prematurely."""

    skip_special_tokens: bool = Field(True, alias="skip_special_tokens")
    """Skip special tokens. Some specific models need this unset."""

    stopping_strings: Optional[List[str]] = []
    """A list of strings to stop generation when encountered."""

    streaming: bool = False
    """Whether to stream the results, token by token."""

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling textgen."""
        return {
            "max_new_tokens": self.max_new_tokens,
            "do_sample": self.do_sample,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "typical_p": self.typical_p,
            "epsilon_cutoff": self.epsilon_cutoff,
            "eta_cutoff": self.eta_cutoff,
            "repetition_penalty": self.repetition_penalty,
            "top_k": self.top_k,
            "min_length": self.min_length,
            "no_repeat_ngram_size": self.no_repeat_ngram_size,
            "num_beams": self.num_beams,
            "penalty_alpha": self.penalty_alpha,
            "length_penalty": self.length_penalty,
            "early_stopping": self.early_stopping,
            "seed": self.seed,
            "add_bos_token": self.add_bos_token,
            "truncation_length": self.truncation_length,
            "ban_eos_token": self.ban_eos_token,
            "skip_special_tokens": self.skip_special_tokens,
            "stopping_strings": self.stopping_strings,
        }

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_url": self.model_url}, **self._default_params}

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "textgen"

    def _get_parameters(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
        """
        Performs sanity check, preparing parameters in format needed by textgen.

        Args:
            stop (Optional[List[str]]): List of stop sequences for textgen.

        Returns:
            Dictionary containing the combined parameters.
        """
        # Raise an error if stop sequences arrive from both the call and the
        # instance defaults; the API accepts only one list.
        if self.stopping_strings and stop is not None:
            raise ValueError("`stop` found in both the input and default params.")

        if self.preset is None:
            params = self._default_params
        else:
            params = {"preset": self.preset}

        # Use whichever stop list is configured, defaulting to an empty list.
        params["stopping_strings"] = self.stopping_strings or stop or []

        return params

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call the textgen web API and return the output.

        Args:
            prompt: The prompt to use for generation.
            stop: A list of strings to stop generation when encountered.

        Returns:
            The generated text.

        Example:
            .. code-block:: python

                from langchain_community.llms import TextGen
                llm = TextGen(model_url="http://localhost:5000")
                llm.invoke("Write a story about llamas.")
        """
        if self.streaming:
            # Delegate to the websocket streaming path and join the chunks.
            combined_text_output = ""
            for chunk in self._stream(
                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
            ):
                combined_text_output += chunk.text
            result = combined_text_output
        else:
            # Blocking HTTP call against the generate endpoint.
            url = f"{self.model_url}/api/v1/generate"
            params = self._get_parameters(stop)
            request = params.copy()
            request["prompt"] = prompt
            response = requests.post(url, json=request)

            if response.status_code == 200:
                result = response.json()["results"][0]["text"]
            else:
                print(f"ERROR: response: {response}")  # noqa: T201
                result = ""

        return result

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Async version of ``_call``; same arguments and return value."""
        if self.streaming:
            combined_text_output = ""
            async for chunk in self._astream(
                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
            ):
                combined_text_output += chunk.text
            result = combined_text_output
        else:
            url = f"{self.model_url}/api/v1/generate"
            params = self._get_parameters(stop)
            request = params.copy()
            request["prompt"] = prompt
            response = requests.post(url, json=request)

            if response.status_code == 200:
                result = response.json()["results"][0]["text"]
            else:
                print(f"ERROR: response: {response}")  # noqa: T201
                result = ""

        return result

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        """Yields results objects as they are generated in real time.

        It also calls the callback manager's on_llm_new_token event with
        similar parameters to the OpenAI LLM class method of the same name.

        Args:
            prompt: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            A generator representing the stream of tokens being generated.

        Yields:
            Dictionary-like objects containing a string token and metadata.
            See text-generation-webui docs and below for more.

        Example:
            .. code-block:: python

                from langchain_community.llms import TextGen
                llm = TextGen(
                    model_url="ws://localhost:5005",
                    streaming=True,
                )
                for chunk in llm.stream("Ask 'Hi, how are you?' like a pirate:'",
                        stop=["'", "\\n"]):
                    print(chunk, end='', flush=True)  # noqa: T201
        """
        try:
            import websocket
        except ImportError:
            raise ImportError(
                "The `websocket-client` package is required for streaming."
            )

        params = {**self._get_parameters(stop), **kwargs}

        url = f"{self.model_url}/api/v1/stream"

        request = params.copy()
        request["prompt"] = prompt

        websocket_client = websocket.WebSocket()
        websocket_client.connect(url)
        websocket_client.send(json.dumps(request))

        while True:
            result = websocket_client.recv()
            result = json.loads(result)

            if result["event"] == "text_stream":
                chunk = GenerationChunk(
                    text=result["text"],
                    generation_info=None,
                )
                if run_manager:
                    run_manager.on_llm_new_token(token=chunk.text)
                yield chunk
            elif result["event"] == "stream_end":
                websocket_client.close()
                return

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        """Async version of ``_stream``; yields chunks as they are generated
        and awaits the callback manager's on_llm_new_token event."""
        try:
            import websocket
        except ImportError:
            raise ImportError(
                "The `websocket-client` package is required for streaming."
            )

        params = {**self._get_parameters(stop), **kwargs}

        url = f"{self.model_url}/api/v1/stream"

        request = params.copy()
        request["prompt"] = prompt

        websocket_client = websocket.WebSocket()
        websocket_client.connect(url)
        websocket_client.send(json.dumps(request))

        while True:
            result = websocket_client.recv()
            result = json.loads(result)

            if result["event"] == "text_stream":
                chunk = GenerationChunk(
                    text=result["text"],
                    generation_info=None,
                )
                if run_manager:
                    await run_manager.on_llm_new_token(token=chunk.text)
                yield chunk
            elif result["event"] == "stream_end":
                websocket_client.close()
                return
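

# A minimal usage sketch for running this module directly. It assumes a local
# text-generation-webui instance with the blocking API on http://localhost:5000
# and the streaming websocket API on ws://localhost:5005; adjust the URLs and
# ports to your own setup before trying it.
if __name__ == "__main__":
    # Blocking completion via POST {model_url}/api/v1/generate.
    llm = TextGen(model_url="http://localhost:5000")
    print(llm.invoke("Write a story about llamas."))

    # Token-by-token streaming via the {model_url}/api/v1/stream websocket;
    # requires the `websocket-client` package to be installed.
    streaming_llm = TextGen(model_url="ws://localhost:5005", streaming=True)
    for token in streaming_llm.stream("Say hello like a pirate:", stop=["\n"]):
        print(token, end="", flush=True)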