o
    Zh3                     @  s  d dl mZ d dlZd dlmZmZ d dlZd dlmZmZm	Z	m
Z
 d dlm  mZ d dlmZ d dlmZ d dlmZ d dlmZ d d	lmZ d d
lmZ dZdZzejZW n eyj   e
dZdLddZY nw dMddZeddddddddd	dNd+d,Zedddddddddd-
dOd4d5Z ej!ej"d6d7G d8d9 d9ej#Z#		dPdQd<d=Z$		dPdRd?d@Z%e			dPdSdDdEZ&e			dPdTdHdEZ&		dPdUdKdEZ&dS )V    )annotationsN)IterableSequence)Anyr   overloadTypeVar)get_default_text_client)string_utils)
text_types)model_types)models)safety_typeszmodels/text-bison-001d   TiterableIterable[T]nintreturnIterable[list[T]]c                 c  sX    |dk rt d| g }| D ]}|| t||kr"|V  g }q|r*|V  d S d S )N   z Batch size `n` must be >1, got: )
ValueErrorappendlen)r   r   batchitem r   O/var/www/html/lang_env/lib/python3.10/site-packages/google/generativeai/text.py_batched(   s   

r   promptstr | dict[str, str]glm.TextPromptc                 C  s6   t | trtj| dS t | trt| S td dS )aP  
    Creates a `glm.TextPrompt` object based on the provided prompt input.

    Args:
        prompt: The prompt input, either a string or a dictionary.

    Returns:
        glm.TextPrompt: A TextPrompt object containing the prompt text.

    Raises:
        TypeError: If the provided prompt is neither a string nor a dictionary.
    )textz.Expected string or dictionary for text prompt.N)
isinstancestrglmZ
TextPromptdict	TypeErrorr   r   r   r   _make_text_prompt6   s
   


r)   	modelr   temperaturecandidate_countmax_output_tokenstop_ptop_ksafety_settingsstop_sequencesr+   model_types.AnyModelNameOptions
str | Noner,   float | Noner-   
int | Noner.   r/   r0   r1   (safety_types.SafetySettingOptions | Noner2   str | Iterable[str] | Noneglm.GenerateTextRequestc        	   	      C  sZ   t | } t|d}tj|dd}t|tr|g}|rt|}tj	| ||||||||d	S )a  
    Creates a `glm.GenerateTextRequest` object based on the provided parameters.

    This function generates a `glm.GenerateTextRequest` object with the specified
    parameters. It prepares the input parameters and creates a request that can be
    used for generating text using the chosen model.

    Args:
        model: The model to use for text generation.
        prompt: The prompt for text generation. Defaults to None.
        temperature: The temperature for randomness in generation. Defaults to None.
        candidate_count: The number of candidates to consider. Defaults to None.
        max_output_tokens: The maximum number of output tokens. Defaults to None.
        top_p: The nucleus sampling probability threshold. Defaults to None.
        top_k: The top-k sampling parameter. Defaults to None.
        safety_settings: Safety settings for generated text. Defaults to None.
        stop_sequences: Stop sequences to halt text generation. Can be a string
             or iterable of strings. Defaults to None.

    Returns:
        `glm.GenerateTextRequest`: A `GenerateTextRequest` object configured with the specified parameters.
    r(   old)Zharm_category_setr*   )
r   make_model_namer)   r   Znormalize_safety_settingsr#   r$   listr%   ZGenerateTextRequestr*   r   r   r   _make_generate_text_requestK   s(   
"

r=   )
r+   r,   r-   r.   r/   r0   r1   r2   clientrequest_optionsr$   r>   glm.TextServiceClient | Noner?   dict[str, Any] | Nonetext_types.Completionc                 C  s(   t | ||||||||d	}t|	||
dS )az  Calls the API and returns a `types.Completion` containing the response.

    Args:
        model: Which model to call, as a string or a `types.Model`.
        prompt: Free-form input text given to the model. Given a prompt, the model will
                generate text that completes the input text.
        temperature: Controls the randomness of the output. Must be positive.
            Typical values are in the range: `[0.0,1.0]`. Higher values produce a
            more random and varied response. A temperature of zero will be deterministic.
        candidate_count: The **maximum** number of generated response messages to return.
            This value must be between `[1, 8]`, inclusive. If unset, this
            will default to `1`.

            Note: Only unique candidates are returned. Higher temperatures are more
            likely to produce unique candidates. Setting `temperature=0.0` will always
            return 1 candidate regardless of the `candidate_count`.
        max_output_tokens: Maximum number of tokens to include in a candidate. Must be greater
                           than zero. If unset, will default to 64.
        top_k: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling.
            `top_k` sets the maximum number of tokens to sample from on each step.
        top_p: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling.
            `top_p` configures the nucleus sampling. It sets the maximum cumulative
            probability of tokens to sample from.
            For example, if the sorted probabilities are
            `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample
            as `[0.625, 0.25, 0.125, 0, 0, 0]`.
        safety_settings: A list of unique `types.SafetySetting` instances for blocking unsafe content.
           These will be enforced on the `prompt` and
           `candidates`. There should not be more than one
           setting for each `types.SafetyCategory` type. The API will block any prompts and
           responses that fail to meet the thresholds set by these settings. This list
           overrides the default settings for each `SafetyCategory` specified in the
           safety_settings. If there is no `types.SafetySetting` for a given
           `SafetyCategory` provided in the list, the API will use the default safety
           setting for that category.
        stop_sequences: A set of up to 5 character sequences that will stop output generation.
          If specified, the API will stop at the first appearance of a stop
          sequence. The stop sequence will not be included as part of the response.
        client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
        request_options: Options for the request.

    Returns:
        A `types.Completion` containing the model's text completion response.
    r*   )r>   requestr?   )r=   _generate_response)r+   r   r,   r-   r.   r/   r0   r1   r2   r>   r?   rC   r   r   r   generate_text   s   :rE   F)initc                   @  s   e Zd Zdd ZdS )
Completionc                 K  sB   |  D ]
\}}t| || qd | _| jr| jd d | _d S d S )Nr   output)itemssetattrresult
candidates)selfkwargskeyvaluer   r   r   __init__   s   zCompletion.__init__N)__name__
__module____qualname__rQ   r   r   r   r   rG      s    rG   rC   glm.TextServiceClientc                 C  s   |du ri }|du rt  }|j| fi |}t||}t|d |d< t|d |d< t|d |d< tdd|i|S )a  
    Generates a response using the provided `glm.GenerateTextRequest` and client.

    Args:
        request: The text generation request.
        client: The client to use for text generation. Defaults to None, in which
            case the default text client is used.
        request_options: Options for the request.

    Returns:
        `Completion`: A `Completion` object with the generated text and response information.
    NfiltersZsafety_feedbackrL   Z_clientr   )	r   rE   typeto_dictr   Zconvert_filters_to_enumsZ convert_safety_feedback_to_enumsZconvert_candidate_enumsrG   )rC   r>   r?   responser   r   r   rD      s   rD   text_types.TokenCountc                 C  sR   t | }|d u ri }|d u rt }|jtj|d|idfi |}t||S )Nr"   )r+   r   )r   Zget_base_model_namer   count_text_tokensr%   ZCountTextTokensRequestrW   rX   )r+   r   r>   r?   Z
base_modelrK   r   r   r   r[      s   
r[    model_types.BaseModelNameOptionsr"   text_types.EmbeddingDictc                 C     d S Nr   r+   r"   r>   r?   r   r   r   generate_embeddings     ra   Sequence[str]text_types.BatchEmbeddingDictc                 C  r^   r_   r   r`   r   r   r   ra     rb   str | Sequence[str]8text_types.EmbeddingDict | text_types.BatchEmbeddingDictc           	      C  s   t | } |du ri }|du rt }t|tr8tj| |d}|j|fi |}t|	|}|d d |d< |S dg i}t
|tD ]'}tj| |d}|j|fi |}t|	|}|d dd |d D  qA|S )	a$  Calls the API to create an embedding for the text passed in.

    Args:
        model: Which model to call, as a string or a `types.Model`.

        text: Free-form input text given to the model. Given a string, the model will
              generate an embedding based on the input text.

        client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.

        request_options: Options for the request.

    Returns:
        Dictionary containing the embedding (list of float values) for the input text.
    N)r+   r"   Z	embeddingrP   )r+   Ztextsc                 s  s    | ]}|d  V  qdS )rP   Nr   ).0er   r   r   	<genexpr>S  s    z&generate_embeddings.<locals>.<genexpr>Z
embeddings)r   r;   r   r#   r$   r%   ZEmbedTextRequestZ
embed_textrW   rX   r   EMBEDDING_MAX_BATCH_SIZEZBatchEmbedTextRequestZbatch_embed_textextend)	r+   r"   r>   r?   Zembedding_requestZembedding_responseZembedding_dictrK   r   r   r   r   ra   $  s4   

)r   r   r   r   r   r   )r   r    r   r!   )r+   r3   r   r4   r,   r5   r-   r6   r.   r6   r/   r6   r0   r6   r1   r7   r2   r8   r   r9   )r+   r3   r   r$   r,   r5   r-   r6   r.   r6   r/   r5   r0   r5   r1   r7   r2   r8   r>   r@   r?   rA   r   rB   )NN)rC   r9   r>   rU   r?   rA   r   rG   )
r+   r3   r   r$   r>   r@   r?   rA   r   rZ   )
r+   r\   r"   r$   r>   rU   r?   rA   r   r]   )
r+   r\   r"   rc   r>   rU   r?   rA   r   rd   )
r+   r\   r"   re   r>   rU   r?   rA   r   rf   )'
__future__r   dataclassescollections.abcr   r   	itertoolstypingr   r   r   Zgoogle.ai.generativelanguageZaiZgenerativelanguager%   Zgoogle.generativeai.clientr   Zgoogle.generativeair	   Zgoogle.generativeai.typesr
   r   r   r   ZDEFAULT_TEXT_MODELrj   Zbatchedr   AttributeErrorr   r)   r=   rE   Zprettyprint	dataclassrG   rD   r[   ra   r   r   r   r   <module>   s|   

;I
&