o
    ZhQ                     @  s  d dl mZ d dlZd dlZd dlZd dlmZmZmZm	Z	m
Z
 d dlm  mZ d dlmZ d dlmZ d dlmZ d dlmZ d dlmZ d d	lmZ d]ddZd^ddZd_ddZd`ddZdad d!Z	dbdddd"dcd*d+Z	dbdddd"dcd,d-Zddddddddd.ddd9d:Zd;Z d;ddddddddddd<dedBdCZ!e"e!j#d;ddddddddddd<dfdEdFZ$ej%j&ej%j'fdGkrdHdIiZ(ni Z(ej)e"ej*j#ej+dgi e(dJdKiG dLdM dMej*Z*dhdRdSZ,		didjdTdUZ-		didkdVdWZ.dddde dddXdld[d\Z/dS )m    )annotationsN)AnyIterableListOptionalUnion)get_default_discuss_client) get_default_discuss_async_client)string_utils)discuss_types)model_types)safety_typescontentdiscuss_types.MessageOptionsreturnglm.Messagec                 C  s0   t | tjr| S t | trtj| dS t| S )z9Creates a `glm.Message` object from the provided content.r   )
isinstanceglmMessagestrr    r   R/var/www/html/lang_env/lib/python3.10/site-packages/google/generativeai/discuss.py_make_message!   s
   

r   messagesdiscuss_types.MessagesOptionsList[glm.Message]c                 C  s   t | tttjfrt| g} ndd | D } tdd | ddd D }|s)d}nt|dkr4| }nt	
d	td
d | ddd D }|sLd}nt|dkrW| }nt	
d	tdd | D rg| S ||g}t| D ]\}}||d  |_qo| S )a'  
    Creates a list of `glm.Message` objects from the provided messages.

    This function takes a variety of message content inputs, such as strings, dictionaries,
    or `glm.Message` objects, and creates a list of `glm.Message` objects. It ensures that
    the authors of the messages alternate appropriately. If authors are not provided,
    default authors are assigned based on their position in the list.

    Args:
        messages: The messages to convert.

    Returns:
        A list of `glm.Message` objects with alternating authors.
    c                 S  s   g | ]}t |qS r   )r   ).0messager   r   r   
<listcomp>?   s    z"_make_messages.<locals>.<listcomp>c                 s      | ]	}|j r|j V  qd S Nauthorr   msgr   r   r   	<genexpr>A       z!_make_messages.<locals>.<genexpr>N   0   z$Authors are not strictly alternatingc                 s  r    r!   r"   r$   r   r   r   r&   I   r'   1c                 s  s    | ]}|j V  qd S r!   r"   r$   r   r   r   r&   Q   s    )r   r   dictr   r   r   setlenpopr   ZAuthorErrorall	enumerater#   )r   Zeven_authorsZeven_authorZodd_authorsZ
odd_authorZauthorsir%   r   r   r   _make_messages+   s*   



r3   itemdiscuss_types.ExampleOptionsglm.Examplec                 C  s   t | tjr| S t | tr&|  } t| d | d< t| d | d< t| S t | tr<t| \}}tjt|t|dS t| S )z6Creates a `glm.Example` object from the provided item.inputoutputr7   r8   )r   r   Exampler,   copyr   r   list)r4   r7   r8   r   r   r   _make_example[   s   



r=   examples"List[discuss_types.MessageOptions]List[glm.Example]c                 C  s   t | d dkrttdt |  dg }g }t| D ]&\}}t|}|| |d dkr1qtj|d |d d}|| g }q|S )a  
    Creates a list of `glm.Example` objects from a list of message options.

    This function takes a list of `discuss_types.MessageOptions` and pairs them into
    `glm.Example` objects. The input examples must be in pairs to create valid examples.

    Args:
        examples: The list of `discuss_types.MessageOptions`.

    Returns:
        A list of `glm.Example objects` created by pairing up the provided messages.

    Raises:
        ValueError: If the provided list of examples is not of even length.
    r(   r   zt            You must pass `Primer` objects, pairs of messages, or an *even* number of messages, got: 
              z	 messagesr*   r9   )	r.   
ValueErrortextwrapdedentr1   r   appendr   r:   )r>   resultpairnr4   r%   Zprimerr   r   r   _make_examples_from_flatn   s,   

rH   discuss_types.ExamplesOptionsc                 C  s   t | tjr	| gS t | trt| gS t| } | s| S | d }t |tr9d|v r,t| S d|v r4d|v s8tdn
t |tj	rCt| S g }| D ]	}|
t| qG|S )a  
    Creates a list of `glm.Example` objects from the provided examples.

    This function takes various types of example content inputs and creates a list
    of `glm.Example` objects. It handles the conversion of different input types and ensures
    the appropriate structure for creating valid examples.

    Args:
        examples: The examples to convert.

    Returns:
        A list of `glm.Example` objects created from the provided examples.
    r   r   r7   r8   zTTo create an `Example` from a dict you must supply both `input` and an `output` keys)r   r   r:   r,   r=   r<   rH   	TypeErrorr   ZMESSAGE_OPTIONSrD   )r>   firstrE   r4   r   r   r   _make_examples   s,   


rL   contextr>   r   prompt"discuss_types.MessagePromptOptionsrN   
str | None$discuss_types.ExamplesOptions | None$discuss_types.MessagesOptions | Noneglm.MessagePromptc                C  s   | du rt |||d} n$|dup|dup|du}|rtdt| tjr&| S t| t r,nd| i} t|  }|tj	sFt
d|tj	  | dd}|durVt|| d< | dd}|durft|| d< dd |  D } | S )	a  
    Creates a `glm.MessagePrompt` object from the provided prompt components.

    This function constructs a `glm.MessagePrompt` object using the provided `context`, `examples`,
    or `messages`. It ensures the proper structure and handling of the input components.

    Either pass a `prompt` or it's component `context`, `examples`, `messages`.

    Args:
        prompt: The complete prompt components.
        context: The context for the prompt.
        examples: The examples for the prompt.
        messages: The messages for the prompt.

    Returns:
        A `glm.MessagePrompt` object created from the provided prompt components.
    NrM   zWYou can't set `prompt`, and its fields `(context, examples, messages)` at the same timer   z.Found extra entries in the prompt dictionary: r>   c                 S  s   i | ]\}}|d ur||qS r!   r   )r   kvr   r   r   
<dictcomp>  s    z-_make_message_prompt_dict.<locals>.<dictcomp>)r,   rA   r   r   MessagePromptr-   keysissubsetr   ZMESSAGE_PROMPT_KEYSKeyErrorgetrL   r3   items)rO   rN   r>   r   Zflat_promptrY   r   r   r   _make_message_prompt_dict   s:   
r^   c                C  s   t | |||d} t| S )zICreates a `glm.MessagePrompt` object from the provided prompt components.rO   rN   r>   r   )r^   r   rX   r_   r   r   r   _make_message_prompt  s   
r`   )rN   r>   r   temperaturecandidate_counttop_ptop_krO   model&model_types.AnyModelNameOptions | Nonera   float | Nonerb   
int | Nonerc   rd   )discuss_types.MessagePromptOptions | Noneglm.GenerateMessageRequestc        	   	      C  s0   t | } t||||d}tj| |||||dS )zFCreates a `glm.GenerateMessageRequest` object for generating messages.r_   )re   rO   ra   rc   rd   rb   )r   make_model_namer`   r   ZGenerateMessageRequest	re   rN   r>   r   ra   rb   rc   rd   rO   r   r   r   _make_generate_message_request  s   
rm   zmodels/chat-bison-001)re   rN   r>   r   ra   rb   rc   rd   rO   clientrequest_optionsrn   glm.DiscussServiceClient | Nonero   dict[str, Any] | Nonediscuss_types.ChatResponsec                 C  s(   t | ||||||||d	}t|	||
dS )aU  Calls the API and returns a `types.ChatResponse` containing the response.

    Args:
        model: Which model to call, as a string or a `types.Model`.
        context: Text that should be provided to the model first, to ground the response.

            If not empty, this `context` will be given to the model first before the
            `examples` and `messages`.

            This field can be a description of your prompt to the model to help provide
            context and guide the responses.

            Examples:

            * "Translate the phrase from English to French."
            * "Given a statement, classify the sentiment as happy, sad or neutral."

            Anything included in this field will take precedence over history in `messages`
            if the total input size exceeds the model's `Model.input_token_limit`.
        examples: Examples of what the model should generate.

            This includes both the user input and the response that the model should
            emulate.

            These `examples` are treated identically to conversation messages except
            that they take precedence over the history in `messages`:
            If the total input size exceeds the model's `input_token_limit` the input
            will be truncated. Items will be dropped from `messages` before `examples`
        messages: A snapshot of the conversation history sorted chronologically.

            Turns alternate between two authors.

            If the total input size exceeds the model's `input_token_limit` the input
            will be truncated: The oldest items will be dropped from `messages`.
        temperature: Controls the randomness of the output. Must be positive.

            Typical values are in the range: `[0.0,1.0]`. Higher values produce a
            more random and varied response. A temperature of zero will be deterministic.
        candidate_count: The **maximum** number of generated response messages to return.

            This value must be between `[1, 8]`, inclusive. If unset, this
            will default to `1`.

            Note: Only unique candidates are returned. Higher temperatures are more
            likely to produce unique candidates. Setting `temperature=0.0` will always
            return 1 candidate regardless of the `candidate_count`.
        top_k: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and
            top-k sampling.

            `top_k` sets the maximum number of tokens to sample from on each step.
        top_p: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and
           top-k sampling.

           `top_p` configures the nucleus sampling. It sets the maximum cumulative
            probability of tokens to sample from.

            For example, if the sorted probabilities are
            `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample
            as `[0.625, 0.25, 0.125, 0, 0, 0]`.

            Typical values are in the `[0.9, 1.0]` range.
        prompt: You may pass a `types.MessagePromptOptions` **instead** of a
            setting `context`/`examples`/`messages`, but not both.
        client: If you're not relying on the default client, you pass a
            `glm.DiscussServiceClient` instead.
        request_options: Options for the request.

    Returns:
        A `types.ChatResponse` containing the model's reply.
    rl   rn   requestro   )rm   _generate_responsere   rN   r>   r   ra   rb   rc   rd   rO   rn   ro   rt   r   r   r   chat3  s   Trw   $glm.DiscussServiceAsyncClient | Nonec                   s0   t | ||||||||d	}t|	||
dI d H S )Nrl   rs   )rm   _generate_response_asyncrv   r   r   r   
chat_async  s   rz   )   
   kw_onlyTinitFc                   @  s   e Zd ZU ejdd ddZded< dd Zee	
ejjjdddZejdddZe	
ejjj	ddddZe	
ejjjdddZdS )ChatResponsec                   C  s   d S r!   r   r   r   r   r   <lambda>  s    zChatResponse.<lambda>F)defaultreprrp   _clientc                 K  s"   |  D ]
\}}t| || qd S r!   )r]   setattr)selfkwargskeyvaluer   r   r   __init__  s   zChatResponse.__init__r   rQ   c                 C  s   | j d r| j d d S d S )Nr   )r   )r   r   r   r   last  s   
zChatResponse.lastr   r   c                 C  s$   t |}t||}|| jd< d S )Nr   )r   typeto_dictr   )r   r   r   r   r   r     s   Nro   rq   rr   c                 C  s   t | jtjrtd| jd u rtd| j |  }|	d |	dd  t
|d |d< |d t| tdi |}t|| j|dS )NzBreply can't be called on an async client, use reply_async instead.zThe last response from the model did not return any candidates.
Check the `.filters` attribute to see why the responses were filtered:

candidatesfiltersr   )rt   rn   ro   r   )r   r   r   ZDiscussServiceAsyncClientrJ   r   rA   r   r   r/   r<   rD   r   rm   ru   )r   r   ro   rt   r   r   r   reply  s"   

zChatResponse.replyc                   sz   t | jtjrtd|  }|d |dd  t|d |d< |d t	| t
di |}t|| jdI d H S )NzEreply_async can't be called on a non-async client, use reply instead.r   r   r   )rt   rn   r   )r   r   r   ZDiscussServiceClientrJ   r   r/   r<   rD   r   rm   ry   )r   r   rt   r   r   r   reply_async  s   
zChatResponse.reply_async)r   rQ   )r   r   r!   )r   r   ro   rq   r   rr   )r   r   r   rr   )__name__
__module____qualname__dataclassesfieldr   __annotations__r   propertyr
   set_docr   r   r   __doc__setterr   r   r   r   r   r   r     s   
 r   rt   responseglm.GenerateMessageResponse8glm.DiscussServiceClient | glm.DiscussServiceAsyncClientc                 C  s   t | | } | d}|d | d< |d | d< |d | d< t ||}|d t|d |d< |d r>|d d }nd }| d | | dd  | d	d  tdd
|i|| S )NrO   r>   rN   r   r   r   r   ra   rb   r   r   )r   r   r/   r   Zconvert_filters_to_enumsrD   
setdefaultr   )rt   r   rn   rO   r   r   r   r   _build_chat_response  s   

r   c                 C  s8   |d u ri }|d u rt  }|j| fi |}t| ||S r!   )r   generate_messager   rt   rn   ro   r   r   r   r   ru     s   ru   c                   s@   |d u ri }|d u rt  }|j| fi |I d H }t| ||S r!   )r	   r   r   r   r   r   r   ry   )  s   ry   )rO   rN   r>   r   re   rn   ro   model_types.AnyModelNameOptionsdiscuss_types.TokenCountc                 C  sX   t |}t| |||d} |d u ri }|d u rt }|jd|| d|}t||S )NrM   )re   rO   r   )r   rk   r`   r   count_message_tokensr   r   )rO   rN   r>   r   re   rn   ro   rE   r   r   r   r   9  s   

r   )r   r   r   r   )r   r   r   r   )r4   r5   r   r6   )r>   r?   r   r@   )r>   rI   r   r@   r!   )
rO   rP   rN   rQ   r>   rR   r   rS   r   rT   )re   rf   rN   rQ   r>   rR   r   rS   ra   rg   rb   rh   rc   rg   rd   rg   rO   ri   r   rj   )re   rf   rN   rQ   r>   rR   r   rS   ra   rg   rb   rh   rc   rg   rd   rg   rO   ri   rn   rp   ro   rq   r   rr   )re   rf   rN   rQ   r>   rR   r   rS   ra   rg   rb   rh   rc   rg   rd   rg   rO   ri   rn   rx   ro   rq   r   rr   r   )rt   rj   r   r   rn   r   r   r   )NN)rt   rj   rn   rp   ro   rq   r   r   )rt   rj   rn   rx   ro   rq   r   r   )rO   rP   rN   rQ   r>   rR   r   rS   re   r   rn   rx   ro   rq   r   r   )0
__future__r   r   sysrB   typingr   r   r   r   r   Zgoogle.ai.generativelanguageZaiZgenerativelanguager   Zgoogle.generativeai.clientr   r	   Zgoogle.generativeair
   Zgoogle.generativeai.typesr   r   r   r   r3   r=   rH   rL   r^   r`   rm   ZDEFAULT_DISCUSS_MODELrw   r   r   rz   version_infomajorminorZDATACLASS_KWARGSZprettyprintr   	dataclassr   ru   ry   r   r   r   r   r   <module>   s   



0

*1>
c

?