from __future__ import annotations

import sys
from typing import TYPE_CHECKING, Optional, cast
from argparse import ArgumentParser
from functools import partial

from openai.types.completion import Completion

from .._utils import get_client
from ..._types import NOT_GIVEN, NotGivenOr
from ..._utils import is_given
from .._errors import CLIError
from ..._models import BaseModel
from ..._streaming import Stream

if TYPE_CHECKING:
    from argparse import _SubParsersAction

def register(subparser: _SubParsersAction[ArgumentParser]) -> None:
    sub = subparser.add_parser("completions.create")

    # Required arguments
    sub.add_argument("-m", "--model", help="The model to use", required=True)

    # Optional arguments
    sub.add_argument("-p", "--prompt", help="An optional prompt to complete from")
    sub.add_argument("--stream", help="Stream tokens as they're ready.", action="store_true")
    sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int)
    sub.add_argument(
        "-t",
        "--temperature",
        help="""What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.

Mutually exclusive with `top_p`.""",
        type=float,
    )
    sub.add_argument(
        "-P",
        "--top_p",
        help="""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered.

Mutually exclusive with `temperature`.""",
        type=float,
    )
    sub.add_argument("-n", "--n", help="How many sub-completions to generate for each prompt.", type=int)
    sub.add_argument(
        "--logprobs",
        help="Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.",
        type=int,
    )
    sub.add_argument(
        "--best_of",
        help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed.",
        type=int,
    )
    sub.add_argument("--echo", help="Echo back the prompt in addition to the completion", action="store_true")
    sub.add_argument(
        "--frequency_penalty",
        help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
        type=float,
    )
    sub.add_argument(
        "--presence_penalty",
        help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
        type=float,
    )
    sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.")
    sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.")
    sub.add_argument(
        "--user",
        help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.",
    )
    sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs)
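
# Invocation sketch (an illustrative addition, not part of the original module;
# the `openai` console entry point is assumed and the model name is a
# placeholder):
#
#     openai completions.create -m davinci-002 -p "Once upon a time" -M 32 --stream
#
# Per the help text above, `-t/--temperature` and `-P/--top_p` are mutually
# exclusive, so a command should pass at most one of them.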

class CLICompletionCreateArgs(BaseModel):
    model: str
    stream: bool = False

    prompt: Optional[str] = None
    n: NotGivenOr[int] = NOT_GIVEN
    stop: NotGivenOr[str] = NOT_GIVEN
    user: NotGivenOr[str] = NOT_GIVEN
    echo: NotGivenOr[bool] = NOT_GIVEN
    suffix: NotGivenOr[str] = NOT_GIVEN
    best_of: NotGivenOr[int] = NOT_GIVEN
    top_p: NotGivenOr[float] = NOT_GIVEN
    logprobs: NotGivenOr[int] = NOT_GIVEN
    max_tokens: NotGivenOr[int] = NOT_GIVEN
    temperature: NotGivenOr[float] = NOT_GIVEN
    presence_penalty: NotGivenOr[float] = NOT_GIVEN
    frequency_penalty: NotGivenOr[float] = NOT_GIVEN
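
# Construction sketch (illustrative only; the values are placeholders). Fields
# that are never supplied keep the NOT_GIVEN sentinel, which lets the CLI
# distinguish "flag omitted" from an explicit value when building the request:
#
#     args = CLICompletionCreateArgs(model="davinci-002", prompt="Hello", n=2)
#     assert args.stream is False
#     assert not is_given(args.temperature)  # omitted, so not sent to the API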

class CLICompletions:
    @staticmethod
    def create(args: CLICompletionCreateArgs) -> None:
        if is_given(args.n) and args.n > 1 and args.stream:
            raise CLIError("Can't stream completions with n>1 with the current CLI")

        make_request = partial(
            get_client().completions.create,
            n=args.n,
            echo=args.echo,
            stop=args.stop,
            user=args.user,
            model=args.model,
            top_p=args.top_p,
            prompt=args.prompt,
            suffix=args.suffix,
            best_of=args.best_of,
            logprobs=args.logprobs,
            max_tokens=args.max_tokens,
            temperature=args.temperature,
            presence_penalty=args.presence_penalty,
            frequency_penalty=args.frequency_penalty,
        )

        if args.stream:
            # with stream=True the request returns a Stream; the cast helps the
            # type checker see through functools.partial
            return CLICompletions._stream_create(cast(Stream[Completion], make_request(stream=True)))

        return CLICompletions._create(make_request())

    @staticmethod
    def _create(completion: Completion) -> None:
        should_print_header = len(completion.choices) > 1
        for choice in completion.choices:
            if should_print_header:
                sys.stdout.write("===== Completion {} =====\n".format(choice.index))

            sys.stdout.write(choice.text)

            if should_print_header or not choice.text.endswith("\n"):
                sys.stdout.write("\n")

            sys.stdout.flush()

    @staticmethod
    def _stream_create(stream: Stream[Completion]) -> None:
        for completion in stream:
            should_print_header = len(completion.choices) > 1
            for choice in sorted(completion.choices, key=lambda c: c.index):
                if should_print_header:
                    sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index))

                sys.stdout.write(choice.text)

                if should_print_header:
                    sys.stdout.write("\n")

                sys.stdout.flush()

        sys.stdout.write("\n")
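
# Minimal manual smoke test (an illustrative addition, not part of the upstream
# module). Running this file directly mounts the subcommand on a bare parser
# and dispatches through the handler wired up by `set_defaults()`. It performs
# a real API request, so it assumes a configured API key; the key filtering
# below is a sketch of turning the parsed namespace into the args model.
if __name__ == "__main__":
    _parser = ArgumentParser(prog="completions-demo")
    register(_parser.add_subparsers(dest="command"))
    _parsed = _parser.parse_args()
    if not hasattr(_parsed, "func"):
        _parser.error("a subcommand is required")
    # drop argparse bookkeeping entries and unset optionals before validating
    _fields = {
        k: v
        for k, v in vars(_parsed).items()
        if k not in ("func", "args_model", "command") and v is not None
    }
    _parsed.func(_parsed.args_model(**_fields))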