from functools import lru_cache
from typing import Callable, Dict, List, Optional, Union

from ..utils import HfHubHTTPError, RepositoryNotFoundError, is_minijinja_available


class TemplateError(Exception):
    """Any error raised while trying to fetch or render a chat template."""


def _import_minijinja():
    if not is_minijinja_available():
        raise ImportError("Cannot render template. Please install minijinja using `pip install minijinja`.")
    import minijinja

    return minijinja


def render_chat_prompt(
    *,
    model_id: str,
    messages: List[Dict[str, str]],
    token: Union[str, bool, None] = None,
    add_generation_prompt: bool = True,
    **kwargs,
) -> str:
    """Render a chat prompt using a model's chat template.

    Args:
        model_id (`str`):
            The model id.
        messages (`List[Dict[str, str]]`):
            The list of messages to render.
        token (`str` or `bool`, *optional*):
            Hugging Face token. Will default to the locally saved token if not provided.

    Returns:
        `str`: The rendered chat prompt.

    Raises:
        `TemplateError`: If there's any issue while fetching, compiling or rendering the chat template.
    """
    minijinja = _import_minijinja()
    template = _fetch_and_compile_template(model_id=model_id, token=token)

    try:
        return template(messages=messages, add_generation_prompt=add_generation_prompt, **kwargs)
    except minijinja.TemplateError as e:
        raise TemplateError(f"Error while trying to render chat prompt for model '{model_id}': {e}") from e


@lru_cache
def _fetch_and_compile_template(*, model_id: str, token: Union[str, None]) -> Callable:
    """Fetch and compile a model's chat template.

    Method is cached to avoid fetching the same model's config multiple times.

    Args:
        model_id (`str`):
            The model id.
        token (`str` or `bool`, *optional*):
            Hugging Face token. Will default to the locally saved token if not provided.

    Returns:
        `Callable`: A callable that takes a list of messages and returns the rendered chat prompt.
    """
    from huggingface_hub.hf_api import HfApi

    minijinja = _import_minijinja()

    # 1. Fetch the model config from the Hub API.
    try:
        config = HfApi(token=token).model_info(model_id).config
    except RepositoryNotFoundError as e:
        raise TemplateError(f"Cannot render chat template: model '{model_id}' not found.") from e
    except HfHubHTTPError as e:
        raise TemplateError(f"Error while trying to fetch chat template for model '{model_id}': {e}") from e

    # 2. Check that the config contains a string chat template.
    if config is None:
        raise TemplateError(f"Config not found for model '{model_id}'.")
    tokenizer_config = config.get("tokenizer_config")
    if tokenizer_config is None:
        raise TemplateError(f"Tokenizer config not found for model '{model_id}'.")
    if tokenizer_config.get("chat_template") is None:
        raise TemplateError(f"Chat template not found in tokenizer_config for model '{model_id}'.")
    chat_template = tokenizer_config["chat_template"]
    if not isinstance(chat_template, str):
        raise TemplateError(f"Chat template must be a string, not '{type(chat_template)}' (model: {model_id}).")

    # 3. Collect special tokens (plain strings or serialized AddedToken dicts).
    special_tokens: Dict[str, Optional[str]] = {}
    for key, value in tokenizer_config.items():
        if "token" in key:
            if isinstance(value, str):
                special_tokens[key] = value
            elif isinstance(value, dict) and value.get("__type") == "AddedToken":
                special_tokens[key] = value.get("content")

    # 4. Compile the template and return a rendering callable.
    env = minijinja.Environment()
    try:
        env.add_template("chat_template", chat_template)
    except minijinja.TemplateError as e:
        raise TemplateError(f"Error while trying to compile chat template for model '{model_id}': {e}") from e

    return lambda **kwargs: env.render_template("chat_template", **kwargs, **special_tokens)