from __future__ import annotations

import json
import logging
import os
import warnings
from dataclasses import dataclass, field
from hashlib import md5
from typing import Any, Iterable, Iterator, List, Optional, Tuple, Type

import requests
from langchain_core.callbacks.manager import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.vectorstores import VectorStore, VectorStoreRetriever
from pydantic import ConfigDict

logger = logging.getLogger(__name__)

MMR_RERANKER_ID = 272725718
RERANKER_MULTILINGUAL_V1_ID = 272725719
UDF_RERANKER_ID = 272725722


@dataclass
class SummaryConfig:
    """Configuration for summary generation.

    is_enabled: True if summary is enabled, False otherwise
    max_results: maximum number of results to summarize
    response_lang: requested language for the summary
    prompt_name: name of the prompt to use for summarization
      (see https://docs.vectara.com/docs/learn/grounded-generation/select-a-summarizer)
    Fbool
is_enabled   intmax_resultsZengstrresponse_langz"vectara-summary-ext-24-05-med-omniprompt_namestreamN)
__name__
__module____qualname____doc__r   __annotations__r   r   r   r    r%   r%   _/var/www/html/lang_env/lib/python3.10/site-packages/langchain_community/vectorstores/vectara.pyr      s   
 	r   c                   @  s6   e Zd ZU dZdZded< dZded< dZd	ed
< dS )	MMRConfiga  Configuration for Maximal Marginal Relevance (MMR) search.
       This will soon be deprecated in favor of RerankConfig.

    is_enabled: True if MMR is enabled, False otherwise
    mmr_k: number of results to fetch for MMR, defaults to 50
    diversity_bias: number between 0 and 1 that determines the degree
        of diversity among the results with 0 corresponding
        to minimum diversity and 1 to maximum diversity.
        Defaults to 0.3.
        Note: diversity_bias is equivalent to 1 - lambda_mult
        where lambda_mult is the value often used in max_marginal_relevance_search()
        We chose to use that since we believe it's more intuitive to the user.
    """

    is_enabled: bool = False
    mmr_k: int = 50
    diversity_bias: float = 0.3


@dataclass
class RerankConfig:
    """Configuration for Reranker.

    reranker: "mmr", "rerank_multilingual_v1", "udf" or "none"
    rerank_k: number of results to fetch before reranking, defaults to 50
    mmr_diversity_bias: for MMR only - a number between 0 and 1 that determines
        the degree of diversity among the results with 0 corresponding
        to minimum diversity and 1 to maximum diversity.
        Defaults to 0.3.
        Note: mmr_diversity_bias is equivalent to 1 - lambda_mult
        where lambda_mult is the value often used in max_marginal_relevance_search()
        We chose to use that since we believe it's more intuitive to the user.
    user_function: for UDF only - the user function to use for reranking.
    """

    reranker: str = "none"
    rerank_k: int = 50
    mmr_diversity_bias: float = 0.3
    user_function: str = ""


@dataclass
class VectaraQueryConfig:
    """Configuration for Vectara query.

    k: Number of Documents to return. Defaults to 10.
    lambda_val: lexical match parameter for hybrid search.
    filter: Dictionary of argument(s) to filter on metadata. For example a
        filter can be "doc.rating > 3.0 and part.lang = 'deu'"; see
        https://docs.vectara.com/docs/search-apis/sql/filter-overview
        for more details.
    score_threshold: minimal score threshold for the result.
        If defined, results with score less than this value will be
        filtered out.
    n_sentence_before: number of sentences before the matching segment
        to add, defaults to 2
    n_sentence_after: number of sentences after the matching segment
        to add, defaults to 2
    rerank_config: RerankConfig configuration dataclass
    summary_config: SummaryConfig configuration dataclass
    """

    k: int = 10
    lambda_val: float = 0.0
    filter: str = ""
    score_threshold: Optional[float] = None
    n_sentence_before: int = 2
    n_sentence_after: int = 2
    rerank_config: RerankConfig = field(default_factory=RerankConfig)
    summary_config: SummaryConfig = field(default_factory=SummaryConfig)

    def __init__(
        self,
        k: int = 10,
        lambda_val: float = 0.0,
        filter: str = "",
        score_threshold: Optional[float] = None,
        n_sentence_before: int = 2,
        n_sentence_after: int = 2,
        n_sentence_context: Optional[int] = None,
        mmr_config: Optional[MMRConfig] = None,
        summary_config: Optional[SummaryConfig] = None,
        rerank_config: Optional[RerankConfig] = None,
    ):
        self.k = k
        self.lambda_val = lambda_val
        self.filter = filter
        self.score_threshold = score_threshold

        if summary_config:
            self.summary_config = summary_config
        else:
            self.summary_config = SummaryConfig()

        # n_sentence_context is deprecated; translate it into before/after values.
        if n_sentence_context:
            self.n_sentence_before = n_sentence_context
            self.n_sentence_after = n_sentence_context
            warnings.warn(
                "n_sentence_context is deprecated. "
                "Please use n_sentence_before and n_sentence_after instead",
                DeprecationWarning,
            )
        else:
            self.n_sentence_before = n_sentence_before
            self.n_sentence_after = n_sentence_after

        # MMRConfig is deprecated; translate it into an equivalent RerankConfig.
        if rerank_config:
            self.rerank_config = rerank_config
        elif mmr_config:
            self.rerank_config = RerankConfig(
                reranker="mmr",
                rerank_k=mmr_config.mmr_k,
                mmr_diversity_bias=mmr_config.diversity_bias,
            )
            warnings.warn(
                "MMRConfig is deprecated. Please use RerankConfig instead.",
                DeprecationWarning,
            )
        else:
            self.rerank_config = RerankConfig()


class Vectara(VectorStore):
    """`Vectara API` vector store.

     See (https://vectara.com).

    Example:
        .. code-block:: python

            from langchain_community.vectorstores import Vectara

            vectorstore = Vectara(
                vectara_customer_id=vectara_customer_id,
                vectara_corpus_id=vectara_corpus_id,
                vectara_api_key=vectara_api_key
            )
    """

    def __init__(
        self,
        vectara_customer_id: Optional[str] = None,
        vectara_corpus_id: Optional[str] = None,
        vectara_api_key: Optional[str] = None,
        vectara_api_timeout: int = 120,
        source: str = "langchain",
    ):
        """Initialize with Vectara API."""
        self._vectara_customer_id = vectara_customer_id or os.environ.get(
            "VECTARA_CUSTOMER_ID"
        )
        self._vectara_corpus_id = vectara_corpus_id or os.environ.get(
            "VECTARA_CORPUS_ID"
        )
        self._vectara_api_key = vectara_api_key or os.environ.get("VECTARA_API_KEY")
        if (
            self._vectara_customer_id is None
            or self._vectara_corpus_id is None
            or self._vectara_api_key is None
        ):
            logger.warning(
                "Can't find Vectara credentials, customer_id or corpus_id in "
                "environment."
            )
        else:
            logger.debug(f"Using corpus id {self._vectara_corpus_id}")
        self._source = source

        self._session = requests.Session()  # to reuse connections
        adapter = requests.adapters.HTTPAdapter(max_retries=3)
        self._session.mount("http://", adapter)
        self.vectara_api_timeout = vectara_api_timeout

    @property
    def embeddings(self) -> Optional[Embeddings]:
        # Vectara computes embeddings server-side, so no client-side object is exposed.
        return None

    def _get_post_headers(self) -> dict:
        """Returns headers that should be attached to each post request."""
        return {
            "x-api-key": self._vectara_api_key,
            "customer-id": self._vectara_customer_id,
            "Content-Type": "application/json",
            "X-Source": self._source,
        }

    def _delete_doc(self, doc_id: str) -> bool:
        """
        Delete a document from the Vectara corpus.

        Args:
            doc_id (str): ID of the document to delete.
        Returns:
            bool: True if deletion was successful, False otherwise.
        """
        body = {
            "customer_id": self._vectara_customer_id,
            "corpus_id": self._vectara_corpus_id,
            "document_id": doc_id,
        }
        response = self._session.post(
            "https://api.vectara.io/v1/delete-doc",
            data=json.dumps(body),
            verify=True,
            headers=self._get_post_headers(),
            timeout=self.vectara_api_timeout,
        )
        if response.status_code != 200:
            logger.error(
                f"Delete request failed for doc_id = {doc_id} with status code "
                f"{response.status_code}, reason {response.reason}, text "
                f"{response.text}"
            )
            return False
        return True

    def _index_doc(self, doc: dict, use_core_api: bool = False) -> str:
        request: dict = {}
        request["customer_id"] = self._vectara_customer_id
        request["corpus_id"] = self._vectara_corpus_id
        request["document"] = doc

        api_endpoint = (
            "https://api.vectara.io/v1/core/index"
            if use_core_api
            else "https://api.vectara.io/v1/index"
        )
        response = self._session.post(
            headers=self._get_post_headers(),
            url=api_endpoint,
            data=json.dumps(request),
            timeout=self.vectara_api_timeout,
            verify=True,
        )

        status_code = response.status_code
        result = response.json()
        status_str = result["status"]["code"] if "status" in result else None

        if status_code == 409 or status_str == "ALREADY_EXISTS":
            return "E_ALREADY_EXISTS"
        elif status_str == "FORBIDDEN":
            return "E_NO_PERMISSIONS"
        else:
            return "E_SUCCEEDED"

    def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
        """Delete by vector ID or other criteria.
        Args:
            ids: List of ids to delete.

        Returns:
            Optional[bool]: True if deletion is successful,
            False otherwise, None if not implemented.
        """
        if ids:
            success = [self._delete_doc(id) for id in ids]
            return all(success)
        else:
            return True
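
    # Illustrative sketch (not part of the original module): how `add_texts` and
    # `delete` are typically combined. It assumes VECTARA_CUSTOMER_ID,
    # VECTARA_CORPUS_ID and VECTARA_API_KEY are set in the environment; the texts,
    # metadata keys and values below are placeholders. The returned doc id is the
    # md5 hash of the concatenated input texts (see `add_texts` below).
    #
    #     vectara = Vectara()
    #     doc_ids = vectara.add_texts(
    #         ["Vectara is a RAG platform.", "Embeddings are computed server-side."],
    #         metadatas=[{"topic": "intro"}, {"topic": "embeddings"}],
    #         doc_metadata={"category": "notes"},
    #     )
    #     vectara.delete(ids=doc_ids)  # remove the document again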

    def add_files(
        self,
        files_list: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """
        Vectara provides a way to add documents directly via our API where
        pre-processing and chunking occurs internally in an optimal way
        This method provides a way to use that API in LangChain

        Args:
            files_list: Iterable of strings, each representing a local file path.
                    Files could be text, HTML, PDF, markdown, doc/docx, ppt/pptx, etc.
                    see API docs for full list
            metadatas: Optional list of metadatas associated with each file

        Returns:
            List of ids associated with each of the files indexed
        zFile z does not exist, skippingrb)filedoc_metadatarq   z https://api.vectara.io/upload?c=z&o=z&d=TrueT)filesrx   ry   rz   r   r   Z
documentIdz# already exists on Vectara (doc_id=z), skippingr{   zError indexing file z: )	enumeraterZ   pathexistsr`   r   openr~   r   rr   poprf   r}   r]   r^   rV   r   infoappend)rM   r   r   r   Zdoc_idsZinxr   mdr   ry   r   rs   r%   r%   r&   	add_files7  s8   


zVectara.add_filestextsr   Optional[dict]c              	   K  s   t  }|D ]	}||  q| }|du rdd |D }|r%d|d< nddi}|dd}|r3dnd	}	d
|dt||	dd t||D i}
| j|
|d}|dkr`| 	| | |
 |gS |dkrht
d |gS )a  Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            doc_metadata: optional metadata for the document

        This function indexes all the input text strings in the Vectara corpus as a
        single Vectara document, where each input text is considered a "section" and the
        metadata are associated with each section.
        if 'doc_metadata' is provided, it is associated with the Vectara document.

        Returns:
            document ID of the document added

        """
        doc_hash = md5()
        for t in texts:
            doc_hash.update(t.encode())
        doc_id = doc_hash.hexdigest()

        if metadatas is None:
            metadatas = [{} for _ in texts]
        if doc_metadata:
            doc_metadata["source"] = "langchain"
        else:
            doc_metadata = {"source": "langchain"}

        use_core_api = kwargs.get("use_core_api", False)
        section_key = "parts" if use_core_api else "section"
        doc = {
            "document_id": doc_id,
            "metadataJson": json.dumps(doc_metadata),
            section_key: [
                {"text": text, "metadataJson": json.dumps(md)}
                for text, md in zip(texts, metadatas)
            ],
        }

        success_str = self._index_doc(doc, use_core_api=use_core_api)
        if success_str == "E_ALREADY_EXISTS":
            self._delete_doc(doc_id)
            self._index_doc(doc)
        elif success_str == "E_NO_PERMISSIONS":
            print(  # noqa: T201
                """No permissions to add document to Vectara.
                Check your corpus ID, customer ID and API key"""
            )
        return [doc_id]

    def _get_query_body(
        self,
        query: str,
        config: VectaraQueryConfig,
        chat: Optional[bool] = False,
        chat_conv_id: Optional[str] = None,
        **kwargs: Any,
    ) -> dict:
        """Build the body for the API

        Args:
            query: Text to look up documents similar to.
            config: VectaraQueryConfig object
        Returns:
            A dictionary with the body of the query
        """
        if isinstance(config.rerank_config, dict):
            config.rerank_config = RerankConfig(**config.rerank_config)
        if isinstance(config.summary_config, dict):
            config.summary_config = SummaryConfig(**config.summary_config)

        body = {
            "query": [
                {
                    "query": query,
                    "start": 0,
                    "numResults": (
                        config.rerank_config.rerank_k
                        if config.rerank_config.reranker
                        in ["mmr", "udf", "rerank_multilingual_v1"]
                        else config.k
                    ),
                    "contextConfig": {
                        "sentencesBefore": config.n_sentence_before,
                        "sentencesAfter": config.n_sentence_after,
                    },
                    "corpusKey": [
                        {
                            "corpusId": self._vectara_corpus_id,
                            "metadataFilter": config.filter,
                        }
                    ],
                }
            ]
        }
        if config.lambda_val != 0:
            body["query"][0]["corpusKey"][0]["lexicalInterpolationConfig"] = {
                "lambda": config.lambda_val
            }

        if config.rerank_config.reranker == "mmr":
            body["query"][0]["rerankingConfig"] = {
                "rerankerId": MMR_RERANKER_ID,
                "mmrConfig": {
                    "diversityBias": config.rerank_config.mmr_diversity_bias
                },
            }
        elif config.rerank_config.reranker == "udf":
            body["query"][0]["rerankingConfig"] = {
                "rerankerId": UDF_RERANKER_ID,
                "userFunction": config.rerank_config.user_function,
            }
        elif config.rerank_config.reranker == "rerank_multilingual_v1":
            body["query"][0]["rerankingConfig"] = {
                "rerankerId": RERANKER_MULTILINGUAL_V1_ID,
            }

        if config.summary_config.is_enabled:
            body["query"][0]["summary"] = [
                {
                    "maxSummarizedResults": config.summary_config.max_results,
                    "responseLang": config.summary_config.response_lang,
                    "summarizerPromptName": config.summary_config.prompt_name,
                }
            ]
            if chat:
                body["query"][0]["summary"][0]["chat"] = {
                    "store": True,
                    "conversationId": chat_conv_id,
                }
        return body

    def vectara_query(
        self,
        query: str,
        config: VectaraQueryConfig,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Run a Vectara query

        Args:
            query: Text to look up documents similar to.
            config: VectaraQueryConfig object
        Returns:
            A list of k Documents matching the given query
            If summary is enabled, last document is the summary text with 'summary'=True
        """
        zhttps://api.vectara.io/v1/query)ry   r   rw   rz   r{   Query failed %s(code r|   
, details )c                   s   g | ]}|d   j kr|qS score)r;   r   rr   r%   r&   r     s
    z)Vectara.vectara_query.<locals>.<listcomp>responseSetr   r   r   c                 S     i | ]	}|d  |d qS namevaluer%   r   mr%   r%   r&   
<dictcomp>%      z)Vectara.vectara_query.<locals>.<dictcomp>metadatadocumentIndexc                 S  r   r   r%   r   r%   r%   r&   r   '  r   rW   vectarac                 S  (   g | ]\}}t |d  |d|d fqS r   Zpage_contentr   r   r   r   xr   r%   r%   r&   r   -      rH   r   Nr   r   factualConsistencyr   T)r   fcsr   r7   )r   rf   r}   rr   r~   r   rV   r   r`   r   r   r   r;   r   r   r   r@   r/   r6   rA   r   r   )rM   r   r   r   r   r   r   	responses	documentsr   r   r   doc_numdoc_mdresr   r   r%   r   r&   vectara_query  s^   



zVectara.vectara_queryc                 K  s   t di |}| ||}|S )a  Return Vectara documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 10.

            any other querying variable in VectaraQueryConfig like:
            - lambda_val: lexical match parameter for hybrid search.
            - filter: filter string
            - score_threshold: minimal score threshold for the result.
            - n_sentence_before: number of sentences before the matching segment
            - n_sentence_after: number of sentences after the matching segment
            - rerank_config: optional configuration for Reranking
              (see RerankConfig dataclass)
            - summary_config: optional configuration for summary
              (see SummaryConfig dataclass)
        Returns:
            List of Documents most similar to the query and score for each.
        Nr%   )r4   r   )rM   r   r   r   docsr%   r%   r&   similarity_search_with_scoreG  s   z$Vectara.similarity_search_with_scoreList[Document]c                 K  s    | j |fi |}dd |D S )a  Return Vectara documents most similar to query, along with scores.

        Args:
            query: Text to look up documents similar to.
            any other querying variable in VectaraQueryConfig

        Returns:
            List of Documents most similar to the query
        c                 S     g | ]\}}|qS r%   r%   r   r   r   r%   r%   r&   r   u      z-Vectara.similarity_search.<locals>.<listcomp>)r   )rM   r   r   docs_and_scoresr%   r%   r&   similarity_searchc  s   zVectara.similarity_searchr(         ?fetch_klambda_multr+   c                 K  s(   t d|d| d|d< | j|fi |S )aS  Return docs selected using the maximal marginal relevance.
        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 5.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
                     Defaults to 50
            lambda_mult: Number between 0 and 1 that determines the degree
                        of diversity among the results with 0 corresponding
                        to maximum diversity and 1 to minimum diversity.
                        Defaults to 0.5.
            kwargs: any other querying variable in VectaraQueryConfig
        Returns:
            List of Documents selected by maximal marginal relevance.
        rH      rI   r@   )r-   r   )rM   r   r   r   r   r%   r%   r&   max_marginal_relevance_searchw  s   

z%Vectara.max_marginal_relevance_searchclsType[Vectara]	embeddingc                 K  s6   | di }| di |}|j||fd|i| |S )a  Construct Vectara wrapper from raw documents.
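
    # Illustrative sketch (not part of the original module): querying with an
    # explicit configuration. The query text, filter expression and numeric
    # values are placeholders; rerank_config and summary_config take the
    # dataclasses defined at the top of this module.
    #
    #     config = VectaraQueryConfig(
    #         k=5,
    #         lambda_val=0.025,
    #         filter="doc.lang = 'eng'",
    #         rerank_config=RerankConfig(reranker="mmr", rerank_k=50),
    #         summary_config=SummaryConfig(is_enabled=True, max_results=5),
    #     )
    #     docs_and_scores = vectara.vectara_query("What is grounded generation?", config)
    #     docs = vectara.similarity_search("What is grounded generation?", k=5)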
        This is intended to be a quick way to get started.
        Example:
            .. code-block:: python

                from langchain_community.vectorstores import Vectara
                vectara = Vectara.from_texts(
                    texts,
                    vectara_customer_id=customer_id,
                    vectara_corpus_id=corpus_id,
                    vectara_api_key=api_key,
                )
        r   Nr%   )r   r   )r  r   r  r   r   r   r   r%   r%   r&   
from_texts  s   zVectara.from_textsr   c                 K  s   | di |}| || |S )a  Construct Vectara wrapper from raw documents.
        This is intended to be a quick way to get started.
        Example:
            .. code-block:: python

                from langchain_community.vectorstores import Vectara
                vectara = Vectara.from_files(
                    files_list,
                    vectara_customer_id=customer_id,
                    vectara_corpus_id=corpus_id,
                    vectara_api_key=api_key,
                )
        Nr%   )r   )r  r   r  r   r   r   r%   r%   r&   
from_files  s   zVectara.from_files
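
    # Illustrative sketch (not part of the original module): using this store
    # through the retriever interface returned by `as_retriever` (defined just
    # below). The question text and k value are placeholders.
    #
    #     retriever = vectara.as_retriever(config=VectaraQueryConfig(k=8))
    #     docs = retriever.invoke("How do I rotate my API key?")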

    def as_rag(self, config: VectaraQueryConfig) -> VectaraRAG:
        """Return a Vectara RAG runnable."""
        return VectaraRAG(self, config)

    def as_chat(self, config: VectaraQueryConfig) -> VectaraRAG:
        """Return a Vectara RAG runnable for chat."""
        return VectaraRAG(self, config, chat=True)

    def as_retriever(self, **kwargs: Any) -> VectaraRetriever:
        """return a retriever object."""
        return VectaraRetriever(
            vectorstore=self, config=kwargs.get("config", VectaraQueryConfig())
        )


class VectaraRetriever(VectorStoreRetriever):
    """Vectara Retriever class."""

    vectorstore: Vectara
    config: VectaraQueryConfig

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
    ) -> List[Document]:
        docs_and_scores = self.vectorstore.vectara_query(query, self.config, **kwargs)
        return [doc for doc, _ in docs_and_scores]

    def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
        """Add documents to vectorstore."""
        return self.vectorstore.add_documents(documents, **kwargs)


class VectaraRAG(Runnable):
    """Vectara RAG runnable.

    Parameters:
        vectara: Vectara object
        config: VectaraQueryConfig object
        chat: bool, default False
    """

    def __init__(
        self, vectara: Vectara, config: VectaraQueryConfig, chat: bool = False
    ):
        self.vectara = vectara
        self.config = config
        self.chat = chat
        self.conv_id = None

    def stream(
        self,
        input: str,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> Iterator[dict]:
        """Get streaming output from Vectara RAG.

        Args:
            input: The input query
            config: RunnableConfig object
            kwargs: Any additional arguments

        Returns:
            The output dictionary with question, answer and context
        """
        body = self.vectara._get_query_body(input, self.config, self.chat, self.conv_id)

        response = self.vectara._session.post(
            headers=self.vectara._get_post_headers(),
            url="https://api.vectara.io/v1/stream-query",
            data=json.dumps(body),
            timeout=self.vectara.vectara_api_timeout,
            stream=True,
        )

        if response.status_code != 200:
            logger.error(
                "Query failed %s",
                f"(code {response.status_code}, reason {response.reason}, details "
                f"{response.text})",
            )
            return

        responses = []
        documents = []

        yield {"question": input}

        for line in response.iter_lines():
            if not line:
                continue
            data = json.loads(line.decode("utf-8"))
            result = data["result"]
            response_set = result["responseSet"]
            if response_set is None:
                summary = result.get("summary", None)
                if summary is None:
                    continue
                if len(summary.get("status")) > 0:
                    logger.error(
                        f"Summary generation failed with status "
                        f"{summary.get('status')[0].get('statusDetail')}"
                    )
                    continue

                # Store the conversation id for chat, if applicable.
                chat = summary.get("chat", None)
                if chat and chat.get("status", None):
                    st_code = chat["status"]
                    logger.info(f"Chat query failed with code {st_code}")
                    if st_code == "RESOURCE_EXHAUSTED":
                        self.conv_id = None
                        logger.error("Sorry, Vectara chat turns exceeds plan limit.")
                        continue

                conv_id = chat.get("conversationId", None) if chat else None
                if conv_id:
                    self.conv_id = conv_id

                if summary.get("factualConsistency", None):
                    fcs = summary.get("factualConsistency", {}).get("score", None)
                    yield {"fcs": fcs}
                else:
                    chunk = str(summary["text"])
                    yield {"answer": chunk}
            else:
                if self.config.score_threshold:
                    responses = [
                        r
                        for r in response_set["response"]
                        if r["score"] > self.config.score_threshold
                    ]
                else:
                    responses = response_set["response"]
                documents = response_set["document"]

                metadatas = []
                for x in responses:
                    md = {m["name"]: m["value"] for m in x["metadata"]}
                    doc_num = x["documentIndex"]
                    doc_md = {
                        m["name"]: m["value"]
                        for m in documents[doc_num]["metadata"]
                    }
                    if "source" not in doc_md:
                        doc_md["source"] = "vectara"
                    md.update(doc_md)
                    metadatas.append(md)

                res = [
                    (
                        Document(page_content=x["text"], metadata=md),
                        x["score"],
                    )
                    for x, md in zip(responses, metadatas)
                ]
                if self.config.rerank_config.reranker in [
                    "mmr",
                    "udf",
                    "rerank_multilingual_v1",
                ]:
                    res = res[: self.config.k]
                yield {"context": res}
        return

    def invoke(
        self,
        input: str,
        config: Optional[RunnableConfig] = None,
        **kwargs: Any,
    ) -> dict:
        res = {"answer": ""}
        for chunk in self.stream(input):
            if "context" in chunk:
                res["context"] = chunk["context"]
            elif "question" in chunk:
                res["question"] = chunk["question"]
            elif "answer" in chunk:
                res["answer"] += chunk["answer"]
            elif "fcs" in chunk:
                res["fcs"] = chunk["fcs"]
            else:
                logger.error(f"Unknown chunk type: {chunk}")
        return res
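

# Illustrative sketch (not part of the original module): a grounded-generation
# round trip with the VectaraRAG runnable. The question text and configuration
# values are placeholders; `as_chat` works the same way but keeps a conversation
# id between calls.
#
#     vectara = Vectara()  # credentials read from the environment
#     rag = vectara.as_rag(
#         VectaraQueryConfig(
#             k=5,
#             summary_config=SummaryConfig(is_enabled=True, response_lang="eng"),
#         )
#     )
#     result = rag.invoke("What does the handbook say about onboarding?")
#     print(result["answer"], result.get("fcs"))
#
#     for chunk in rag.stream("Summarize the FAQ"):
#         if "answer" in chunk:
#             print(chunk["answer"], end="")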