"""
.. warning::
  Beta Feature!

**Cache** provides an optional caching layer for LLMs.

Cache is useful for two reasons:

- It can save you money by reducing the number of API calls you make to the LLM
  provider if you're often requesting the same completion multiple times.
- It can speed up your application by reducing the number of API calls you make
  to the LLM provider.

Cache directly competes with Memory. See documentation for Pros and Cons.

**Class hierarchy:**

.. code-block::

    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
    )annotationsN)ABC)	timedelta)	lru_cache)
TYPE_CHECKINGAnyCallableDictListOptionalTupleTypeUnioncast)ColumnIntegerStringcreate_engineselect)Row)Engine)Session)declarative_base)RETURN_VAL_TYPE	BaseCache)
Embeddings)LLMget_promptsdumpsloads)ChatGeneration
Generation)get_from_env)AstraDBEnvironmentRedis)AstraDB_inputstrreturnc                 C     t |   S )z%Use a deterministic hashing approach.hashlibmd5encode	hexdigest)r)    r2   R/var/www/html/corbot_env/lib/python3.10/site-packages/langchain_community/cache.py_hashK      r4   generationsr   c                 C     t dd | D S )a  Dump generations to json.

    Args:
        generations (RETURN_VAL_TYPE): A list of language model generations.

    Returns:
        str: Json representing a list of generations.

    Warning: would not work well with arbitrary subclasses of `Generation`
    """
    return json.dumps([generation.dict() for generation in generations])


def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE:
    """Load generations from json.

    Args:
        generations_json (str): A string of json representing a list of generations.

    Raises:
        ValueError: Could not decode json string to list of generations.

    Returns:
        RETURN_VAL_TYPE: A list of generations.

    Warning: would not work well with arbitrary subclasses of `Generation`
    """
    try:
        results = json.loads(generations_json)
        return [Generation(**generation_dict) for generation_dict in results]
    except json.JSONDecodeError:
        raise ValueError(
            f"Could not decode json to list of generations: {generations_json}"
        )


def _dumps_generations(generations: RETURN_VAL_TYPE) -> str:
    """
    Serialization for generic RETURN_VAL_TYPE, i.e. sequence of `Generation`

    Args:
        generations (RETURN_VAL_TYPE): A list of language model generations.

    Returns:
        str: a single string representing a list of generations.

    This function (+ its counterpart `_loads_generations`) rely on
    the dumps/loads pair with Reviver, so are able to deal
    with all subclasses of Generation.

    Each item in the list can be `dumps`ed to a string,
    then we make the whole list of strings into a json-dumped.
    """
    return json.dumps([dumps(_item) for _item in generations])


def _loads_generations(generations_str: str) -> Union[RETURN_VAL_TYPE, None]:
    """
    Deserialization of a string into a generic RETURN_VAL_TYPE
    (i.e. a sequence of `Generation`).

    See `_dumps_generations`, the inverse of this function.

    Args:
        generations_str (str): A string representing a list of generations.

    Compatible with the legacy cache-blob format
    Does not raise exceptions for malformed entries, just logs a warning
    and returns none: the caller should be prepared for such a cache miss.

    Returns:
        RETURN_VAL_TYPE: A list of generations.
    """
    try:
        generations = [loads(_item_str) for _item_str in json.loads(generations_str)]
        return generations
    except (json.JSONDecodeError, TypeError):
        # deferring the (soft) handling to after the legacy-format attempt
        pass

    try:
        gen_dicts = json.loads(generations_str)
        # not relying on `_load_generations_from_json` (which could raise ValueError)
        generations = [Generation(**generation_dict) for generation_dict in gen_dicts]
        logger.warning(
            f"Legacy 'Generation' cached blob encountered: '{generations_str}'"
        )
        return generations
    except (json.JSONDecodeError, TypeError):
        logger.warning(
            f"Malformed/unparsable cached blob encountered: '{generations_str}'"
        )
        return None
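

# A small round-trip sketch for the two serializers above (illustrative only;
# the module itself never calls it):
def _example_generation_serialization_roundtrip() -> None:
    blob = _dumps_generations([Generation(text="Hello, world!")])
    restored = _loads_generations(blob)
    assert restored is not None and restored[0].text == "Hello, world!"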


class InMemoryCache(BaseCache):
    """Cache that stores things in memory."""

    def __init__(self) -> None:
        """Initialize with empty cache."""
        self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        return self._cache.get((prompt, llm_string), None)

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        self._cache[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        self._cache = {}

    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        return self.lookup(prompt, llm_string)

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
        """Update cache based on prompt and llm_string."""
        self.update(prompt, llm_string, return_val)

    async def aclear(self, **kwargs: Any) -> None:
        """Clear cache."""
        self.clear()
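

# A minimal usage sketch (illustrative only): installing the in-memory cache as
# the process-wide LLM cache via `langchain_core.globals.set_llm_cache`.
def _example_in_memory_cache_usage() -> None:
    from langchain_core.globals import set_llm_cache

    set_llm_cache(InMemoryCache())
    # Repeated identical (prompt, llm_string) completions are now served from
    # the in-memory dict instead of triggering another provider call.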


Base = declarative_base()


class FullLLMCache(Base):  # type: ignore
    """SQLite table for full LLM Cache (all generations)."""

    __tablename__ = "full_llm_cache"
    prompt = Column(String, primary_key=True)
    llm = Column(String, primary_key=True)
    idx = Column(Integer, primary_key=True)
    response = Column(String)


class SQLAlchemyCache(BaseCache):
    """Cache that uses SQLAlchemy as a backend."""

    def __init__(
        self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache
    ):
        """Initialize by creating all tables."""
        self.engine = engine
        self.cache_schema = cache_schema
        self.cache_schema.metadata.create_all(self.engine)

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        stmt = (
            select(self.cache_schema.response)
            .where(self.cache_schema.prompt == prompt)  # type: ignore
            .where(self.cache_schema.llm == llm_string)
            .order_by(self.cache_schema.idx)
        )
        with Session(self.engine) as session:
            rows = session.execute(stmt).fetchall()
            if rows:
                try:
                    return [loads(row[0]) for row in rows]
                except Exception:
                    logger.warning(
                        "Retrieving a cache value that could not be deserialized "
                        "properly. This is likely due to the cache being in an "
                        "older format. Please recreate your cache to avoid this "
                        "error."
                    )
                    # In a previous life we stored the raw text directly
                    # in the table, so assume it's in that format.
                    return [Generation(text=row[0]) for row in rows]
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update based on prompt and llm_string."""
        items = [
            self.cache_schema(
                prompt=prompt, llm=llm_string, response=dumps(gen), idx=i
            )
            for i, gen in enumerate(return_val)
        ]
        with Session(self.engine) as session, session.begin():
            for item in items:
                session.merge(item)

    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        with Session(self.engine) as session:
            session.query(self.cache_schema).delete()
            session.commit()
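

# A minimal sketch of pointing SQLAlchemyCache at an existing engine; the
# connection URL below is a placeholder for any SQLAlchemy-supported database.
def _example_sqlalchemy_cache_usage() -> None:
    from langchain_core.globals import set_llm_cache
    from sqlalchemy import create_engine

    engine = create_engine("postgresql://user:pass@localhost:5432/llm_cache")
    set_llm_cache(SQLAlchemyCache(engine))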


class SQLiteCache(SQLAlchemyCache):
    """Cache that uses SQLite as a backend."""

    def __init__(self, database_path: str = ".langchain.db"):
        """Initialize by creating the engine and all tables."""
        engine = create_engine(f"sqlite:///{database_path}")
        super().__init__(engine)


class UpstashRedisCache(BaseCache):
    """Cache that uses Upstash Redis as a backend."""

    def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
        """
        Initialize an instance of UpstashRedisCache.

        This method initializes an object with Upstash Redis caching capabilities.
        It takes a `redis_` parameter, which should be an instance of an Upstash Redis
        client class, allowing the object to interact with Upstash Redis
        server for caching purposes.

        Parameters:
            redis_: An instance of Upstash Redis client class
                (e.g., Redis) used for caching.
                This allows the object to communicate with
                Redis server for caching operations on.
            ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
                If provided, it sets the time duration for how long cached
                items will remain valid. If not provided, cached items will not
                have an automatic expiration.
        """
        try:
            from upstash_redis import Redis
        except ImportError:
            raise ImportError(
                "Could not import upstash_redis python package. "
                "Please install it with `pip install upstash_redis`."
            )
        if not isinstance(redis_, Redis):
            raise ValueError("Please pass in Upstash Redis object.")
        self.redis = redis_
        self.ttl = ttl

    def _key(self, prompt: str, llm_string: str) -> str:
        """Compute key from prompt and llm_string"""
        return _hash(prompt + llm_string)

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        generations = []
        # Read back the hash stored under this (prompt, llm_string) key,
        # one field per generation.
        results = self.redis.hgetall(self._key(prompt, llm_string))
        if results:
            for _, text in results.items():
                generations.append(Generation(text=text))
        return generations if generations else None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "UpstashRedisCache supports caching of normal LLM "
                    f"generations, got {type(gen)}"
                )
            if isinstance(gen, ChatGeneration):
                warnings.warn(
                    "NOTE: Generation has not been cached. UpstashRedisCache does "
                    "not support caching ChatModel outputs."
                )
                return
        key = self._key(prompt, llm_string)
        mapping = {
            str(idx): generation.text for idx, generation in enumerate(return_val)
        }
        self.redis.hset(key=key, values=mapping)
        if self.ttl is not None:
            self.redis.expire(key, self.ttl)

    def clear(self, **kwargs: Any) -> None:
        """
        Clear cache. If `asynchronous` is True, flush asynchronously.
        This flushes the *whole* db.
        """
        asynchronous = kwargs.get("asynchronous", False)
        if asynchronous:
            flush_type = "ASYNC"
        else:
            flush_type = "SYNC"
        self.redis.flushdb(flush_type=flush_type)


class _RedisCacheBase(BaseCache, ABC):
    @staticmethod
    def _key(prompt: str, llm_string: str) -> str:
        """Compute key from prompt and llm_string"""
        return _hash(prompt + llm_string)

    @staticmethod
    def _ensure_generation_type(return_val: RETURN_VAL_TYPE) -> None:
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "RedisCache only supports caching of normal LLM generations, "
                    f"got {type(gen)}"
                )

    @staticmethod
    def _get_generations(
        results: dict[str | bytes, str | bytes],
    ) -> Optional[List[Generation]]:
        generations = []
        if results:
            for _, text in results.items():
                try:
                    generations.append(loads(cast(str, text)))
                except Exception:
                    logger.warning(
                        "Retrieving a cache value that could not be deserialized "
                        "properly. This is likely due to the cache being in an "
                        "older format. Please recreate your cache to avoid this "
                        "error."
                    )
                    # In a previous life we stored the raw text directly
                    # in the table, so assume it's in that format.
                    generations.append(Generation(text=text))  # type: ignore[arg-type]
        return generations if generations else None

    @staticmethod
    def _configure_pipeline_for_update(
        key: str, pipe: Any, return_val: RETURN_VAL_TYPE, ttl: Optional[int] = None
    ) -> None:
        pipe.hset(
            key,
            mapping={
                str(idx): dumps(generation)
                for idx, generation in enumerate(return_val)
            },
        )
        if ttl is not None:
            pipe.expire(key, ttl)


class RedisCache(_RedisCacheBase):
    """
    Cache that uses Redis as a backend. Allows to use a sync `redis.Redis` client.
    """

    def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
        """
        Initialize an instance of RedisCache.

        This method initializes an object with Redis caching capabilities.
        It takes a `redis_` parameter, which should be an instance of a Redis
        client class (`redis.Redis`), allowing the object
        to interact with a Redis server for caching purposes.

        Parameters:
            redis_ (Any): An instance of a Redis client class
                (`redis.Redis`) to be used for caching.
                This allows the object to communicate with a
                Redis server for caching operations.
            ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
                If provided, it sets the time duration for how long cached
                items will remain valid. If not provided, cached items will not
                have an automatic expiration.
        """
        try:
            from redis import Redis
        except ImportError:
            raise ImportError(
                "Could not import `redis` python package. "
                "Please install it with `pip install redis`."
            )
        if not isinstance(redis_, Redis):
            raise ValueError("Please pass a valid `redis.Redis` client.")
        self.redis = redis_
        self.ttl = ttl

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        # Read back all fields of the hash stored under this key.
        results = self.redis.hgetall(self._key(prompt, llm_string))
        return self._get_generations(results)  # type: ignore[arg-type]

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        self._ensure_generation_type(return_val)
        key = self._key(prompt, llm_string)
        with self.redis.pipeline() as pipe:
            self._configure_pipeline_for_update(key, pipe, return_val, self.ttl)
            pipe.execute()

    def clear(self, **kwargs: Any) -> None:
        """Clear cache. If `asynchronous` is True, flush asynchronously."""
        asynchronous = kwargs.get("asynchronous", False)
        self.redis.flushdb(asynchronous=asynchronous, **kwargs)
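

# A minimal usage sketch (illustrative only): a TTL-bounded Redis-backed cache
# shared process-wide. Assumes a Redis server reachable on localhost:6379.
def _example_redis_cache_usage() -> None:
    from langchain_core.globals import set_llm_cache
    from redis import Redis

    set_llm_cache(RedisCache(Redis(host="localhost", port=6379), ttl=3600))
    # Cached entries now expire after an hour, so the Redis instance does not
    # grow without bound.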


class AsyncRedisCache(_RedisCacheBase):
    """
    Cache that uses Redis as a backend. Allows to use an
    async `redis.asyncio.Redis` client.
    """

    def __init__(self, redis_: Any, *, ttl: Optional[int] = None):
        """
        Initialize an instance of AsyncRedisCache.

        This method initializes an object with Redis caching capabilities.
        It takes a `redis_` parameter, which should be an instance of a Redis
        client class (`redis.asyncio.Redis`), allowing the object
        to interact with a Redis server for caching purposes.

        Parameters:
            redis_ (Any): An instance of a Redis client class
                (`redis.asyncio.Redis`) to be used for caching.
                This allows the object to communicate with a
                Redis server for caching operations.
            ttl (int, optional): Time-to-live (TTL) for cached items in seconds.
                If provided, it sets the time duration for how long cached
                items will remain valid. If not provided, cached items will not
                have an automatic expiration.
        """
        try:
            from redis.asyncio import Redis
        except ImportError:
            raise ImportError(
                "Could not import `redis.asyncio` python package. "
                "Please install it with `pip install redis`."
            )
        if not isinstance(redis_, Redis):
            raise ValueError("Please pass a valid `redis.asyncio.Redis` client.")
        self.redis = redis_
        self.ttl = ttl

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        raise NotImplementedError(
            "This async Redis cache does not implement `lookup()` method. "
            "Consider using the async `alookup()` version."
        )

    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string. Async version."""
        results = await self.redis.hgetall(self._key(prompt, llm_string))
        return self._get_generations(results)  # type: ignore[arg-type]

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        raise NotImplementedError(
            "This async Redis cache does not implement `update()` method. "
            "Consider using the async `aupdate()` version."
        )

    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
        """Update cache based on prompt and llm_string. Async version."""
        self._ensure_generation_type(return_val)
        key = self._key(prompt, llm_string)
        async with self.redis.pipeline() as pipe:
            self._configure_pipeline_for_update(key, pipe, return_val, self.ttl)
            await pipe.execute()

    def clear(self, **kwargs: Any) -> None:
        """Clear cache. If `asynchronous` is True, flush asynchronously."""
        raise NotImplementedError(
            "This async Redis cache does not implement `clear()` method. "
            "Consider using the async `aclear()` version."
        )

    async def aclear(self, **kwargs: Any) -> None:
        """
        Clear cache. If `asynchronous` is True, flush asynchronously.
        Async version.
        """
        asynchronous = kwargs.get("asynchronous", False)
        await self.redis.flushdb(asynchronous=asynchronous, **kwargs)


class RedisSemanticCache(BaseCache):
    """Cache that uses Redis as a vector-store backend."""

    # TODO - implement a generic schema for the cache
    DEFAULT_SCHEMA = {
        "content_key": "prompt",
        "text": [
            {"name": "prompt"},
        ],
        "extra": [{"name": "return_val"}, {"name": "llm_string"}],
    }

    def __init__(
        self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
    ):
        """Initialize the semantic cache with a Redis URL and an embedding provider.

        Args:
            redis_url (str): URL to connect to Redis.
            embedding (Embedding): Embedding provider for semantic encoding and search.
            score_threshold (float, 0.2): similarity score threshold for
                accepting a cached hit.

        Example:

        .. code-block:: python

            from langchain.globals import set_llm_cache

            from langchain_community.cache import RedisSemanticCache
            from langchain_community.embeddings import OpenAIEmbeddings

            set_llm_cache(RedisSemanticCache(
                redis_url="redis://localhost:6379",
                embedding=OpenAIEmbeddings()
            ))

        """
        self._cache_dict: Dict[str, RedisVectorstore] = {}
        self.redis_url = redis_url
        self.embedding = embedding
        self.score_threshold = score_threshold

    def _index_name(self, llm_string: str) -> str:
        hashed_index = _hash(llm_string)
        return f"cache:{hashed_index}"

    def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
        index_name = self._index_name(llm_string)

        # return vectorstore client for the specific llm string
        if index_name in self._cache_dict:
            return self._cache_dict[index_name]

        # create new vectorstore client for the specific llm string
        try:
            self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
                embedding=self.embedding,
                index_name=index_name,
                redis_url=self.redis_url,
                schema=cast(Dict, self.DEFAULT_SCHEMA),
            )
        except ValueError:
            redis = RedisVectorstore(
                embedding=self.embedding,
                index_name=index_name,
                redis_url=self.redis_url,
                index_schema=cast(Dict, self.DEFAULT_SCHEMA),
            )
            _embedding = self.embedding.embed_query(text="test")
            redis._create_index_if_not_exist(dim=len(_embedding))
            self._cache_dict[index_name] = redis

        return self._cache_dict[index_name]

    def clear(self, **kwargs: Any) -> None:
        """Clear semantic cache for a given llm_string."""
        index_name = self._index_name(kwargs["llm_string"])
        if index_name in self._cache_dict:
            self._cache_dict[index_name].drop_index(
                index_name=index_name, delete_documents=True, redis_url=self.redis_url
            )
            del self._cache_dict[index_name]

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        llm_cache = self._get_llm_cache(llm_string)
        generations: List = []
        results = llm_cache.similarity_search(
            query=prompt,
            k=1,
            distance_threshold=self.score_threshold,
        )
        if results:
            for document in results:
                try:
                    generations.extend(loads(document.metadata["return_val"]))
                except Exception:
                    logger.warning(
                        "Retrieving a cache value that could not be deserialized "
                        "properly. This is likely due to the cache being in an "
                        "older format. Please recreate your cache to avoid this "
                        "error."
                    )
                    generations.extend(
                        _load_generations_from_json(document.metadata["return_val"])
                    )
        return generations if generations else None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "RedisSemanticCache only supports caching of "
                    f"normal LLM generations, got {type(gen)}"
                )
        llm_cache = self._get_llm_cache(llm_string)

        metadata = {
            "llm_string": llm_string,
            "prompt": prompt,
            "return_val": dumps([g for g in return_val]),
        }
        llm_cache.add_texts(texts=[prompt], metadatas=[metadata])


class GPTCache(BaseCache):
    """Cache that uses GPTCache as a backend."""

    def __init__(
        self,
        init_func: Union[
            Callable[[Any, str], None], Callable[[Any], None], None
        ] = None,
    ):
        """Initialize by passing in init function (default: `None`).

        Args:
            init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
            (default: `None`)

        Example:
        .. code-block:: python

            # Initialize GPTCache with a custom init function
            import gptcache
            from gptcache.processor.pre import get_prompt
            from gptcache.manager.factory import manager_factory
            from langchain.globals import set_llm_cache

            # Avoid multiple caches using the same file,
            # causing different llm model caches to affect each other

            def init_gptcache(cache_obj: gptcache.Cache, llm: str):
                cache_obj.init(
                    pre_embedding_func=get_prompt,
                    data_manager=manager_factory(
                        manager="map",
                        data_dir=f"map_cache_{llm}"
                    ),
                )

            set_llm_cache(GPTCache(init_gptcache))

        """
        try:
            import gptcache  # noqa: F401
        except ImportError:
            raise ImportError(
                "Could not import gptcache python package. "
                "Please install it with `pip install gptcache`."
            )

        self.init_gptcache_func: Union[
            Callable[[Any, str], None], Callable[[Any], None], None
        ] = init_func
        self.gptcache_dict: Dict[str, Any] = {}

    def _new_gptcache(self, llm_string: str) -> Any:
        """New gptcache object"""
        from gptcache import Cache
        from gptcache.manager.factory import get_data_manager
        from gptcache.processor.pre import get_prompt

        _gptcache = Cache()
        if self.init_gptcache_func is not None:
            sig = inspect.signature(self.init_gptcache_func)
            if len(sig.parameters) == 2:
                self.init_gptcache_func(_gptcache, llm_string)  # type: ignore[call-arg]
            else:
                self.init_gptcache_func(_gptcache)  # type: ignore[call-arg]
        else:
            _gptcache.init(
                pre_embedding_func=get_prompt,
                data_manager=get_data_manager(data_path=llm_string),
            )

        self.gptcache_dict[llm_string] = _gptcache
        return _gptcache

    def _get_gptcache(self, llm_string: str) -> Any:
        """Get a cache object.

        When the corresponding llm model cache does not exist, it will be created."""
        _gptcache = self.gptcache_dict.get(llm_string, None)
        if not _gptcache:
            _gptcache = self._new_gptcache(llm_string)
        return _gptcache

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up the cache data.
        First, retrieve the corresponding cache object using the `llm_string`
        parameter, and then retrieve the data from the cache based on the `prompt`.
        """
        from gptcache.adapter.api import get

        _gptcache = self._get_gptcache(llm_string)
        res = get(prompt, cache_obj=_gptcache)
        if res:
            return [
                Generation(**generation_dict) for generation_dict in json.loads(res)
            ]
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache.
        First, retrieve the corresponding cache object using the `llm_string`
        parameter, and then store the `prompt` and `return_val` in the cache object.
        """
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "GPTCache only supports caching of normal LLM generations, "
                    f"got {type(gen)}"
                )
        from gptcache.adapter.api import put

        _gptcache = self._get_gptcache(llm_string)
        handled_data = json.dumps([generation.dict() for generation in return_val])
        put(prompt, handled_data, cache_obj=_gptcache)

    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        from gptcache import Cache

        for gptcache_instance in self.gptcache_dict.values():
            gptcache_instance = cast(Cache, gptcache_instance)
            gptcache_instance.flush()

        self.gptcache_dict.clear()


def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
    """Create cache if it doesn't exist.

    Raises:
        SdkException: Momento service or network error
        Exception: Unexpected response
    """
    from momento.responses import CreateCache

    create_cache_response = cache_client.create_cache(cache_name)
    if isinstance(create_cache_response, CreateCache.Success) or isinstance(
        create_cache_response, CreateCache.CacheAlreadyExists
    ):
        return None
    elif isinstance(create_cache_response, CreateCache.Error):
        raise create_cache_response.inner_exception
    else:
        raise Exception(f"Unexpected response cache creation: {create_cache_response}")


def _validate_ttl(ttl: Optional[timedelta]) -> None:
    if ttl is not None and ttl <= timedelta(seconds=0):
        raise ValueError(f"ttl must be positive but was {ttl}.")


class MomentoCache(BaseCache):
    """Cache that uses Momento as a backend. See https://gomomento.com/"""

    def __init__(
        self,
        cache_client: momento.CacheClient,
        cache_name: str,
        *,
        ttl: Optional[timedelta] = None,
        ensure_cache_exists: bool = True,
    ):
        """Instantiate a prompt cache using Momento as a backend.

        Note: to instantiate the cache client passed to MomentoCache,
        you must have a Momento account. See https://gomomento.com/.

        Args:
            cache_client (CacheClient): The Momento cache client.
            cache_name (str): The name of the cache to use to store the data.
            ttl (Optional[timedelta], optional): The time to live for the cache items.
                Defaults to None, ie use the client default TTL.
            ensure_cache_exists (bool, optional): Create the cache if it doesn't
                exist. Defaults to True.

        Raises:
            ImportError: Momento python package is not installed.
            TypeError: cache_client is not a momento.CacheClient object
            ValueError: ttl is non-null and non-negative
        """
        try:
            from momento import CacheClient
        except ImportError:
            raise ImportError(
                "Could not import momento python package. "
                "Please install it with `pip install momento`."
            )
        if not isinstance(cache_client, CacheClient):
            raise TypeError("cache_client must be a momento.CacheClient object.")
        _validate_ttl(ttl)
        if ensure_cache_exists:
            _ensure_cache_exists(cache_client, cache_name)

        self.cache_client = cache_client
        self.cache_name = cache_name
        self.ttl = ttl

    @classmethod
    def from_client_params(
        cls,
        cache_name: str,
        ttl: timedelta,
        *,
        configuration: Optional[momento.config.Configuration] = None,
        api_key: Optional[str] = None,
        auth_token: Optional[str] = None,  # for backwards compatibility
        **kwargs: Any,
    ) -> MomentoCache:
        """Construct cache from CacheClient parameters."""
        try:
            from momento import CacheClient, Configurations, CredentialProvider
        except ImportError:
            raise ImportError(
                "Could not import momento python package. "
                "Please install it with `pip install momento`."
            )
        if configuration is None:
            configuration = Configurations.Laptop.v1()
        # Prefer the legacy auth-token environment variable, then the API key.
        try:
            api_key = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
        except ValueError:
            api_key = api_key or get_from_env("api_key", "MOMENTO_API_KEY")
        credentials = CredentialProvider.from_string(api_key)
        cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
        return cls(cache_client, cache_name, ttl=ttl, **kwargs)

    def __key(self, prompt: str, llm_string: str) -> str:
        """Compute cache key from prompt and associated model and settings.
        Args:
            prompt (str): The prompt run through the language model.
            llm_string (str): The language model version and settings.

        Returns:
            str: The cache key.
        r   ri   r2   r2   r3   __key  s   
zMomentoCache.__keyrf   c                 C  sp   ddl m} g }| j| j| ||}t||jr"|j}t	|}nt||j
r)n	t||jr2|j|r6|S dS )a  Lookup llm generations in cache by prompt and associated model and settings.

        Args:
            prompt (str): The prompt run through the language model.
            llm_string (str): The language model version and settings.

        Raises:
            SdkException: Momento service or network error

        Returns:
            Optional[RETURN_VAL_TYPE]: A list of language model generations.
        """
        from momento.responses import CacheGet

        generations: RETURN_VAL_TYPE = []

        get_response = self.cache_client.get(
            self.cache_name, self.__key(prompt, llm_string)
        )
        if isinstance(get_response, CacheGet.Hit):
            value = get_response.value_string
            generations = _load_generations_from_json(value)
        elif isinstance(get_response, CacheGet.Miss):
            pass
        elif isinstance(get_response, CacheGet.Error):
            raise get_response.inner_exception
        return generations if generations else None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Store llm generations in cache.

        Args:
            prompt (str): The prompt run through the language model.
            llm_string (str): The language model string.
            return_val (RETURN_VAL_TYPE): A list of language model generations.

        Raises:
            SdkException: Momento service or network error
            Exception: Unexpected response
        """
        for gen in return_val:
            if not isinstance(gen, Generation):
                raise ValueError(
                    "Momento only supports caching of normal LLM generations, "
                    f"got {type(gen)}"
                )
        key = self.__key(prompt, llm_string)
        value = _dump_generations_to_json(return_val)
        set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
        from momento.responses import CacheSet

        if isinstance(set_response, CacheSet.Success):
            pass
        elif isinstance(set_response, CacheSet.Error):
            raise set_response.inner_exception
        else:
            raise Exception(f"Unexpected response: {set_response}")

    def clear(self, **kwargs: Any) -> None:
        """Clear the cache.

        Raises:
            SdkException: Momento service or network error
        """
        from momento.responses import CacheFlush

        flush_response = self.cache_client.flush_cache(self.cache_name)
        if isinstance(flush_response, CacheFlush.Success):
            pass
        elif isinstance(flush_response, CacheFlush.Error):
            raise flush_response.inner_exception
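

# A minimal sketch of wiring MomentoCache as the global LLM cache (assumes a
# Momento API key in the MOMENTO_API_KEY or MOMENTO_AUTH_TOKEN environment
# variable; cache name and TTL below are illustrative):
def _example_momento_cache_usage() -> None:
    from langchain_core.globals import set_llm_cache

    set_llm_cache(MomentoCache.from_client_params("langchain", ttl=timedelta(days=1)))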


CASSANDRA_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_cache"
CASSANDRA_CACHE_DEFAULT_TTL_SECONDS = None


class CassandraCache(BaseCache):
    """
    Cache that uses Cassandra / Astra DB as a backend.

    It uses a single Cassandra table.
    The lookup keys (which get to form the primary key) are:
        - prompt, a string
        - llm_string, a deterministic str representation of the model parameters.
          (needed to prevent same-prompt-different-model collisions)
    """

    def __init__(
        self,
        session: Optional[CassandraSession] = None,
        keyspace: Optional[str] = None,
        table_name: str = CASSANDRA_CACHE_DEFAULT_TABLE_NAME,
        ttl_seconds: Optional[int] = CASSANDRA_CACHE_DEFAULT_TTL_SECONDS,
        skip_provisioning: bool = False,
    ):
        """
        Initialize with a ready session and a keyspace name.
        Args:
            session (cassandra.cluster.Session): an open Cassandra session
            keyspace (str): the keyspace to use for storing the cache
            table_name (str): name of the Cassandra table to use as cache
            ttl_seconds (optional int): time-to-live for cache entries
                (default: None, i.e. forever)
        """
        try:
            from cassio.table import ElasticCassandraTable
        except (ImportError, ModuleNotFoundError):
            raise ValueError(
                "Could not import cassio python package. "
                "Please install it with `pip install cassio`."
            )
        self.session = session
        self.keyspace = keyspace
        self.table_name = table_name
        self.ttl_seconds = ttl_seconds
        self.kv_cache = ElasticCassandraTable(
            session=self.session,
            keyspace=self.keyspace,
            table=self.table_name,
            keys=["llm_string", "prompt"],
            primary_key_type=["TEXT", "TEXT"],
            ttl_seconds=self.ttl_seconds,
            skip_provisioning=skip_provisioning,
        )

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        item = self.kv_cache.get(llm_string=_hash(llm_string), prompt=_hash(prompt))
        if item is not None:
            generations = _loads_generations(item["body_blob"])
            # this protects against malformed cached items:
            return generations if generations is not None else None
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        blob = _dumps_generations(return_val)
        self.kv_cache.put(
            llm_string=_hash(llm_string), prompt=_hash(prompt), body_blob=blob
        )

    def delete_through_llm(
        self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
    ) -> None:
        """
        A wrapper around `delete` with the LLM being passed.
        In case the llm(prompt) calls have a `stop` param, you should pass it here
        """
        llm_string = get_prompts(
            {**llm.dict(), **{"stop": stop}},
            [],
        )[1]
        return self.delete(prompt, llm_string=llm_string)

    def delete(self, prompt: str, llm_string: str) -> None:
        """Evict from cache if there's an entry."""
        return self.kv_cache.delete(
            llm_string=_hash(llm_string), prompt=_hash(prompt)
        )

    def clear(self, **kwargs: Any) -> None:
        """Clear cache. This is for all LLMs at once."""
        self.kv_cache.clear()


CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC = "dot"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD = 0.85
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_semantic_cache"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS = None
CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE = 16


class CassandraSemanticCache(BaseCache):
    """
    Cache that uses Cassandra as a vector-store backend for semantic
    (i.e. similarity-based) lookup.

    It uses a single (vector) Cassandra table and stores, in principle,
    cached values from several LLMs, so the LLM's llm_string is part
    of the rows' primary keys.

    The similarity is based on one of several distance metrics (default: "dot").
    If choosing another metric, the default threshold is to be re-tuned accordingly.
    """

    def __init__(
        self,
        session: Optional[CassandraSession],
        keyspace: Optional[str],
        embedding: Embeddings,
        table_name: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME,
        distance_metric: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC,
        score_threshold: float = CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD,
        ttl_seconds: Optional[int] = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS,
        skip_provisioning: bool = False,
    ):
        """
        Initialize the cache with all relevant parameters.
        Args:
            session (cassandra.cluster.Session): an open Cassandra session
            keyspace (str): the keyspace to use for storing the cache
            embedding (Embedding): Embedding provider for semantic
                encoding and search.
            table_name (str): name of the Cassandra (vector) table
                to use as cache
            distance_metric (str, 'dot'): which measure to adopt for
                similarity searches
            score_threshold (optional float): numeric value to use as
                cutoff for the similarity searches
            ttl_seconds (optional int): time-to-live for cache entries
                (default: None, i.e. forever)
        The default score threshold is tuned to the default metric.
        Tune it carefully yourself if switching to another distance metric.
        """
        try:
            from cassio.table import MetadataVectorCassandraTable
        except (ImportError, ModuleNotFoundError):
            raise ValueError(
                "Could not import cassio python package. "
                "Please install it with `pip install cassio`."
            )
        self.session = session
        self.keyspace = keyspace
        self.embedding = embedding
        self.table_name = table_name
        self.distance_metric = distance_metric
        self.score_threshold = score_threshold
        self.ttl_seconds = ttl_seconds

        # Per-instance embedding cache, to spare embedding recomputation
        # between the `lookup` and `update` calls on the same prompt.
        @lru_cache(maxsize=CASSANDRA_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE)
        def _cache_embedding(text: str) -> List[float]:
            return self.embedding.embed_query(text=text)

        self._get_embedding = _cache_embedding
        self.embedding_dimension = self._get_embedding_dimension()

        self.table = MetadataVectorCassandraTable(
            session=self.session,
            keyspace=self.keyspace,
            table=self.table_name,
            primary_key_type=["TEXT"],
            vector_dimension=self.embedding_dimension,
            ttl_seconds=self.ttl_seconds,
            metadata_indexing=("allow", {"_llm_string_hash"}),
            skip_provisioning=skip_provisioning,
        )

    def _get_embedding_dimension(self) -> int:
        return len(self._get_embedding(text="This is a sample sentence."))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        embedding_vector = self._get_embedding(text=prompt)
        llm_string_hash = _hash(llm_string)
        body = _dumps_generations(return_val)
        metadata = {
            "_prompt": prompt,
            "_llm_string_hash": llm_string_hash,
        }
        row_id = f"{_hash(prompt)}-{llm_string_hash}"
        self.table.put(
            body_blob=body,
            vector=embedding_vector,
            row_id=row_id,
            metadata=metadata,
        )

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        hit_with_id = self.lookup_with_id(prompt, llm_string=llm_string)
        if hit_with_id is not None:
            return hit_with_id[1]
        else:
            return None

    def lookup_with_id(
        self, prompt: str, llm_string: str
    ) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
        """
        Look up based on prompt and llm_string.
        If there are hits, return (document_id, cached_entry)
        """
        prompt_embedding: List[float] = self._get_embedding(text=prompt)
        hits = list(
            self.table.metric_ann_search(
                vector=prompt_embedding,
                metadata={"_llm_string_hash": _hash(llm_string)},
                n=1,
                metric=self.distance_metric,
                metric_threshold=self.score_threshold,
            )
        )
        if hits:
            hit = hits[0]
            generations = _loads_generations(hit["body_blob"])
            if generations is not None:
                # this protects against malformed cached items:
                return (hit["row_id"], generations)
        return None

    def lookup_with_id_through_llm(
        self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
    ) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
        llm_string = get_prompts(
            {**llm.dict(), **{"stop": stop}},
            [],
        )[1]
        return self.lookup_with_id(prompt, llm_string=llm_string)

    def delete_by_document_id(self, document_id: str) -> None:
        """
        Given this is a "similarity search" cache, an invalidation pattern
        that makes sense is first a lookup to get an ID, and then deleting
        with that ID. This is for the second step.
        """
        self.table.delete(row_id=document_id)

    def clear(self, **kwargs: Any) -> None:
        """Clear the *whole* semantic cache."""
        self.table.clear()


class FullMd5LLMCache(Base):  # type: ignore
    """SQLite table for full LLM Cache (all generations)."""

    __tablename__ = "full_md5_llm_cache"
    id = Column(String, primary_key=True)
    prompt_md5 = Column(String, index=True)
    llm = Column(String, index=True)
    idx = Column(Integer, index=True)
    prompt = Column(String)
    response = Column(String)


class SQLAlchemyMd5Cache(BaseCache):
    """Cache that uses SQLAlchemy as a backend."""

    def __init__(
        self, engine: Engine, cache_schema: Type[FullMd5LLMCache] = FullMd5LLMCache
    ):
        """Initialize by creating all tables."""
        self.engine = engine
        self.cache_schema = cache_schema
        self.cache_schema.metadata.create_all(self.engine)

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        rows = self._search_rows(prompt, llm_string)
        if rows:
            return [loads(row[0]) for row in rows]
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update based on prompt and llm_string."""
        self._delete_previous(prompt, llm_string)
        prompt_md5 = self.get_md5(prompt)
        items = [
            self.cache_schema(
                id=str(uuid.uuid1()),
                prompt=prompt,
                prompt_md5=prompt_md5,
                llm=llm_string,
                response=dumps(gen),
                idx=i,
            )
            for i, gen in enumerate(return_val)
        ]
        with Session(self.engine) as session, session.begin():
            for item in items:
                session.merge(item)

    def _delete_previous(self, prompt: str, llm_string: str) -> None:
        stmt = (
            select(self.cache_schema.response)
            .where(self.cache_schema.prompt_md5 == self.get_md5(prompt))  # type: ignore
            .where(self.cache_schema.llm == llm_string)
            .where(self.cache_schema.prompt == prompt)
            .order_by(self.cache_schema.idx)
        )
        with Session(self.engine) as session, session.begin():
            rows = session.execute(stmt).fetchall()
            for item in rows:
                session.delete(item)

    def _search_rows(self, prompt: str, llm_string: str) -> List[Row]:
        prompt_md5 = self.get_md5(prompt)
        stmt = (
            select(self.cache_schema.response)
            .where(self.cache_schema.prompt_md5 == prompt_md5)  # type: ignore
            .where(self.cache_schema.llm == llm_string)
            .where(self.cache_schema.prompt == prompt)
            .order_by(self.cache_schema.idx)
        )
        with Session(self.engine) as session:
            return session.execute(stmt).fetchall()

    def clear(self, **kwargs: Any) -> None:
        """Clear cache."""
        with Session(self.engine) as session:
            session.query(self.cache_schema).delete()

    @staticmethod
    def get_md5(input_string: str) -> str:
        return _hash(input_string)


ASTRA_DB_CACHE_DEFAULT_COLLECTION_NAME = "langchain_astradb_cache"


class AstraDBCache(BaseCache):
    """
    Cache that uses Astra DB as a backend.

    It uses a single collection as a kv store
    The lookup keys, combined in the _id of the documents, are:
        - prompt, a string
        - llm_string, a deterministic str representation of the model parameters.
          (needed to prevent same-prompt-different-model collisions)
    """

    def __init__(
        self,
        *,
        collection_name: str = ASTRA_DB_CACHE_DEFAULT_COLLECTION_NAME,
        token: Optional[str] = None,
        api_endpoint: Optional[str] = None,
        astra_db_client: Optional[AstraDB] = None,
        namespace: Optional[str] = None,
    ):
        """
        Create an AstraDB cache using a collection for storage.

        Args (only keyword-arguments accepted):
            collection_name (str): name of the Astra DB collection to create/use.
            token (Optional[str]): API token for Astra DB usage.
            api_endpoint (Optional[str]): full URL to the API endpoint,
                such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
            astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
                you can pass an already-created 'astrapy.db.AstraDB' instance.
            namespace (Optional[str]): namespace (aka keyspace) where the
                collection is created. Defaults to the database's "default namespace".
        """
        astra_env = AstraDBEnvironment(
            token=token,
            api_endpoint=api_endpoint,
            astra_db_client=astra_db_client,
            namespace=namespace,
        )
        self.astra_db = astra_env.astra_db
        self.collection = self.astra_db.create_collection(
            collection_name=collection_name,
        )
        self.collection_name = collection_name

    @staticmethod
    def _make_id(prompt: str, llm_string: str) -> str:
        return f"{_hash(prompt)}#{_hash(llm_string)}"

    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
        """Look up based on prompt and llm_string."""
        doc_id = self._make_id(prompt, llm_string)
        item = self.collection.find_one(
            filter={"_id": doc_id},
            projection={"body_blob": 1},
        )["data"]["document"]
        if item is not None:
            generations = _loads_generations(item["body_blob"])
            # this protects against malformed cached items:
            return generations if generations is not None else None
        return None

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        """Update cache based on prompt and llm_string."""
        doc_id = self._make_id(prompt, llm_string)
        blob = _dumps_generations(return_val)
        self.collection.upsert({"_id": doc_id, "body_blob": blob})

    def delete_through_llm(
        self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
    ) -> None:
        """
        A wrapper around `delete` with the LLM being passed.
        In case the llm(prompt) calls have a `stop` param, you should pass it here
        """
        llm_string = get_prompts(
            {**llm.dict(), **{"stop": stop}},
            [],
        )[1]
        return self.delete(prompt, llm_string=llm_string)

    def delete(self, prompt: str, llm_string: str) -> None:
        """Evict from cache if there's an entry."""
        doc_id = self._make_id(prompt, llm_string)
        self.collection.delete_one(doc_id)

    def clear(self, **kwargs: Any) -> None:
        """Clear cache. This is for all LLMs at once."""
        self.astra_db.truncate_collection(self.collection_name)
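

# A minimal sketch of wiring AstraDBCache as the global LLM cache (the endpoint
# and token environment variables below are placeholders for your own
# Astra DB credentials):
def _example_astra_db_cache_usage() -> None:
    import os

    from langchain_core.globals import set_llm_cache

    set_llm_cache(
        AstraDBCache(
            api_endpoint=os.environ["ASTRA_DB_API_ENDPOINT"],
            token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
        )
    )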


ASTRA_DB_SEMANTIC_CACHE_DEFAULT_COLLECTION_NAME = "langchain_astradb_semantic_cache"
ASTRA_DB_SEMANTIC_CACHE_DEFAULT_THRESHOLD = 0.85
ASTRA_DB_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZE = 16


class AstraDBSemanticCache(BaseCache):
    """
    Cache that uses Astra DB as a vector-store backend for semantic
    (i.e. similarity-based) lookup.

    It uses a single (vector) collection and can store
    cached values from several LLMs, so the LLM's 'llm_string' is stored
    in the document metadata.

    You can choose the preferred similarity (or use the API default) --
    remember the threshold might require metric-dependent tuning.
    """

    def __init__(
        self,
        *,
        collection_name: str = ASTRA_DB_SEMANTIC_CACHE_DEFAULT_COLLECTION_NAME,
        token: Optional[str] = None,
        api_endpoint: Optional[str] = None,
        astra_db_client: Optional[AstraDB] = None,
        namespace: Optional[str] = None,
        embedding: Embeddings,
        metric: Optional[str] = None,
        similarity_threshold: float = ASTRA_DB_SEMANTIC_CACHE_DEFAULT_THRESHOLD,
    ):
        """
        Initialize the cache with all relevant parameters.
        Args:

            collection_name (str): name of the Astra DB collection to create/use.
            token (Optional[str]): API token for Astra DB usage.
            api_endpoint (Optional[str]): full URL to the API endpoint,
                such as "https://<DB-ID>-us-east1.apps.astra.datastax.com".
            astra_db_client (Optional[Any]): *alternative to token+api_endpoint*,
                you can pass an already-created 'astrapy.db.AstraDB' instance.
            namespace (Optional[str]): namespace (aka keyspace) where the
                collection is created. Defaults to the database's "default namespace".
            embedding (Embedding): Embedding provider for semantic
                encoding and search.
            metric: the function to use for evaluating similarity of text embeddings.
                Defaults to 'cosine' (alternatives: 'euclidean', 'dot_product')
            similarity_threshold (float, optional): the minimum similarity
                for accepting a (semantic-search) match.

        The default score threshold is tuned to the default metric.
        Tune it carefully yourself if switching to another distance metric.
        r  r  r   r*   r+   r  c                   r  r  r  r   r`   r2   r3   r    r  z7AstraDBSemanticCache.__init__.<locals>._cache_embedding)r  	dimensionr  Nr  )r%   r  r   r  r
  r   ,ASTRA_DB_SEMANTIC_CACHE_EMBEDDING_CACHE_SIZEr  r  r  r  r  r  )ra   r  r  r  r  r  r   r  r
  r  r  r2   r`   r3   rb     s(   "
zAstraDBSemanticCache.__init__r+   r  c                 C  r  r  r  r`   r2   r2   r3   r    r  z-AstraDBSemanticCache._get_embedding_dimensionrd   re   c                 C  r  r  r   r   r2   r2   r3   r    r  zAstraDBSemanticCache._make_idrk   r   r\   c                 C  sB   |  ||}t|}| j|d}t|}| j||||d dS )rm   r   )r  r  r  z$vectorN)r  r4   r  rQ   r  r  )ra   rd   re   rk   r  r  r  r  r2   r2   r3   ro     s   zAstraDBSemanticCache.updaterf   c                 C  r  r  r  r  r2   r2   r3   rj   )  r  zAstraDBSemanticCache.lookupr  c                 C  sn   | j |d}t|}| jj|d|iddgdd}|du s#|d | jk r%dS t|d }|dur5|d |fS dS )	z
        Look up based on prompt and llm_string.
        If there are hits, return (document_id, cached_entry) for the top hit
        """
        prompt_embedding: List[float] = self._get_embedding(text=prompt)
        llm_string_hash = _hash(llm_string)

        hit = self.collection.vector_find_one(
            vector=prompt_embedding,
            filter={"llm_string_hash": llm_string_hash},
            fields=["body_blob", "_id"],
            include_similarity=True,
        )

        if hit is None or hit["$similarity"] < self.similarity_threshold:
            return None
        generations = _loads_generations(hit["body_blob"])
        if generations is not None:
            # this protects against malformed cached items:
            return (hit["_id"], generations)
        return None

    def lookup_with_id_through_llm(
        self, prompt: str, llm: LLM, stop: Optional[List[str]] = None
    ) -> Optional[Tuple[str, RETURN_VAL_TYPE]]:
        llm_string = get_prompts(
            {**llm.dict(), **{"stop": stop}},
            [],
        )[1]
        return self.lookup_with_id(prompt, llm_string=llm_string)

    def delete_by_document_id(self, document_id: str) -> None:
        """
        Given this is a "similarity search" cache, an invalidation pattern
        that makes sense is first a lookup to get an ID, and then deleting
        with that ID. This is for the second step.
        """
        self.collection.delete_one(document_id)

    def clear(self, **kwargs: Any) -> None:
        """Clear the *whole* semantic cache."""
        self.astra_db.truncate_collection(self.collection_name)