  
# views.py
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# from .models import ChatMessage
from django.contrib.auth.models import User
from django.http import JsonResponse,HttpResponse
from django.views.decorators.csrf import csrf_exempt
import json
from datetime import timedelta,timezone,datetime
from time import time,sleep
from langchain.chat_models import ChatOpenAI
from langchain_google_genai import ChatGoogleGenerativeAI
from google import genai
from google.genai.types import HttpOptions
import os,sys
import io
from typing import List
from langchain.load.dump import dumps
import asyncio
import aiohttp
import elasticsearch
# import uuid
from langchain_core.messages import (
    BaseMessage,
    message_to_dict,
    messages_from_dict,
)
from glob import glob
from math import floor
from langchain.prompts import ChatPromptTemplate,PromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_openai import OpenAIEmbeddings
from openai import AsyncOpenAI
from langchain_community.vectorstores import ElasticsearchStore
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain_core.messages.human import HumanMessage
from langchain_core.messages.ai import AIMessage
import requests
from requests.auth import HTTPBasicAuth
from elasticsearch import Elasticsearch,NotFoundError
from collections import OrderedDict
from decouple import config
from openai import OpenAI
from PIL import Image
from io import BytesIO
from django.core.mail import send_mail
from langchain_core.messages.system import SystemMessage
from langchain_core.messages.chat import ChatMessage
sys.path.append(os.path.join(os.path.dirname(__file__)))
from langchain_core.documents import Document
import os
import pandas as pd
from Loader_functions_utils import DocumentLoader
document_loader = DocumentLoader()
from langchain.docstore.document import Document as BaseDocument
import tiktoken
from django.contrib.auth import authenticate, login
from django.shortcuts import redirect
from django.urls import reverse
from concurrent.futures import ThreadPoolExecutor, as_completed
import concurrent.futures
import base64
import concurrent.futures
import openai
import inspect
# Debug echo of today's date at import time (written to stdout when the module loads).
print(datetime.now().strftime('%Y-%m-%d'))


# LOG_FILE = f"var/www/html/CAA_Allibot30/AlliBot30_app/log/caa_allibot_bot-log-{datetime.now().strftime('%Y-%m-%d')}.log"
# Build a per-day log file path under <project root>/AlliBot30_app/log,
# creating the directory if needed; custom_log() appends to LOG_FILE.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
LOG_DIR = os.path.join(BASE_DIR, 'AlliBot30_app', 'log')
os.makedirs(LOG_DIR, exist_ok=True)
LOG_FILE = os.path.join(LOG_DIR, f"caa_allibot_bot-log-{datetime.now().strftime('%Y-%m-%d')}.log")

def custom_log(level, message):
    """Append a formatted line to LOG_FILE and echo it to stdout.

    Each line records timestamp, caller module, level, caller file name and
    line number, mimicking a logging.Formatter layout.
    """
    caller = inspect.currentframe().f_back
    src_file = os.path.basename(caller.f_code.co_filename)
    line_no = caller.f_lineno
    mod_name = caller.f_globals["__name__"]

    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    entry = f"{stamp} - {mod_name} - {level.upper():>8s} - [{src_file:25s} : {line_no:3d}] - {message}\n"

    # Append to the day's log file, then mirror to stdout (screen handler style).
    with open(LOG_FILE, "a") as log_fh:
        log_fh.write(entry)
    print(entry, end="")



organ_user_ind=config('org_auth_index')
# corporate_token_ind=config('token_index')
custom_domain = "https://chatbot.scic.com/CAA-AlliBot"


def text_generation_google_gemini_api(user_prompt):
    """POST user_prompt to the Gemini generateContent REST endpoint and return the parsed JSON reply."""
    # SECURITY NOTE(review): API key is hard-coded in the URL — move to config/env.
    gemini_url = "https://generativelanguage.googleapis.com/v1/models/gemini-2.0-flash-001:generateContent?key=AIzaSyA3NBj38uVck1-DusRC-sPdSGT0Lg_ZJ6Q"
    payload = {"contents": [{"parts": [{"text": user_prompt}]}]}
    # json= (not data=) so requests handles the JSON encoding.
    response = requests.post(
        url=gemini_url,
        json=payload,
        headers={"Content-Type": "application/json"},
    )
    return response.json()



def get_mime_type_from_extension(file_path):
    """Map a file path's extension to an image MIME type.

    Falls back to 'application/octet-stream' for unknown extensions (or when
    the path has no extension at all).
    """
    known_types = {
        'jpg': 'image/jpeg',
        'jpeg': 'image/jpeg',
        'png': 'image/png',
        'bmp': 'image/bmp',
    }
    extension = file_path.lower().rsplit('.', 1)[-1]
    return known_types.get(extension, 'application/octet-stream')




def insert_document(session_id: str, document_extract: str, file_name: str, index_name: str = "caa_allibot_file_doc_index") -> str:
    """
    Index one uploaded-file extraction in Elasticsearch.

    Args:
        session_id (str): The chat session identifier.
        document_extract (str): The extracted text content.
        file_name (str): Name of the originating file.
        index_name (str): Target index (default: caa_allibot_file_doc_index).

    Returns:
        str: The Elasticsearch-assigned _id of the stored document.
    """
    # SECURITY NOTE(review): credentials are embedded in the connection URL — move to config.
    client = Elasticsearch('https://elastic:8oKIqy312EBsAPzWT64NUzji@scic-elasticsearch.es.us-central1.gcp.cloud.es.io:443', request_timeout=300, retry_on_timeout=True)

    payload = {
        "session_id": session_id,
        "document_extract": document_extract,
        "file_name": file_name,
    }
    return client.index(index=index_name, document=payload)["_id"]


def get_document_by_session_id(session_id: str, index_name: str = "caa_allibot_file_doc_index") -> dict:
    """
    Fetch the first stored file-extraction document for a session.

    Args:
        session_id (str): Session identifier to match exactly (term query).
        index_name (str): The Elasticsearch index name.

    Returns:
        dict: The _source of the first matching document, or None when no hit.
    """
    # SECURITY NOTE(review): credentials are embedded in the connection URL — move to config.
    client = Elasticsearch('https://elastic:8oKIqy312EBsAPzWT64NUzji@scic-elasticsearch.es.us-central1.gcp.cloud.es.io:443', request_timeout=300, retry_on_timeout=True)

    body = {"query": {"term": {"session_id": {"value": session_id}}}}
    result = client.search(index=index_name, body=body)

    hits = result.get("hits", {}).get("hits", [])
    return hits[0]["_source"] if hits else None

def gemini_token_counter(text):
    """Count Gemini tokens for `text` via the google-genai client."""
    # SECURITY NOTE(review): hard-coded API key — move to config/env.
    client = genai.Client(api_key="AIzaSyA3NBj38uVck1-DusRC-sPdSGT0Lg_ZJ6Q")
    result = client.models.count_tokens(model="gemini-2.0-flash", contents=text)
    return result.total_tokens

def generate_content_with_image(image_path: str):
    """Send an image to Gemini and return the text it extracts from it.

    The image is inlined as base64 with a MIME type inferred from the file
    extension; the prompt asks the model to strip '**' / '###' markdown noise.
    """
    # SECURITY NOTE(review): hard-coded API key — move to config/env.
    client = genai.Client(api_key="AIzaSyA3NBj38uVck1-DusRC-sPdSGT0Lg_ZJ6Q", http_options=HttpOptions(api_version="v1"))

    mime_type = get_mime_type_from_extension(image_path)
    with open(image_path, "rb") as img_file:
        image_base64 = base64.b64encode(img_file.read()).decode("utf-8")

    prompt = "Extracted text in the (unknown) image, remove if any ** and ###"
    response = client.models.generate_content(
        model="gemini-2.0-flash-001",
        contents=[{
            "role": "user",
            "parts": [
                {"text": prompt},
                {"inline_data": {"mime_type": mime_type, "data": image_base64}},
            ],
        }],
    )
    return response.candidates[0].content.parts[0].text.strip()





def extract_reference_table(xlsx_path: str) -> str:
    """Read the reference table from an XLSX file and serialize it as CSV text (no index column)."""
    table = pd.read_excel(xlsx_path)
    return table.to_csv(index=False)



def chatbot(request):
    """Render the chatbot page for a logged-in user, or bounce to the login page.

    Reads name / unique_id / companyCode from the session; a present 'name'
    acts as the logged-in flag.
    """
    name = request.session.get('name')
    unique_id = request.session.get('unique_id')
    companyCode = request.session.get('companyCode')
    custom_log("info", f"entered chatbot function")

    if not name:
        custom_log("info", "User not logged in, redirecting to login page.")
        return redirect("login")

    custom_log("info", f"User logged in, rendering chatbot page {name} {unique_id} {companyCode}")
    context = {'user_name': name, 'unique_id': unique_id, 'companyCode': companyCode}
    return render(request, "AlliBot_index.html", context)


@csrf_exempt
def login_view(request):
    """Authenticate a user against Elasticsearch and start a session.

    POST: expects 'username', 'password' and 'userId' form fields; on success
    stores companyCode, Riea_company, name and unique_id in the session and
    redirects to the chatbot view. On any mismatch re-renders the login page
    with an error. GET: renders the login page.

    Fixes over the previous version: removed unreachable print() calls that
    sat after return statements, and stopped printing the raw password (and
    the full user record, which contained it) to stdout.
    """
    if request.method == "POST":
        username = request.POST["username"]
        password = request.POST["password"]
        userId = request.POST["userId"]
        custom_log("info", "username%s " % username)
        custom_log("info", "userID%s " % userId)

        try:
            # Exact-match lookup on both username and user_id.
            response = es.search(
                index=organ_user_ind,
                body={
                    "query": {
                        "bool": {
                            "must": [
                                {"term": {"username.keyword": username}},
                                {"term": {"user_id.keyword": userId}},
                            ]
                        }
                    }
                },
            )

            if response['hits']['total']['value'] > 0:
                for hit in response['hits']['hits']:
                    source = hit['_source']
                    # SECURITY NOTE(review): passwords are stored and compared
                    # in plain text — they should be hashed (e.g. Django's
                    # auth hashers). Preserved here to stay compatible with
                    # the existing index contents.
                    if source.get('password') == password:
                        # .get() yields None when the field is absent, matching
                        # the old explicit "else: None" branches.
                        request.session['companyCode'] = source.get('companyCode')
                        request.session['Riea_company'] = source.get('Riea_company')
                        request.session['name'] = username
                        request.session['unique_id'] = userId
                        custom_log("info", f"User {username} authenticated successfully. Redirecting to index.")
                        return redirect(chatbot)
                    # Wrong/missing password on the first hit: reject.
                    return render(request, "AlliBot_login.html", {"error": "Invalid credentials"})
            return render(request, "AlliBot_login.html", {"error": "Invalid credentials"})

        except Exception as e:
            return HttpResponse(f"An error occurred: {e}")

    # GET: render the login form.
    return render(request, "AlliBot_login.html")

@csrf_exempt
def get_statuslist(request):
    """Return the notification/status list for a user as JSON.

    POST fields: 'username' and 'userid'. Each matching document from the
    'notifications' index is mapped to a row whose 'request' field is
    translated from its numeric code (2 -> Approved, 3 -> Declined,
    anything else -> Pending).

    NOTE(review): this function is re-defined (near-identically) later in
    this module, so this first definition is shadowed and never used.
    NOTE(review): a non-POST request falls through without a return value
    (None), which Django turns into a 500 error.
    """
    if request.method == "POST":
        # Handle the login form submission
        username = request.POST["username"]
        userId=request.POST["userid"]
        print('username', username)
        custom_log("info","username%s " % username)

        try:
            # Query Elasticsearch to find the user
            response = es.search(
                index='notifications',
                body={
                    "query": {
                        "bool": {
                            "must":[
                                {"term": {"username.keyword": username}},
                                {"term": {"user_id.keyword": userId}},
                            ]
                        }
                    }
                }
            )
            
            userlist=[]
            # Check if any user matched the query
            if response['hits']['total']['value'] > 0:
                for hit in response['hits']['hits']:
                    source = hit['_source']
                    # Translate the numeric request code to a display status.
                    if source.get("request", 0) ==2:
                        request_sta="Approved"
                    elif source.get("request", 0) ==3:
                        request_sta="Declined"
                    else:
                        request_sta="Pending"
                        
                    formatted_doc = {
                        "name": source.get("username"),
                        "user_id": source.get("user_id"),
                        "message": source.get("message"),
                        "created_at": source.get("created_at"),
                        "request": request_sta
                        
                    }
                    userlist.append(formatted_doc)
                    
            return JsonResponse({"userlist": userlist})

                    
        except Exception as e:
            return HttpResponse(f"An error occurred: {e}")

@csrf_exempt
def logout_view(request):
    """Flush the entire session and send the user back to the login page."""
    request.session.flush()
    return redirect('login')

@csrf_exempt
def forgot_password_view(request):
    """Email a password-reset link to a user looked up in Elasticsearch.

    POST fields: 'username' and 'userid'. Renders the forgot-password page
    with a success or error message; a GET just renders the form.
    """
    if request.method != "POST":
        return render(request, "AlliBot_forgot_password.html")

    username = request.POST.get("username")
    userid = request.POST.get("userid")
    # Full reset URL sent in the email body.
    reset_url = f"{custom_domain}/reset-password/"

    try:
        # Exact-match lookup on both username and user_id.
        lookup = es.search(
            index=organ_user_ind,
            body={
                "query": {
                    "bool": {
                        "must": [
                            {"term": {"username.keyword": username}},
                            {"term": {"user_id.keyword": userid}},
                        ]
                    }
                }
            },
        )

        if lookup['hits']['total']['value'] == 0:
            return render(request, "AlliBot_forgot_password.html", {"error": "User not found."})

        user_email = lookup['hits']['hits'][0]['_source'].get("mail")
        if not user_email:
            return render(request, "AlliBot_forgot_password.html", {"error": "Email not found for this user."})

        print('user_email', user_email)
        message = "Click the link below to reset your password:\n\n" + reset_url
        print('message', message)

        send_mail(
            subject="Password Reset Request",
            message=message,
            from_email="tna@scic.com",
            recipient_list=[user_email],
            fail_silently=False,
        )
        return render(request, "AlliBot_forgot_password.html", {"message": "Password reset link sent to your email."})

    except Exception as e:
        return HttpResponse(f"An error occurred: {e}")

@csrf_exempt
def reset_password_view(request):
    """Set a new password for the user identified by username + userid.

    GET renders the reset form; POST looks the user up in Elasticsearch and
    overwrites the stored password.
    """
    if request.method != "POST":
        return render(request, "AlliBot_reset_password.html")

    username = request.POST.get("username")
    userid = request.POST.get("userid")
    new_password = request.POST.get("new_password")

    try:
        # Find the user's ES document id by exact username + user_id match.
        lookup = es.search(
            index=organ_user_ind,
            body={
                "query": {
                    "bool": {
                        "must": [
                            {"term": {"username.keyword": username}},
                            {"term": {"user_id.keyword": userid}},
                        ]
                    }
                }
            },
        )

        if lookup['hits']['total']['value'] == 0:
            return HttpResponse("User not found.")

        doc_id = lookup['hits']['hits'][0]['_id']
        # SECURITY NOTE(review): the password is stored in plain text — it
        # should be hashed before indexing (kept as-is for compatibility
        # with login_view's plain-text comparison).
        es.update(
            index=organ_user_ind,
            id=doc_id,
            body={"doc": {"password": new_password}},
        )
        return render(request, "AlliBot_reset_password.html", {"message": "Password has been reset successfully."})

    except Exception as e:
        return HttpResponse(f"An error occurred: {e}")


@csrf_exempt
def signout(request):
    """Drop the login keys from the session and return the login URL as JSON.

    Uses session.pop(..., None) so a repeated signout (or an already-expired
    session) no longer raises KeyError the way the old
    `del request.session[...]` did.
    """
    request.session.pop('name', None)
    request.session.pop('unique_id', None)
    login_url = reverse('login')  # 'login' must be a named URL pattern
    return JsonResponse({
        'message': 'Signout successful',
        'redirect_url': login_url
    })

@csrf_exempt
def get_statuslist(request):
    """Return the notification/status list for a user as JSON.

    POST fields: 'username' and 'userid'. Each matching document from the
    'notifications' index is mapped to a row whose 'request' field is
    translated from its numeric code (2 -> Approved, 3 -> Declined,
    anything else -> Pending).

    Fix: the previous version returned None for non-POST requests, which
    Django surfaces as an HTTP 500; now an empty list is returned instead.
    """
    if request.method != "POST":
        return JsonResponse({"userlist": []})

    username = request.POST["username"]
    userId = request.POST["userid"]

    try:
        # Exact-match lookup on both username and user_id.
        response = es.search(
            index='notifications',
            body={
                "query": {
                    "bool": {
                        "must": [
                            {"term": {"username.keyword": username}},
                            {"term": {"user_id.keyword": userId}},
                        ]
                    }
                }
            },
        )

        # Numeric request code -> display status.
        status_by_code = {2: "Approved", 3: "Declined"}
        userlist = []
        for hit in response['hits']['hits']:
            source = hit['_source']
            userlist.append({
                "name": source.get("username"),
                "user_id": source.get("user_id"),
                "message": source.get("message"),
                "created_at": source.get("created_at"),
                "request": status_by_code.get(source.get("request", 0), "Pending"),
            })
        return JsonResponse({"userlist": userlist})

    except Exception as e:
        return HttpResponse(f"An error occurred: {e}")
        
# Define the search query
def get_messages_by_user_and_session(username, ind_name, customer):
    """Return all chat documents for a (username, candidate) pair, oldest first.

    Exact term matches on username.keyword and candidate_name.keyword; up to
    10000 hits sorted ascending by lastmodified_at. Returns the _source dicts.
    """
    body = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"username.keyword": username}},
                    {"term": {"candidate_name.keyword": customer}},
                ]
            }
        },
        "size": 10000,
        "sort": [{"lastmodified_at": {"order": "asc"}}],
    }
    result = es.search(index=ind_name, body=body)
    return [hit['_source'] for hit in result['hits']['hits']]

def unique_sources_with_pages(meta_data):
    """Collect the distinct source file names (without directory or extension)
    from a list of metadata dicts and join them with ', '.

    Args:
        meta_data: iterable of dicts, each optionally carrying a 'source' path.

    Returns:
        str: comma-separated unique file-name stems, in first-seen order.

    Fix: the previous set-based version returned names in arbitrary hash
    order, making the citation string non-deterministic across runs; a dict
    (insertion-ordered) now deduplicates while preserving order.
    """
    ordered_unique = {}
    for item in meta_data:
        source = item.get('source', '')
        # basename + splitext: '/a/b/file.pdf' -> 'file'
        stem = os.path.splitext(os.path.basename(source))[0]
        ordered_unique.setdefault(stem, None)
    return ', '.join(ordered_unique)
    
# SECURITY NOTE(review): the Google API key below and the Elasticsearch
# credentials embedded in the connection URL are hard-coded in source —
# they should be moved to environment variables / config.
os.environ["OPENAI_API_KEY"] = config('SECRET_KEY')

print(os.getenv("GOOGLE_API_KEY"))
os.environ["GOOGLE_API_KEY"] = "AIzaSyALMkcDFVJ0GUpKFpqLqdjjvYsmBxEWuYU"


# Embedding model used by the ElasticsearchStore vector search below.
embeddings = OpenAIEmbeddings()


# Index names are sourced from the .env file via python-decouple.
es_index_name = config('es_index_name')
chatname_index = config('chatname_index')
imtext_index = config('textlensindex')
forms_index = config('formsindex')
org_details_index = config('riea_org_index')
user_details_index = config('riea_token_index')
doc_index = config('compareindex')



# custom_log("info",es_index_name)
# Module-level Elasticsearch client shared by the view functions in this file.
es= Elasticsearch('https://elastic:8oKIqy312EBsAPzWT64NUzji@scic-elasticsearch.es.us-central1.gcp.cloud.es.io:443',request_timeout=300,retry_on_timeout=True)

def check_elastic_status():
    """Return True when the Elasticsearch cluster answers HTTP 200 to a basic-auth ping."""
    # SECURITY NOTE(review): credentials are hard-coded — move to config/env.
    credentials = HTTPBasicAuth('elastic', '8oKIqy312EBsAPzWT64NUzji')
    reply = requests.get('https://scic-elasticsearch.es.us-central1.gcp.cloud.es.io', auth=credentials)
    return reply.status_code == 200

# Build the vector store over every content index the bot can cite from.
# NOTE(review): the index_name list contains duplicated entries (e.g.
# "caa_index", "caais_index" and the carrier_appointments_* indexes appear
# twice) — confirm whether that is intentional.
try :
    db = ElasticsearchStore(
        es_connection=es,
        index_name=['docx_new_page','pdf_new_page','additional_commercial_forms','docx_pdf_page','botcoach_index','html_unstructured1','excel_json_ind','pdf_json_ind','iso_forms_pdf_full_pdf','iso_forms_pdf_pagewise','json_others',"allibot_faq","allibot-faqs-full","caa_index","caais_index","carrier_appointments_master_list_caais","carrier_appointments_master_list_cl","carrier_appointments_master_list_personal","carrier_appointments_master_list_wholesalers","caa_index","caais_index","carrier_appointments_master_list_caais","carrier_appointments_master_list_cl","carrier_appointments_master_list_personal","carrier_appointments_master_list_wholesalers"],
        #index_name=['cml_study_guide', 'docx_pdf', 'new_pdf', 'pl_pilot_24_02_21_lg','botcoach_index','mcq','pdf_test','docx_test','excel_test','csv_test','xlsx','word_docx','pdf_docs','html_unstructured1'],
        embedding=embeddings,
        strategy=ElasticsearchStore.ExactRetrievalStrategy()
    )
except Exception as ex:        
    # logger.exception('Exception occured due to %s' % ex)
    # Retry once if the cluster is reachable. NOTE(review): if the cluster is
    # down (check returns False), `db` is never assigned and any later use of
    # it raises NameError — consider failing loudly here instead.
    if check_elastic_status():
        db = ElasticsearchStore(
            es_connection=es,
            index_name=['docx_new_page','pdf_new_page','additional_commercial_forms','docx_pdf_page','botcoach_index','html_unstructured1','excel_json_ind','pdf_json_ind','iso_forms_pdf_full_pdf','iso_forms_pdf_pagewise','json_others',"allibot_faq","allibot-faqs-full","caa_index","caais_index","carrier_appointments_master_list_caais","carrier_appointments_master_list_cl","carrier_appointments_master_list_personal","carrier_appointments_master_list_wholesalers","caa_index","caais_index","carrier_appointments_master_list_caais","carrier_appointments_master_list_cl","carrier_appointments_master_list_personal","carrier_appointments_master_list_wholesalers"],
            embedding=embeddings,
            strategy=ElasticsearchStore.ExactRetrievalStrategy()
        )
        
def get_chat_id_names_by_session_id(index_name, session_id):
    """Return the chat_id_name recorded for a session ('' when none found).

    Scans up to 5000 hits; when several documents carry chat_id_name the
    value from the last hit wins (matches the original loop behavior).
    """
    body = {
        "size": 5000,
        "query": {"term": {"session_id.keyword": session_id}},
    }
    result = es.search(index=index_name, body=body)

    chat_id_name = ''
    for hit in result['hits']['hits']:
        src = hit['_source']
        if 'chat_id_name' in src:
            chat_id_name = src.get('chat_id_name')
    return chat_id_name


def add_user_message_to_es(customer, username, session_id, user_prompt, index, user_token, modelname, is_safe, flagged_categories):
    """Persist one human chat turn to Elasticsearch.

    Rejects empty or non-string prompts (logs an error and returns None).
    The LangChain HumanMessage is serialized into the 'history' field so the
    conversation can be rebuilt later. Returns the new document's _id.
    """
    if not isinstance(user_prompt, str) or not user_prompt.strip():
        custom_log("error", f"Invalid user_prompt: must be a non-empty string, got {user_prompt}", )
        return None

    doc = {
        'candidate_name': customer,
        'modelname': modelname,
        'username': username,
        'session_id': session_id,
        'user_prompt': user_prompt,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(HumanMessage(content=user_prompt)),
            ensure_ascii=True,
        ),
        "human token": user_token,
        "is_safe": is_safe,
        "flagged_categories": flagged_categories,
    }

    res = es.index(index=index, body=doc)
    return res.get('_id')

def add_chat_message_to_es(customer,username, session_id, user_prompt,index,user_token,modelname,is_safe):
    """Persist one user chat turn (as a LangChain ChatMessage) to Elasticsearch.

    Unlike add_user_message_to_es, this variant does no input validation and
    hard-codes the flagged_categories payload. Returns the raw ES response.
    """
    # Create a document with the required fields
    doc = {
        'candidate_name':customer,
        'modelname':modelname,
        'username': username,
        'session_id': session_id,
        'user_prompt': user_prompt,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(ChatMessage(content=user_prompt, role="user")),
            ensure_ascii=True,
        ),
        "human token" : user_token,
        "is_safe":is_safe,
        # NOTE(review): flagged_categories is hard-coded here (with
        # "hate": True) instead of being passed in like its sibling
        # add_user_message_to_es — this looks like leftover test data;
        # confirm and parameterize.
        "flagged_categories":{
                "violence": False,
                "hate": True,
                "self_harm": False
            },
    }
    
    # Index the document in Elasticsearch
    res = es.index(index=index, body=doc)
    return res

def add_assistant_message_to_es(customer, username, session_id, answer, assistant_token, modelname):
    """Persist one assistant (AI) chat turn to the es_index_name index.

    Serializes an AIMessage into the 'history' field; 'feedback' starts as
    the literal string "null" until the user rates the answer. Returns the
    raw ES response.
    """
    doc = {
        'candidate_name': customer,
        'modelname': modelname,
        'username': username,
        'session_id': session_id,
        'answer': answer,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(AIMessage(content=answer)),
            ensure_ascii=True,
        ),
        "assistant token": assistant_token,
        "feedback": "null",
    }
    return es.index(index=es_index_name, body=doc)

def add_system_message_to_es(customer, username, session_id, answer):
    """Persist one system message to the es_index_name index.

    Serializes a SystemMessage into the 'history' field; 'feedback' starts
    as the literal string "null". Returns the raw ES response.
    """
    doc = {
        'candidate_name': customer,
        # 'modelname':modelname,
        'username': username,
        'session_id': session_id,
        'answer': answer,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(
            message_to_dict(SystemMessage(content=answer)),
            ensure_ascii=True,
        ),
        # "assistant token" : assistant_token,
        "feedback": "null",
    }
    return es.index(index=es_index_name, body=doc)
def search_by_session_id(index_name, session_id):
    """Return up to 1000 raw hits for a session, oldest first by lastmodified_at."""
    body = {
        "size": 1000,
        "query": {"match": {"session_id": session_id}},
        "sort": [{"lastmodified_at": {"order": "asc"}}],
    }
    result = es.search(index=index_name, body=body)
    return result['hits']['hits']

def document_to_dict(doc):
    """Normalize a retrieved document (LangChain Document or plain dict) into
    a {'page_content', 'metadata'} dict.

    Side effect: when the document object carries a 'source' path in its
    metadata, that path is rewritten in place to its bare file name.
    For dict input, metadata is passed through as-is; for object input it is
    serialized to a JSON string.
    """
    if hasattr(doc, 'metadata') and 'source' in doc.metadata:
        # Strip directories so only the file name is stored/returned.
        doc.metadata['source'] = os.path.basename(doc.metadata['source'])

    if isinstance(doc, dict):
        return {
            'page_content': doc.get('page_content', ''),
            'metadata': doc.get('metadata', ''),
        }

    return {
        'page_content': doc.page_content,
        'metadata': json.dumps(doc.metadata) if hasattr(doc, 'metadata') else '',
    }

def add_document_ai_to_es(username, session_id, docs1, es_index_name, orderid, file):
    """Store the retrieved source documents for a session as one ES record.

    Each document is normalized via document_to_dict() and the whole list is
    JSON-serialized into 'history'; 'user_prompt' holds the order id and
    'filename' the originating file name. Returns the raw ES response.
    """
    serialized_docs = [document_to_dict(doc) for doc in docs1]
    record = {
        'username': username,
        'session_id': session_id,
        'user_prompt': orderid,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "history": json.dumps(serialized_docs, ensure_ascii=True),
        "filename": file,
    }
    return es.index(index=es_index_name, body=record)


def update_lastmodified_at(index_name, doc_id):
    """Stamp a document's lastmodified_at with the current epoch milliseconds."""
    now_ms = int(datetime.now().timestamp() * 1000)
    es.update(index=index_name, id=doc_id, body={"doc": {"lastmodified_at": now_ms}})
    
def update_create_at_with_sessionid(index_name, session_id):
    """Touch lastmodified_at on every document belonging to the given session."""
    for hit in search_by_session_id(index_name, session_id):
        update_lastmodified_at(index_name, hit['_id'])

        

def resize_image(image_path, size=(224, 224)):
    """Downscale the image at image_path to fit within `size` (aspect kept) and return it as BMP bytes."""
    with Image.open(image_path) as img:
        img.thumbnail(size, Image.LANCZOS)
        out = BytesIO()
        img.save(out, format="BMP")
        return out.getvalue()

def encode_image(image_data):
    """Base64-encode raw bytes and return the result as an ASCII str."""
    encoded = base64.b64encode(image_data)
    return encoded.decode("utf-8")

def get_current_iuser_idso_date():
    """Return the current UTC time as a naive ISO-8601 string.

    Fix: datetime.utcnow() is deprecated (scheduled for removal); use an
    aware datetime.now(timezone.utc) and strip tzinfo so the output format
    is unchanged for existing consumers (no '+00:00' suffix).
    """
    return datetime.now(timezone.utc).replace(tzinfo=None).isoformat()

def store_chat_history(es, index_name, chat_entry):
    """Index a single chat-history record into the given Elasticsearch index."""
    es.index(index=index_name, document=chat_entry)

def retrieve_chat_data(indexname, session_id):
    """Load the stored documents and chat history for a session.

    The session_id is used as the Elasticsearch document _id. Returns a
    (documents, chat_history) tuple, or (None, None) when ES reports
    found=False. NOTE(review): es.get raises NotFoundError for a missing id
    — confirm callers handle that path.
    """
    result = es.get(index=indexname, id=session_id)
    if not result["found"]:
        return None, None
    src = result["_source"]
    return src["documents"], src["chat_history"]



def chat_hist_img(base64_image, user_id, message, response, session_id, index_name_img, es):
    """Persist one image-chat exchange (user message, model reply and the base64 image)."""
    entry = {
        "username": user_id,
        "session_id": session_id,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "messages": [
            img_message_to_dict(message),
            img_message_to_dict(response),
        ],
        "img": base64_image,
    }
    store_chat_history(es, index_name_img, entry)
  
  
def img_message_to_dict(message):
    """Serialize a Human/AI message to {"type": ..., "data": {"content": ...}}.

    Multimodal (list-form) content is flattened to the joined text of its
    'text' parts.  Raises ValueError for any other message class.
    """
    if isinstance(message, HumanMessage):
        role = "human"
    elif isinstance(message, AIMessage):
        role = "ai"
    else:
        raise ValueError(f"Unexpected message type: {type(message)}")
    content = message.content
    if isinstance(content, list):
        content = " ".join(part['text'] for part in content if part['type'] == 'text')
    return {"type": role, "data": {"content": content}}



def img_message_from_dict(message_dict):
    """Inverse of img_message_to_dict; a list input is mapped element-wise.

    Raises ValueError for any type tag other than 'human' or 'ai'.
    """
    if isinstance(message_dict, list):
        return [img_message_from_dict(item) for item in message_dict]

    kind = message_dict.get("type")
    if kind == "human":
        return HumanMessage(content=message_dict["data"]["content"])
    if kind == "ai":
        return AIMessage(content=message_dict["data"]["content"])
    raise ValueError(f"Got unexpected message type: {kind}")


def retrieve_chat_history(es, index_name, session_id):
    """Return up to 5000 raw ES hits whose session_id matches."""
    body = {
        "query": {"match": {"session_id": session_id}},
        "size": 5000,
    }
    result = es.search(index=index_name, body=body)
    return result['hits']['hits']

def chat_hist_txt(user_id, session_id, message, response, es, index_name):
    """Index one text chat exchange (user message + AI response) for a session."""
    entry = {
        "username": user_id,
        "session_id": session_id,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "messages": [
            img_message_to_dict(message),
            img_message_to_dict(response),
        ],
    }
    store_chat_history(es, index_name, entry)
    
def encode_image1(image_path):
    """Read the file at *image_path* and return its contents base64-encoded."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")
    
def avi_to_base64(file):
    """Base64-encode the raw bytes of *file* (any file type, despite the name)."""
    with open(file, "rb") as fh:
        payload = fh.read()
    return base64.b64encode(payload).decode("utf-8")
    
def resize_and_convert_image(image_path):
    """Resize an image to fit 1024x1024 and convert BMP input to JPEG.

    Returns the path of the saved image.  BMP files are re-encoded as JPEG
    next to the original (which is then deleted); every other format is
    resized in place over the original path.
    """
    with Image.open(image_path) as img:
        # thumbnail() keeps the aspect ratio and never upscales.
        max_size = (1024, 1024)
        img.thumbnail(max_size, Image.LANCZOS)

        if img.format == 'BMP':
            img = img.convert('RGB')  # JPEG cannot carry palette/alpha BMP data
            # BUG FIX: the previous str.replace('.bmp', '.jpeg') missed
            # uppercase/mixed-case extensions (e.g. '.BMP'), which wrote JPEG
            # bytes into the original path and then deleted that very file.
            # splitext is case-agnostic and only touches the real extension.
            root, _ = os.path.splitext(image_path)
            converted_path = root + '.jpeg'
            img.save(converted_path, format='JPEG')
            os.remove(image_path)
            return converted_path

        # Non-BMP: the original path rewrite was an identity replace, so the
        # resized image simply overwrites the source file in its own format.
        img.save(image_path)
        return image_path
        
def add_documents(file_name1):
    """Load *file_name1* with the DocumentLoader method matching its extension.

    Returns (resp, docs) where resp is 1 on success and 0 on any failure
    (unsupported extension or loader error); docs stays '' when nothing
    could be loaded.
    """
    _, extension = os.path.splitext(file_name1)
    resp = 1
    docs1 = ''
    try:
        # Extension -> loader dispatch (case-insensitive lookup).
        loader_by_ext = {
            ".xlsx": document_loader.excel_file_loader,
            ".csv": document_loader.csv_file_loader,
            ".tsv": document_loader.tsv_file_loader,
            ".doc": document_loader.doc_docx_file_loader,
            ".docx": document_loader.doc_docx_file_loader,
            ".pdf": document_loader.pdf_file_loader,
            ".pptx": document_loader.pptx_file_loader,
            ".xml": document_loader.xml_file_loader,
            ".json": document_loader.json_file_loader,
            ".txt": document_loader.text_file_loader,
            ".md": document_loader.markdown_file_loader,
            ".html": document_loader.html_file_loader,
        }
        loader = loader_by_ext.get(extension.lower())
        if loader is None:
            raise ValueError(f"Unsupported file type: {extension}")
        docs1 = loader(file_name1)
    except Exception:
        # Best-effort by design: any failure is reported through resp == 0.
        resp = 0

    return resp, docs1

def encode_file(file_path):
    """Return the base64 text of the file at *file_path*."""
    with open(file_path, "rb") as fh:
        blob = fh.read()
    return base64.b64encode(blob).decode("utf-8")
    
def index_chat_message(sessionid, username, new_content, indexname, candidate, alliance_number):
    """Append *new_content* to a session's message list and upsert the ES document.

    A missing document starts from an empty message list; doc_as_upsert
    creates it on first write.  Note both created_at and lastmodified_at
    are rewritten on every call.
    """
    # Pull the current message list, tolerating a first-time session.
    try:
        existing_messages = es.get(index=indexname, id=sessionid)['_source']['messages']
    except elasticsearch.NotFoundError:
        existing_messages = []

    existing_messages.append({"data": {"content": new_content}})

    doc_fields = {
        "session_id": sessionid,
        "username": username,
        "created_at": round(time() * 1000),
        "lastmodified_at": round(time() * 1000),
        "messages": existing_messages,
        'candidate_name': candidate,
        "alliance_number": alliance_number,
    }
    es.update(
        index=indexname,
        id=sessionid,
        body={
            "doc": doc_fields,
            "doc_as_upsert": True,  # create the document if it doesn't exist yet
        },
    )
        
   
def image_upload(file_path, sessionid, username, indexname, candidate, alliance_number):
    """Index an uploaded image as an extraction prompt and return an image-only message.

    Encodes the file once and reuses the data URL — the original called
    encode_file() twice, reading and encoding the file from disk each time.
    The prompt+image message is stored under the session via
    index_chat_message(); the returned HumanMessage carries only the image
    for downstream model calls.  (Also drops the unused `filename` local.)
    """
    # Encode once; the same data URL is used for both the stored message
    # and the returned one.
    data_url = f"data:image/jpeg;base64,{encode_file(file_path)}"
    message = HumanMessage(
        content=[
            {"type": "text", "text": "Extracted text in the (unknown) image, remove if any ** and ###"},
            {
                "type": "image_url",
                "image_url": {"url": data_url},
            },
        ],
    )
    image_conv = message_to_dict_img(message)
    index_chat_message(sessionid, username, [image_conv], indexname, candidate, alliance_number)
    return HumanMessage(
        content=[
            {
                "type": "image_url",
                "image_url": {"url": data_url},
            },
        ],
    )





def message_to_dict_list(hist):
    """Serialize a sequence of chat messages into plain dicts keyed by role.

    AI content is wrapped as a one-element [{'type': 'text', 'text': ...}]
    list; human/system content passes through unchanged.  Messages of any
    other type are silently skipped.
    """
    out = []
    for msg in hist:
        role = msg.type
        if role == 'human':
            out.append({'type': 'human', 'data': msg.content})
        elif role == 'ai':
            out.append({'type': 'ai', 'data': [{'type': 'text', 'text': f"{msg.content}"}]})
        elif role == 'system':
            out.append({'type': 'system', 'data': msg.content})
    return out

def message_to_dict_img(hist):
    """Serialize one chat message into a plain dict keyed by its role.

    AI content is wrapped as a one-element text-part list; human/system
    content is passed through.  Returns None for any other role.
    """
    content = hist.content
    if hist.type == 'human':
        return {'type': 'human', 'data': content}
    if hist.type == 'ai':
        return {'type': 'ai', 'data': [{'type': 'text', 'text': f"{content}"}]}
    if hist.type == 'system':
        return {'type': 'system', 'data': content}
    return None
            


def message_from_dict_list(his_list):
    """Rebuild langchain message objects from serialized chat-history entries.

    Accepts a mixed list whose items are either message dicts or nested
    lists of message dicts (entries written through index_chat_message are
    wrapped in a list).  Items of unknown shape or role are skipped.
    """
    con_list = []
    for i in his_list:
        if isinstance(i, dict):
            if i.get('type') == 'human':
                con_list.append(HumanMessage(i.get('data')))
            # BUG FIX: this branch previously tested i['data'].get('type'),
            # but 'data' for ai entries is a list (see message_to_dict_img),
            # so the lookup raised AttributeError.  The role tag lives on
            # the item itself, like the human branch above.
            elif i.get('type') == 'ai':
                ai_data = i.get('data', [])
                if ai_data and isinstance(ai_data, list):
                    con_list.append(AIMessage(ai_data[0].get('text', '')))
        elif isinstance(i, list):  # entry wrapped in a list by the writer
            for lt in i:
                if lt.get('type') == 'human':
                    con_list.append(HumanMessage(lt.get('data')))
                elif lt.get('type') == 'ai':
                    con_list.append(AIMessage(lt.get('data')))
                elif lt.get('type') == 'system':
                    con_list.append(SystemMessage(lt.get('data')))

    return con_list

def retrieve_chat_content(sessionid, indexname):
    """Collect every stored message content for a session; None when empty.

    Any failure while walking the hits (missing keys, malformed entries) is
    logged and whatever was gathered so far still counts.
    """
    response = es.search(
        index=indexname,
        body={
            "query": {
                "bool": {
                    "must": [
                        {"match": {"session_id": sessionid}},
                    ]
                }
            }
        },
    )

    contents = []
    try:
        if response['hits']['total']['value'] > 0:
            for hit in response['hits']['hits']:
                for message in hit['_source']['messages']:
                    contents.append(message['data']['content'])
    except Exception as e:
        custom_log("error inside retrieve chat content", e)

    return contents if contents else None
    
# def retrieve_chat_content(sessionid, indexname):
#     response = es.search(
#         index=indexname,
#         body={
#             "query": {
#                 "bool": {
#                     "must": [
#                         {"match": {"session_id": sessionid}},
#                     ]
#                 }
#             }
#         }
#     )
 
#     contents = []
 
#     for hit in response.get('hits', {}).get('hits', []):
#         document = hit.get('_source', {})
#         messages = document.get('messages', [])
#         for message in messages:
#             data = message.get('data', {})
#             content = data.get('content')
#             if content:
#                 contents.append(content)
 
#     return contents

def query_and_combine(past_messages, user_query):
    """Retrieve context for *user_query*, append it to history, and ask GPT-4o.

    NOTE(review): relies on the module-level `db` vector store — confirm it
    is initialized before this function runs.
    """
    llm = ChatOpenAI(model="gpt-4o")

    # Similarity search against the vector store for supporting context.
    retrieved_data = db.search(user_query, search_type='similarity')

    # The retrieved documents are stringified and appended as an AI turn.
    context_message = AIMessage(content=f"{retrieved_data}")
    combined = past_messages + [context_message]

    return llm.invoke(combined)

def delete_document_content(index_name, session_id, condition_check):
    """Delete all documents matching both *session_id* and a user_prompt value.

    The user_prompt filter comes from *condition_check*; the status messages
    previously hard-coded 'upload1' regardless of the argument, which was
    misleading whenever a different value was passed.
    """
    query = {
        "query": {
            "bool": {
                "must": [
                    {"match": {"session_id": session_id}},
                    {"match": {"user_prompt": condition_check}},
                ]
            }
        }
    }

    delete_response = es.delete_by_query(index=index_name, body=query)

    deleted_count = delete_response['deleted']
    if deleted_count > 0:
        print(f"Successfully deleted {deleted_count} documents with session_id '{session_id}' and user_prompt '{condition_check}'.")
    else:
        print(f"No documents found with the provided session ID and user_prompt '{condition_check}'.")

def num_tokens_from_string(string: str, model: str) -> int:
    """Returns the number of tokens in a text string.

    NOTE(review): despite its name, *model* is passed straight to
    tiktoken.get_encoding(), so callers supply an encoding name such as
    'cl100k_base' (see the call site), not a model name — confirm intended.
    """
    encoding = tiktoken.get_encoding(model)
    return len(encoding.encode(string))

def dict_to_tuple(d):
    """Recursively convert dicts and lists into hashable nested tuples.

    Dict items are sorted by key so equal dicts map to equal tuples;
    scalars pass through unchanged.
    """
    if isinstance(d, dict):
        return tuple((key, dict_to_tuple(value)) for key, value in sorted(d.items()))
    if isinstance(d, list):
        return tuple(map(dict_to_tuple, d))
    return d
    
def fix_syntax_error(text):
    """Escape every double quote in *text* with a backslash."""
    return text.replace('"', '\\"')


def user_org_tokens(org_name, username, input_tokens, output_tokens,mailid):
    """Accumulate per-user token usage for an organization in Elasticsearch.

    Adds *input_tokens*/*output_tokens* to the user's running totals when a
    (orgname, username) document already exists, or creates a fresh user
    document (with a 500 assigned-token default) when the organization has
    documents but none for this username.  Returns the ES document id that
    was updated/created, or None when no document matched the organization.

    NOTE(review): the 'should' clause also matches org documents with no
    username field at all, so `user_tokens` may contain non-user docs —
    confirm this is intended.
    """
    # Search for documents where organization_name matches
    corp_orgid=None
    user_tokens = es.search(
        index=user_details_index,
        body={
            'query': {
                'bool': {
                    'must': [
                        {'match': {'orgname': org_name}}
                    ],
                    'should': [
                        {'match': {'username': username}},
                        {'bool': {'must_not': {'exists': {'field': 'username'}}}}
                    ]
                }
            }
        }
    )['hits']['hits']

    # Check if any document was found for the organization
    if user_tokens:
        username_found = False
        for doc in user_tokens:
            doc_id = doc['_id']
            doc_source = doc['_source']

            # Check if username matches the current document
            if doc_source.get('username') == username:
                username_found = True

                # Retrieve existing input/output tokens with default values
                inp = doc_source.get('input_tokens', 0)
                outp = doc_source.get('output_tokens', 0)

                # Update the token values
                input_overall_tokens = input_tokens + inp
                output_overall_tokens = output_tokens + outp    
                overall_tokens = input_overall_tokens + output_overall_tokens
                
                # custom_log("info",'input_overall_tokens %s'%input_overall_tokens)
                # custom_log("info",'output_overall_tokens %s'%output_overall_tokens)
                # custom_log("info",'overall_tokens %s'%overall_tokens)
                
                # Prepare the updated document
                updated_doc = {
                    'doc': {
                        'username': username,
                        'mail':mailid,
                        'input_tokens': input_overall_tokens,
                        'output_tokens': output_overall_tokens,
                        'overall_tokens': overall_tokens,
                        'orgname': org_name,
                        'limits_token':1

                    }
                }

                # Update the document in Elasticsearch
                es.update(index=user_details_index, id=doc_id, body=updated_doc)
                print(f"Document updated for username: {username}")
                corp_orgid=doc_id
                # Only the first matching user document is updated.
                break

        # If no document with the username is found but organization exists, create a new document
        if not username_found:
            new_doc = {
                'username': username,
                'mail':mailid,
                'input_tokens': input_tokens,
                'output_tokens': output_tokens,
                'overall_tokens': input_tokens + output_tokens,
                'orgname': org_name,
                'limits_token':1,
                # Default quota for a newly seen user in an existing org.
                'assigned_tokens':500
            }
            result=es.index(index=user_details_index, body=new_doc)
            print(f"New document created for username: {username}")
            corp_orgid = result['_id']
        return corp_orgid

    else:
        # No documents for this organization at all: nothing is created
        # (the creation path below was deliberately commented out) and the
        # function returns None.
        # custom_log("info",f"No document found for organization: {org_name}")
        # new_doc = {
        #     'username': username,
        #     'mail':mailid,
        #     'input_tokens': input_tokens,
        #     'output_tokens': output_tokens,
        #     'overall_tokens': input_tokens + output_tokens,
        #     'orgname': org_name,
            # 'limits_token':1,
            # 'assigned_tokens':500
        # }
        # result=es.index(index=user_details_index, body=new_doc)
        # print(f"New document created for username: {username}")
        # corp_orgid = result['_id']
        return corp_orgid

        
        
def org_token(input_tokens, output_tokens, org_name):
    """Add a request's token counts to an organization's running totals.

    Looks up the organization document, adds *input_tokens* and
    *output_tokens* to its stored totals, recomputes the quota percentage,
    and writes the document back.  No-op when the organization is absent.

    NOTE(review): the for loop below only assigns doc_id/doc_source, so when
    several documents match, only the LAST hit is read and updated — confirm
    the index holds exactly one document per organization.
    NOTE(review): raises ZeroDivisionError if 'assigned_tokens' is missing
    or 0 — confirm every org document carries a non-zero quota.
    """
    # Search for the document with the given organization name
    org_toks = es.search(
        index=org_details_index,
        body={
            'query': {
                'match': {
                    'organization_name': org_name
                }
            }
        }
    )['hits']['hits']
    if org_toks:
        for doc in org_toks:
            doc_id = doc['_id']
            doc_source = doc['_source']
        # Retrieve existing input/output tokens with default values
        inp = doc_source.get('input_overall_tokens', 0)
        outp = doc_source.get('output_overall_tokens', 0)
        assigned_tokens = doc_source.get("assigned_tokens", 0)

        input_overall_tokens = input_tokens + inp
        output_overall_tokens = output_tokens + outp
        overall_tokens = input_overall_tokens + output_overall_tokens
        # This is hard coded now but have to change dynamically change
        # tokens_usable=100000
        # Percentage of the assigned quota consumed so far, rounded down.
        percentage=floor((overall_tokens/assigned_tokens)*100)

        # Document to be updated
        update_doc = {
            'doc': {
                'output_overall_tokens': output_overall_tokens,
                'input_overall_tokens': input_overall_tokens,
                'overall_tokens': overall_tokens,
                'organization_name': org_name,
                # NOTE(review): 'percenatge' is misspelled but kept as-is —
                # other code may query this exact field name.
                'percenatge': percentage,
            }
        }

        # Update the document using its ID
        res = es.update(index=org_details_index, id=doc_id, body=update_doc)
        

@csrf_exempt
def moderate_input(text):
    """
    Check user input using the Moderation API.
    Returns a tuple (is_safe, flagged_categories).
    """
    # NOTE(review): csrf_exempt on a non-view helper looks unnecessary —
    # confirm this is never wired as a Django view before removing it.
    try:
        client = OpenAI()
        response = client.moderations.create(
            model="omni-moderation-latest",
            input=text,
        )
        result = response.to_dict()["results"][0]
        if result["flagged"]:
            return False, result["categories"]
        return True, {}
    except Exception as e:
        # logger.exception(f"Moderation API failed: {e}")
        print(f"Moderation API failed: {e}")
        return True, {}  # fail open: treat input as safe if the API errors



    
@csrf_exempt
def allibot(request):
    try:
        if request.method=='POST':
            action_type = request.POST.get('operational_type')
            mailid=request.session.get('email')
            companyCode=request.session.get('companyCode')
            
            custom_log("info", mailid)
               

            llm = ChatGoogleGenerativeAI(api_key = "AIzaSyA3NBj38uVck1-DusRC-sPdSGT0Lg_ZJ6Q",
                model  = "gemini-2.0-flash-001"
            )  
            # request.session.clear()
            upload_dir = os.path.join(os.path.dirname(__file__))+'/image_pth'
            if not os.path.exists(upload_dir):
                os.makedirs(upload_dir)
            if action_type == 'action1':
                print("into action 1")

                username =request.POST.get('username')

                session_id=request.POST.get('session_id')
                user_prompt = request.POST.get('message')
                files = request.POST.getlist('file_names[]')
                candidate =request.POST.get('candidate_name')
                org_name=request.POST.get('organization_name')
                alliance_number = request.session.get('unique_id')


                update_create_at_with_sessionid(es_index_name, session_id)
                input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'cl100k_base')
                input_token_cost_gemini_flash_2 = gemini_token_counter(user_prompt)
                is_safe, flagged_categories = moderate_input(user_prompt)
                add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,input_token_cost_gemini_flash_2,'gemini-flash-2.0',is_safe, flagged_categories)
                if is_safe:
                    try: 
                        if glob(upload_dir+'/*') and files:
                            a_with_path = [upload_dir+'/'+ filename for filename in files]
                            if a_with_path:
                                ai_type_conv=[]
                                img_only_resp=[]
                                for i in a_with_path:
                                    if i.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp')):
                                        encodeimg= image_upload(i,session_id,username,imtext_index,candidate)
                                        response = AIMessage(content = generate_content_with_image(i))
                                        ai_type_conv.append(message_to_dict_img(response))
                                        img_only_resp.append(response.content)
                                        filename_only = os.path.basename(i)
                                        insert_document(session_id, "True",filename_only)
                                        
                                    else:
                                        ind_response1,docs1=add_documents(i)
                                        file=i.split('/')[-1]
                                        formatted_data = [item.page_content for item in docs1]
                                        # Ensure the text is passed correctly in the content
                                        message = HumanMessage(
                                            content=[
                                                {"type": "text", "text": f"Filename: {file}"},
                                                {"type": "text", "text": f"Here is the text content of the uploaded document {file}:\n" + '\n'.join(formatted_data)}
                                            ]
                                        )
                                        joined_data = '\n'.join(formatted_data)

                                        template_file = f"""
                                                The following is the extracted text content from the uploaded document titled "{file}":

                                                {joined_data}

                                                Please carefully review the content above and generate a well-structured summary in paragraph form.
                                                The summary should be clear, concise, and limited to approximately 300 words.
                                                If it is relevant, you may mention the filename "{file}" in the summary.
                                                """
                                        contents = text_generation_google_gemini_api(template_file)
                                        contents = contents.get("candidates")[0].get("content").get("parts")[0].get("text")
                                        img_doc_cont=message_to_dict_img(message)
                                        index_chat_message(session_id, username, [img_doc_cont],imtext_index,candidate,alliance_number)
                                        # static_content="Could you please clarify the specific operations you'd like to perform on the uploaded documents? If you need further assistance or have additional questions, feel free to ask"
                                        ai_type_conv.append(message_to_dict_img(AIMessage(content=contents)))
                                        img_only_resp.append(contents)
                                        filename_only = os.path.basename(i)
                                        insert_document(session_id, "True",filename_only)
    
                    except Exception as e:
                        print(e)
                        # custom_log("info",'Exception occured in the image upload %s'%e)
                        custom_log("info - Exception occured in the image upload", e)
                    
                    files_dir = glob(upload_dir+'/*')
                    for f in files_dir:
                        os.remove(f)
                    if user_prompt:
                        sleep(3)
                        ret_chat = retrieve_chat_content(session_id,imtext_index)
                        # Checking if the document with that chat_id and user_id, if not create a new list 
                        if ret_chat:
                            converted_chat = ret_chat
                            converted_chat = message_from_dict_list(converted_chat)
                        else:
                            converted_chat = []
                        
                        clean_temp = '''Answer the question in your own words as truthfully as possible from the context given to you.
                                You are a helpful assistant. 
                                Response must be in and around 200 words. It must be in paragraph manner and it must not exceed 4 paragraphs.
                                Give a line of space between the paragraphs.
                
                                If there is extensive context, use as much as possible to form a detailed response around 200 words. If the context is minimal, provide a response based on the available information, without the need to meet the word limit.
                
                                If questions are asked where there is no relevant context available, simply respond with:
                                "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"
                                Remove ** or any ### from the response and don't mentions that in response about the formatting
                                Answer the question in a natural way and even though if I ask the question along with the context, only answer the question and don't mention about the context in the response similarly provided on the context.
                                        '''

                        es= Elasticsearch('https://elastic:8oKIqy312EBsAPzWT64NUzji@scic-elasticsearch.es.us-central1.gcp.cloud.es.io:443',request_timeout=300,retry_on_timeout=True)


                        db = ElasticsearchStore(
                            es_connection=es,
                            index_name=['docx_new_page','pdf_new_page','additional_commercial_forms','docx_pdf_page','botcoach_index','html_unstructured1','excel_json_ind','pdf_json_ind','iso_forms_pdf_full_pdf','iso_forms_pdf_pagewise','json_others',"allibot_faq","allibot-faqs-full","caa_index","caais_index","carrier_appointments_master_list_caais","carrier_appointments_master_list_cl","carrier_appointments_master_list_personal","carrier_appointments_master_list_wholesalers"],
                            embedding=embeddings,
                            strategy=ElasticsearchStore.ExactRetrievalStrategy()
                        )
                        
                        # Convert the question to HumanMessage function    
                        user_input = HumanMessage(content=[{"type": "text", "text": user_prompt}])
                        
                        # Combining both retrieved chat history(converted_chat) and user_input
                        search_type = 'similarity'  # Adjust this based on your requirements
                                                
                        # Use the search method with the search_type argument
                        retrieved_data = db.search(user_prompt, search_type=search_type)
                        formatted_data = []
                        source_data_hist = []

                        for item in retrieved_data:
                            # Extract just the file name without extension
                            source_path = item.metadata.get("source", "")
                            source_name = os.path.splitext(os.path.basename(source_path))[0]

                            source_data_hist.append(source_name)

                            formatted_data = [
                            {"type": "text", "text": item.page_content} 
                            for item in retrieved_data 
                            if item.page_content.strip() != '{"text": " "}'  # Only add if page_content is not just whitespace or empty
                            ]     
                        
                        
                        source_string = "\n".join(source_data_hist)
                        # Create the AIMessage instance
                        
                        ai_message = HumanMessage(content=formatted_data)


                        result = get_document_by_session_id(session_id)
                        if result:
                            print("Document found:", result)
                            source_string = result.get("file_name")
                            if formatted_data:
                                all_chat = converted_chat +[user_input]
                            else:
                                all_chat = converted_chat + [user_input]

                        else:
                            print("No document found with that session_id.")
                            if formatted_data:
                                all_chat = converted_chat + [ai_message]+[user_input]
                            else:
                                all_chat = converted_chat + [user_input]
        

                        clean_temp = '''Answer the question in your own words as truthfully as possible from the context given to you.
                                You are a helpful assistant. 
                                Response must be in and around 200 words. It must be in paragraph manner and it must not exceed 4 paragraphs.
                                Give a line of space between the paragraphs.
                
                                If there is extensive context, use as much as possible to form a detailed response around 200 words. If the context is minimal, provide a response based on the available information, without the need to meet the word limit.
                
                                If questions are asked where there is no relevant context available, simply respond with:
                                "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"
                                Remove ** or any ### from the response and don't mentions that in response about the formatting
                                Answer the question in a natural way and even though if I ask the question along with the context, only answer the question and don't mention about the context in the response similarly provided on the context.

                                        '''
                        

                        Clean_template =    f'''Answer the question in your own words as truthfully as possible from the context given to you.
                                You are a helpful assistant. 

                                user question:{user_prompt}

                                context: {formatted_data}

                                chat history: {ret_chat}


                                Response must be in and around 200 words. It must be in paragraph manner and it must not exceed 4 paragraphs.
                                Give a line of space between the paragraphs.
                
                                If there is extensive context, use as much as possible to form a detailed response around 200 words. If the context is minimal, provide a response based on the available information, without the need to meet the word limit.
                
                                If questions are asked where there is no relevant context available, simply respond with:
                                "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"
                                Remove ** or any ### from the response and don't mentions that in response about the formatting
                                Answer the question in a natural way and even though if I ask the question along with the context, only answer the question and don't mention about the context in the response similarly provided on the context.

                                        '''  
                                                          
                        print("Clean_template", Clean_template)
                        all_chat.append(clean_temp)     
                        custom_log("info",f"alliance number {alliance_number}")
                        custom_log("info",f"user name {candidate}")
                        query = {
                        "query": {
                            "bool": {
                                "must": [
                                    {"term": {"user_id.keyword": alliance_number}},
                                    {"term": {"candidate_name.keyword": candidate}},
                                ]
                            }
                        }
                    }
    
                        # Search the index for matching records
                        response = es.search(index=user_details_index, body=query)
                        # print('response',response)
                        # Variable to hold the conversation activation state
                        # activate_conv = True
                        assigned_tokens=0
                        overall_tokens=0
                        # Process the results
                        custom_log("info1",f"user detail: {response}")
                        for hit in response["hits"]["hits"]:
                            custom_log("info",f"user details {hit}")
                            assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                            overall_tokens = hit["_source"].get("overall_tokens", 0)

                        try:
                            img_response1 = text_generation_google_gemini_api(Clean_template)   
                            img_response1 = img_response1.get("candidates")[0].get("content").get("parts")[0].get("text")    
                            print("img_response1", img_response1)
                        except Exception as e:
                            custom_log("error at allibot response", e)
                            return JsonResponse({'response': "There was an error processing your request. Please try again later.", 'session_id': session_id})                  
                             
                        output_token_cost_gemini_flash_2 = gemini_token_counter(img_response1)

                        custom_log("info",f"assigned_tokens {assigned_tokens}")    
                        print('assigned tokens',assigned_tokens)
                        custom_log("info",f"output tokens {output_token_cost_gemini_flash_2}")
                        print('oup',output_token_cost_gemini_flash_2)
        
                        if assigned_tokens-(overall_tokens+output_token_cost_gemini_flash_2) <0:
                            print('if satisfied')
                            response = es.search(
                            index=user_details_index,
                            body={
                                    "query": {
                                        "bool": {
                                            "must": [
                                                {"term": {"user_id.keyword": alliance_number}},
                                                {"term": {"candidate_name.keyword": candidate}},
                                            ]
                                        }
                                    },
                                }
                            )
                        
                            if response['hits']['total']['value'] > 0:
                                user_id = response['hits']['hits'][0]['_id']
                            
                                # Use script to either add or update 'limits_token'
                                es.update(
                                    index=user_details_index,
                                    id=user_id,
                                    body={
                                        "script": {
                                            "source": """
                                                if (ctx._source.containsKey('limits_token')) {
                                                    // If limits_token exists, update its value
                                                    ctx._source.limits_token = params.limits_token;
                                                } else {
                                                    // If limits_token doesn't exist, create it and set the value
                                                    ctx._source['limits_token'] = params.limits_token;
                                                }
                                            """,
                                            "params": {
                                                "limits_token": 0
                                            }
                                        }
                                    }
                                )
        
                            
                            # Search the index for matching records
                            response = es.search(index=user_details_index, body=query)
        
                            return JsonResponse({'response': 'You have reached your usage limit. Please contact your administrator to request additional tokens.','session_id':session_id})
                        else:  

                            
                            # llm = ChatOpenAI(model="gpt-4o")
                            
                            # Converting retrieved chat history and the recent user question to dictionary form to store in ElasticSearch
                            history = message_to_dict_list([user_input])
                            
                            # history.append(message_to_dict_img((img_response)))
                            # Converting the response to dictionary and appending to the history
                            history.append(message_to_dict_img((AIMessage(content=img_response1))))
                            
                            # Updating the current conversation to an already existing document 
                            index_chat_message(session_id,username, history,imtext_index,candidate,alliance_number)
                            output_token_cost_gemini_flash_2 = gemini_token_counter(img_response1)
                            
                            query = {
                                "query": {
                                    "bool": {
                                        "must": [
                                            {"term": {"user_id.keyword": alliance_number}},
                                            {"term": {"candidate_name.keyword": candidate}},
                                        ]
                                    }
                                }
                            }
            
                            # Search the index for matching records
                            response = es.search(index=user_details_index, body=query)

                            assigned_tokens=0
                            overall_tokens=0
                            # Process the results
                            for hit in response["hits"]["hits"]:
                                assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                                overall_tokens = hit["_source"].get("overall_tokens", 0)
            
                            # clean_resp = img_response.content
                            clean_resp = img_response1
                            add_assistant_message_to_es(candidate,username,session_id, clean_resp,output_token_cost_gemini_flash_2,'gemini-flash-2.0')

                            # add_assistant_message_to_es(candidate,username,session_id,img_response.content,output_token_cost_gpt4o,'gpt-4o')
                            org_token(input_token_cost_gemini_flash_2, output_token_cost_gemini_flash_2, org_name)
                            user_org_tokens(org_name,candidate,input_token_cost_gemini_flash_2,output_token_cost_gemini_flash_2,mailid)
                            try:                        
                                json_str=dumps(formatted_data)         
                                user_ques=dumps(user_prompt)
                                answer_prompt=dumps(clean_resp)
                                unique_entries_str=dumps(source_string)
                                cmd=f"/var/www/html/CAA_Allibot30/AlliBot30_app/Quesgen_answer_relevancy.py {user_ques} {answer_prompt} {json_str} {unique_entries_str}"                     # custom_log("info",cmd)
                                os.popen(cmd)
                                custom_log("error at quest1", cmd)
                                return JsonResponse({'response': clean_resp,'session_id':session_id})
                            except Exception as e:
                                custom_log("error at allibot response",e)                            
                                return JsonResponse({'response': clean_resp,'session_id':session_id})
                        
                    else:
                        unique_data = []
                        seen = set()
    
                        for d in ai_type_conv:
                            dict_tuple = dict_to_tuple(d)
                            if dict_tuple not in seen:
                                unique_data.append(d)
                                seen.add(dict_tuple)
    
                        for i in range(0,len(unique_data)):
                            index_chat_message(session_id,username, [unique_data[i]],imtext_index,candidate,alliance_number)
                        only_files_response='\n\n'.join(list(set(img_only_resp)))
                        output_token_cost_gemini_flash_2 = gemini_token_counter(only_files_response)
                        output_token_cost_gpt4o =  num_tokens_from_string(only_files_response, 'cl100k_base') 
                        
                      
                        add_assistant_message_to_es(candidate,username,session_id,only_files_response,output_token_cost_gemini_flash_2,'gemini-flash-2.0')
                        org_token(input_token_cost_gemini_flash_2, output_token_cost_gemini_flash_2, org_name)
                        user_org_tokens(org_name,candidate,input_token_cost_gemini_flash_2,output_token_cost_gemini_flash_2,mailid)
                        return JsonResponse({'response': only_files_response,'session_id':session_id})
                        
                        
                else:
                       return JsonResponse({
                           'response': "The Question you asked violates AlliBot usage policies.Don't repeat this from blocking of AlliBot account.",
                           'session_id':session_id,
                           'flagged_categories': flagged_categories
                       })    
            
            elif action_type == 'action2':
                files = request.FILES.getlist('files')
                print("into action 2")

                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)


                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                # ind_response=Image_indexing(upload_dir,'image_datas',conver_id)
                try:
                    image_files = glob(upload_dir + '/*.jpg') + glob(upload_dir + '/*.jpeg') + glob(upload_dir + '/*.png') + glob(upload_dir + '/*.bmp')
    
                    # Use glob to get all supported image files (jpg, jpeg, png, bmp)
                    supported_formats = ['.jpg', '.jpeg', '.png', '.bmp']
                    image_files = []
                    for ext in supported_formats:
                        image_files.extend(glob(os.path.join(upload_dir, f'*{ext}')))
    
                    # Iterate through each image file
                    for image_file in image_files:
                        # Resize and convert the image if necessary
                        processed_image_path = resize_and_convert_image(image_file)
                    ind_response=1

                except Exception as e:
                    ind_response=0
                if ind_response==1:
                    res_msg=str(len(files))+' uploads complete'
                    return JsonResponse({'status': 'success', 'message':res_msg})
                else:
                    res_msg='Unable to process image.'
                    return JsonResponse({'status': 'failed', 'message':res_msg})

    except Exception as e:
            # logger.exception('Exception occured due to %s' % e)
            return JsonResponse({'error': str(e)})
      
 
def get_resptable(formatted_template):
    """Submit *formatted_template* to GPT-4o and return the reply text.

    The prompt is sent as a single user-role message in a non-streaming
    chat-completion request, and the content of the first choice is
    returned verbatim.

    Parameters
    ----------
    formatted_template : str
        The fully rendered prompt to send to the model.

    Returns
    -------
    str
        The assistant's response content from the first completion choice.
    """
    # Non-streaming call: we need the complete answer as one response object.
    completion = OpenAI().chat.completions.create(
        model="gpt-4o",
        stream=False,
        messages=[{"role": "user", "content": formatted_template}],
    )
    return completion.choices[0].message.content
    
@csrf_exempt
def formscompare(request):
    try:
        if request.method=='POST':
            action_type = request.POST.get('operational_type')
            if action_type == 'action1':         
                custom_log("info", "into action1") 
                username = request.POST.get('username')
                session_id = request.POST.get('session_id')
                user_prompt = request.POST.get('message')
                custom_log("prompt at allidocs", user_prompt)
                candidate =request.POST.get('candidate_name')
                org_name=request.POST.get('organization_name')
                mailid=request.session.get('email')
                retrieved_data = db.search(user_prompt, search_type='similarity')
               
                
                try:
                    update_create_at_with_sessionid(es_index_name, session_id)
                    is_safe, flagged_categories = moderate_input(user_prompt)
                    user_token = 0
                    try:
                        
                        add_user_message_to_es(candidate,username,session_id, user_prompt,es_index_name,user_token,'gemini-flash-2.0',is_safe, flagged_categories)
                    except Exception as e:
                        custom_log("error at 0", e)
                except Exception as e:
                    custom_log("error at 1", e)    


                if is_safe:
                    template = """Answer the question in your own words as truthfully as possible from the context given to you.
                    You are a helpful assistant. Include the filename within your response where relevant.
                    Response must be in and around 200 words. It must be in paragraph manner and it must not exceed 4 paragraphs.
                    Give a line of space between the paragraphs.

                    If there is extensive context, use as much as possible to form a detailed response around 200 words. If the context is minimal, provide a response based on the available information, without the need to meet the word limit.

                    If questions are asked where there is no relevant context available, simply respond with:
                    "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"

                    Context: {context}  

                    {chat_history}  
                    Human: {question}  
                    Assistant:"""

                    

                    
                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
        
                        
                                    
                    user_input = HumanMessage(content=[{"type": "text", "text": user_prompt}])


                    search_type = 'similarity'  # Adjust this based on your requirements
                                                    
                        # Use the search method with the search_type argument
                    retrieved_data = db.search(user_prompt, search_type=search_type)
                    formatted_data = [
                                {"type": "text", "text": item.page_content} 
                                for item in retrieved_data 
                                if item.page_content.strip() != '{"text": " "}'  # Only add if page_content is not just whitespace or empty
                            ]
                            # Create the AIMessage instance
                        
                    custom_log("formatted data",formatted_data)

                        
                    ret_chat = retrieve_chat_content(session_id,es_index_name)

                    custom_log("reieved data",ret_chat)
                       
                    

                    # Checking if the document with that chat_id and user_id, if not create a new list 
                    if ret_chat and isinstance(ret_chat, (list, dict)):
                                converted_chat = ret_chat
                                converted_chat = message_from_dict_list(converted_chat)
                    else:
                                converted_chat = []
                            
                    ai_message = HumanMessage(content=formatted_data)           

                    

                     

                    result = es.search(
                            index=forms_index,
                            query={"match": {"session_id": session_id}},
                            sort="lastmodified_at:asc",
                        )
                        
                    if result and len(result["hits"]["hits"]) > 0:
                            doc_items = [
                                json.loads(document["_source"]["history"])
                                for document in result["hits"]["hits"]
                            ]
                    else:
                            doc_items = []   

                    



                    uploaded_document_docs = []
                    if doc_items:
                            for i in doc_items:
                                page_content = str(i[0].get('page_content')) if i[0].get('page_content') else ""
            
                                # Ensure metadata is a dictionary
                                metadata = i[0].get('metadata')
                                if not isinstance(metadata, dict):                                
                                    try:
                                        metadata = json.loads(metadata) if isinstance(metadata, str) else {}
                                    except (json.JSONDecodeError, TypeError):
                                        metadata = {}
            
                                uploaded_document_docs.append(BaseDocument(page_content=page_content, metadata=metadata))

                                

                    if formatted_data:
                            all_chat = converted_chat + [ai_message]+[user_input] + uploaded_document_docs
                    else:
                            all_chat = converted_chat + [user_input]  

                    clean_temp = '''Answer the question in your own words as truthfully as possible from the context given to you.
                                You are a helpful assistant. 
                                Response must be in and around 200 words. It must be in paragraph manner and it must not exceed 4 paragraphs.
                                Give a line of space between the paragraphs.
                
                                If there is extensive context, use as much as possible to form a detailed response around 200 words. If the context is minimal, provide a response based on the available information, without the need to meet the word limit.
                
                                If questions are asked where there is no relevant context available, simply respond with:
                                "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"
                                Remove ** or any ### from the response and don't mentions that in response about the formatting
                                        '''

                    all_chat.append(clean_temp)

                    custom_log("info", all_chat)
                
                
                    query = {
                        "query": {
                            "bool": {
                                "must": [
                                    {"term": {"username": candidate}},
                                ]
                            }
                        }
                    }
    
                    # Search the index for matching records
                    response = es.search(index=user_details_index, body=query)

                    assigned_tokens=0
                    overall_tokens=0
                    # Process the results
                    for hit in response["hits"]["hits"]:
                        assigned_tokens = hit["_source"].get("assigned_tokens", 0)
                        overall_tokens = hit["_source"].get("overall_tokens", 0)
                    
                    print('assigned_tokens',assigned_tokens)
                    print('overall_tokens',overall_tokens)

                    
                    all_chat = all_chat = ' '.join(doc.page_content for doc in uploaded_document_docs)

                    template = f"""Answer the question in your own words as truthfully as possible from the context given to you.
                                    You are a helpful assistant. Include the filename within your response where relevant.
                                    Response must be in and around 200 words. It must be in paragraph manner and it must not exceed 4 paragraphs.
                                    Give a line of space between the paragraphs.

                                    If there is extensive context, use as much as possible to form a detailed response around 200 words. If the context is minimal, provide a response based on the available information, without the need to meet the word limit.

                                    If questions are asked where there is no relevant context available, simply respond with:
                                    "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"

                                    Context: {all_chat}  

                                    {converted_chat}  
                                    Human: {user_prompt}  
                                    Assistant:"""
                    
                    custom_log("template",template )
                    assistant_response = text_generation_google_gemini_api(template)
                    assistant_response = assistant_response.get("candidates")[0].get("content").get("parts")[0].get("text")
                    custom_log("info", assistant_response)

                      

                    assistant_token = 0                   
                    add_assistant_message_to_es(candidate,username,session_id, assistant_response,assistant_token,'gemini-flash-2.0')
                    return JsonResponse({'response': assistant_response,'session_id':session_id})
                    
                else:
                    return JsonResponse({
                        'response': "The Question you asked violates AlliBot usage policies.Don't repeat this from blocking of AlliBot account.",
                        'session_id':session_id,
                        'flagged_categories': flagged_categories
                    })    
                
            elif action_type == 'action2':
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                username= request.POST.get('user')

                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__))+'/formsupload_files'
                if not os.path.exists(upload_dir):
                    os.makedirs(upload_dir)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                files_dir = glob(upload_dir+'/*')
                delete_document_content(forms_index,conver_id,'upload1')
                
                image_files = glob(upload_dir + '/*.jpg') + glob(upload_dir + '/*.jpeg') + glob(upload_dir + '/*.png') + glob(upload_dir + '/*.bmp')
                if image_files:
                    supported_formats = ['.jpg', '.jpeg', '.png', '.bmp']
                    image_files = []
                    for ext in supported_formats:
                        image_files.extend(glob(os.path.join(upload_dir, f'*{ext}')))
                    chain = ChatGoogleGenerativeAI(
                                    model="gemini-2.0-flash", 
                                )  
                    # Iterate through each image file
                    for image_file in image_files:
                        print('image_file',image_file)
                        # Resize and convert the image if necessary
                        processed_image_path = resize_and_convert_image(image_file)
                        print('processed_image_path',processed_image_path)
                        # Encode the image
                        image = encode_image1(processed_image_path)
    
                        # Determine the image MIME type
                        if processed_image_path.endswith('.jpeg') or processed_image_path.endswith('.jpg'):
                            mime_type = 'image/jpeg'
                        elif processed_image_path.endswith('.png'):
                            mime_type = 'image/png'
                        else:
                            mime_type = 'image/jpeg'  # Default to JPEG if BMP was converted
    
                        # Invoke your chatbot with the message containing the image
                        msg = chain.invoke([
                            AIMessage(
                                content="You are a useful bot that is especially good at OCR from images"
                            ),
                            HumanMessage(
                                content=[
                                    {"type": "text", "text": "You are a useful bot that is especially good at OCR from images"},
                                    {
                                        "type": "image_url",
                                        "image_url": {
                                            "url": f"data:{mime_type};base64,{image}"
                                        },
                                    },
                                ]
                            )
                        ])
                        ind_response1=1
                        docs1=msg.content
                        print('img out',msg.content)
                else:         
                    ind_response1,docs1=add_documents(files_dir[0])
                file=files_dir[0].split('/')[-1]
                
                if not isinstance(docs1, list):
                    # Wrap it in a list if it's not already
                    docs1 = [docs1]
    
                if docs1:
                    for i, doc in enumerate(docs1):
                        if isinstance(doc, str):  # Handle case where doc is a string
                            docs1[i] = {
                                "page_content": f"context of the first uploaded {file} image: {doc}",
                                "metadata": {"source": files_dir[0]},  # Basic metadata with source
                            }
                        elif hasattr(doc, "page_content"):  # Handle case where doc is an object
                            doc.page_content = {
                                f"context of the first uploaded {file} document": doc.page_content
                            }
                            

                    add_document_ai_to_es( username, conver_id, docs1,forms_index,'upload1',f"{file}")
                for f in files_dir:
                    os.remove(f)
                    
                if ind_response1==1:
                    res_msg=str(len(files))+' uploads complete'
                    return JsonResponse({'status': 'success', 'message':res_msg})

                else:
                    res_msg='unable to process upload1 documents.'
                    return JsonResponse({'status': 'failed', 'message':res_msg})
            
            elif action_type == 'action3':
                files = request.FILES.getlist('files')
                conver_id=request.POST.get('session_id')
                username= request.POST.get('user')

                if not files:
                    return JsonResponse({'status': 'error', 'message': 'No files uploaded'}, status=400)

                upload_dir = os.path.join(os.path.dirname(__file__))+'/formsupload_files'
                if not os.path.exists(upload_dir):
                    os.makedirs(upload_dir)

                for file in files:
                    file_path = os.path.join(upload_dir, file.name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in file.chunks():
                            destination.write(chunk)
                files_dir = glob(upload_dir+'/*')
                delete_document_content(forms_index,conver_id,'upload2')
                
                image_files = glob(upload_dir + '/*.jpg') + glob(upload_dir + '/*.jpeg') + glob(upload_dir + '/*.png') + glob(upload_dir + '/*.bmp')
                if image_files:
                    supported_formats = ['.jpg', '.jpeg', '.png', '.bmp']
                    image_files = []
                    for ext in supported_formats:
                        image_files.extend(glob(os.path.join(upload_dir, f'*{ext}')))
                    chain = ChatGoogleGenerativeAI(
                            model="gemini-2.0-flash", 
                        )  
                    # Iterate through each image file
                    for image_file in image_files:
                        # Resize and convert the image if necessary
                        processed_image_path = resize_and_convert_image(image_file)
                        # print('processed_image_path',processed_image_path)
                        # Encode the image
                        image = encode_image1(processed_image_path)
    
                        # Determine the image MIME type
                        if processed_image_path.endswith('.jpeg') or processed_image_path.endswith('.jpg'):
                            mime_type = 'image/jpeg'
                        elif processed_image_path.endswith('.png'):
                            mime_type = 'image/png'
                        else:
                            mime_type = 'image/jpeg'  # Default to JPEG if BMP was converted
    
                        # Invoke your chatbot with the message containing the image
                        msg = chain.invoke([
                            AIMessage(
                                content="You are a useful bot that is especially good at OCR from images and don't respond with these ** and ### special characters"
                            ),
                            HumanMessage(
                                content=[
                                    {"type": "text", "text": "You are a useful bot that is especially good at OCR from images"},
                                    {
                                        "type": "image_url",
                                        "image_url": {
                                            "url": f"data:{mime_type};base64,{image}"
                                        },
                                    },
                                ]
                            )
                        ])
                        ind_response2=1
                        docs2=msg.content
                else:         
                    ind_response2,docs2=add_documents(files_dir[0])
                file=files_dir[0].split('/')[-1]

                if not isinstance(docs2, list):
                    # Wrap it in a list if it's not already
                    docs2 = [docs2]
    
                if docs2:
                    for i, doc in enumerate(docs2):
                        if isinstance(doc, str):  # Handle case where doc is a string
                            docs2[i] = {
                                "page_content": f"context of the first uploaded {file} image: {doc}",
                                "metadata": {"source": files_dir[0]},  # Basic metadata with source
                            }
                        elif hasattr(doc, "page_content"):  # Handle case where doc is an object
                            doc.page_content = {
                                f"context of the first uploaded {file} document": doc.page_content
                            }

                    add_document_ai_to_es( username, conver_id, docs2,forms_index,'upload2',f"{file}")
                
                for f in files_dir:
                    os.remove(f)
                if ind_response2==1:
                    res_msg=str(len(files))+' uploads complete'
                    return JsonResponse({'status': 'success', 'message':res_msg})

                else:
                    res_msg='unable to process upload2 documents.'
                    return JsonResponse({'status': 'failed', 'message':res_msg})
            
            elif action_type == 'action4':          
                username = request.POST.get('username')
                session_id = request.POST.get('session_id')
                candidate =request.POST.get('candidate_name')
                org_name=request.POST.get('organization_name')
                mailid=request.session.get('email')
                
                user_prompt = """Extract the following information from each uploaded document and return the output strictly in a table format only, with column names as the document names and row fields as: Policy Number, Policy Effective Date, Policy Issue Date, Schedule of Forms and Endorsements, Common Policy Declarations and Conditions, Declarations, Coverage, and Endorsements.

                Ensure the following:  
                1. The Policy Number format must strictly follow the pattern of two alphabetic characters, a space, followed by two digits, a space, repeated thrice (e.g., AG 00 02 30 06). The Policy Number is typically found in the header or footer of the document. If the policy number field appears empty, perform a thorough search across the document, including all sections (header, footer, body text, metadata, etc.), to ensure the policy number is identified and extracted wherever it exists.
                2. If any field is missing or not found in a document, include only the data from the fields that are present. Use as much detail as possible, up to 100 words, in each field that is available. Leave missing fields blank in the table.  

                For Specific Fields:  
                - Schedule of Forms and Endorsements: Get data based on the list of policy forms, schedules, and endorsements by line of business.  
                - Declarations: Include line of coverage-specific declarations (general liability, business auto, business income, contractors equipment, etc.).  
                - Coverages: Provide a comparison of coverages offered by the underlying policy (general liability, business auto, business income, contractors equipment, etc.).  
                - Endorsements: List the name of each endorsement included in the document with a summary of how each endorsement affects the underlying policy.  

                Response Format:  
                - Provide the extracted data in a table format as described above.  
                - Include an "Overall Summary" section directly under the table. This summary must summarise the table data's important observations across the documents, titled as **summary**.
                """


                update_create_at_with_sessionid(es_index_name, session_id)
                is_safe, flagged_categories = moderate_input(user_prompt)
                is_safe = True
                flagged_categories = True
                user_token = "987"
                add_chat_message_to_es(candidate,username,session_id, user_prompt,es_index_name,user_token,'gemini-flash-2.0',is_safe)
                
                
                if is_safe:
                    template = """Answer the question in your own words as truthfully as possible from the context given to you.  
                    You are a helpful assistant. Include the filename within your response where relevant.  
                    Extract the following information from each uploaded document and return the output strictly in a table format only.  

                    Table Format Requirements:  
                    - The table must have a header row with "Field Name" in the first column, followed by the uploaded document names as subsequent column headers (e.g., "Uploaded Document 1 Name" and "Uploaded Document 2 Name").  
                    - Row fields should include the actual values for the following fields: Policy Number, Policy Effective Date, Policy Issue Date, Schedule of Forms and Endorsements, Common Policy Declarations and Conditions, Declarations, Coverage, and Endorsements.  

                    Ensure the following:  
                    1.The Policy Number must follow one of the patterns below:
                    - Two alphabetic characters, a space, followed by two digits, a space, repeated thrice (e.g., AG 00 02 30 06).
                    - Three alphabetic characters, a space, followed by two digits, a space, repeated twice (e.g., CNI 90 22 11 22).
                    - Two alphabetic characters, a space, another two alphabetic characters, a space, followed by two digits, a space, and repeated once (e.g., CG DS 01 10 01).
                    - Two alphabetic characters, a space, one alphabetic character, a space, followed by three digits, a space, then two digits, a space, and two digits (e.g., IL U 002 05 10).
                      The Policy Number is usually found in the header or footer of the document. Ensure to search these sections carefully when extracting the number.
                    2. If any field is missing or not found in a document, include only the data from the fields that are present. Use as much detail as possible, up to 100 words, in each field that is available. Leave missing fields blank in the table.  
                    3. Perform a comparison for each field across all uploaded documents to ensure consistency.

                    For Specific Fields:  
                    - Schedule of Forms and Endorsements: Get data based on the list of policy forms, schedules, and endorsements by line of business.  
                    - Declarations: Include line of coverage-specific declarations (general liability, business auto, business income, contractors equipment).  
                    - Coverages: Provide a comparison of coverages offered by the underlying policy (general liability, business auto, business income, contractors equipment).  
                    - Endorsements: List the name of each endorsement included in the document with a summary of how each endorsement affects the underlying policy.  

                    Response Format:  
                    - Provide the extracted data in a table format as described above.  
                    - Include an "Overall Summary" section directly under the table. This summary must summarise the table data's important observations across the documents, titled as summary.  

                    Additional Note:  
                    If questions are asked where there is no relevant context available, simply respond with:  
                    "That's a great question! I'm not sure of the answer right now. Can you ask your question a different way? I am excited to assist you further!"

                    Context: {context}  

                    {chat_history}  
                    Assistant:"""


                    
                    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')
         
                               
                    result = es.search(
                        index=forms_index,
                        query={"match": {"session_id": session_id}},
                        sort="lastmodified_at:asc",
                    )
                    
                    if result and len(result["hits"]["hits"]) > 0:
                        doc_items = [
                            json.loads(document["_source"]["history"])
                            for document in result["hits"]["hits"]
                        ]
                    else:
                        doc_items = []   
                        

                    count = 0
                    docs = {}

                    for i in doc_items:
                        count += 1
                        doc_page = []
                        
                        for j in i:
                            content = j.get('page_content', {})  # Ensure it's a dictionary
                            meta_data = j.get('metadata',{})
                            parsed_data = json.loads(meta_data)  # Convert string to dictionary
                            meta_data = parsed_data.get("source")  # Extract "source" key value
                            
                            if isinstance(content, dict):
                                value = next((v for v in content.values() if isinstance(v, str)), '')  # Extract first string value
                            else:
                                value = str(content)  # Convert non-dict content to string
                            
                            # print(value)
                            doc_page.append(value)

                        # docs[f'doc_{count}'] = doc_page  # Store the extracted text
                        docs[f'doc_{count}'] = {"content" : "\n\n".join(doc_page) ,
                                                'meta_data': meta_data}

                       
                    # Extract table structure from Excel
                    excel_path = "/var/www/html/AlliBot_30/table structure.xlsx"
                    table_structure = extract_reference_table(excel_path)

                    # Extract structured data for each PDF
                    pdf1_text = docs.get('doc_1').get('content')
                    pdf2_text = docs.get('doc_2').get('content')
                    print("pdf passed")

                    pdf1_name = docs.get('doc_1').get('meta_data')
                    pdf2_name = docs.get('doc_2').get('meta_data')
                    print("pdf name passed")

                    # Compare the extracted tables
                    comparison_prompt = f"""
                    The extracted structured data for the file name 1 - {pdf1_name}:
                    {pdf1_text}

                    The extracted structured data for the file name 2 - {pdf2_name}:
                    {pdf2_text}

                    Compare both documents and highlight differences and give me a table like response as in this excel {table_structure}.
                    - start with 'Here’s a comparison of the two policies:' and don't mention anything than that. 
                    - Don't mention 'based on the structured data provided.' or anything based on the structure in the response.
                    - give me the name of the file names along with the extesion of the file too.
                    - Don't provide me the file name exclusively in a different field instead give as a topic of a column
                    - Give me the summary of the 2 policies in the last row of the table - Important Don't miss this
                    - give me the response in html and inside the cell give me proper line spacing.
                    - Give the form numbers in seperate lines.
                    - In the ‘Schedule of Forms and Endorsements’ row, list each form number on a separate line.
                    """

               

                    assistant_response = text_generation_google_gemini_api(comparison_prompt)
                    assistant_response = assistant_response.get("candidates")[0].get("content").get("parts")[0].get("text")

                   
                    assistant_response = assistant_response.replace("Here’s a comparison of the two policies:", "")
                    assistant_response = assistant_response.replace("```html", "")
                    assistant_response = assistant_response.replace("```", "")
                                   
                    add_system_message_to_es(candidate,username,session_id,assistant_response)
                    
                    return JsonResponse({'response': assistant_response,'session_id':session_id})                    
                    
                else:
                    return JsonResponse({
                        'response': "The Question you asked violates AlliBot usage policies.Don't repeat this from blocking of AlliBot account.",
                        'session_id':session_id,
                        'flagged_categories': flagged_categories
                    })     
                
    except Exception as e:
        custom_log("error",f"error at formscompare: {e}")
        return JsonResponse({'error': str(e)})
    
    


@csrf_exempt
def activate_query_conversation(request):
    """Tell the client that query-conversation mode may be activated.

    Accepts POST only; every other HTTP method receives a 405 response.
    """
    # Guard clause: reject non-POST requests up front.
    if request.method != "POST":
        return JsonResponse({"error": "Method not allowed"}, status=405)

    try:
        return JsonResponse({"activate_conv": True})
    except Exception as e:
        # Surface any failure to the caller as a 500 with the message text.
        return JsonResponse({"error": str(e)}, status=500)



@csrf_exempt
def update_thumbs_flag(request):
    """Record user feedback ('good', 'bad' or 'no feedback') on a chat answer.

    POST parameters: session_id, indexname, answer, thumbs_flag.
    Every Elasticsearch document belonging to the session whose stored
    'answer' field equals the submitted answer gets its 'feedback' field
    set to thumbs_flag.  Returns a JSON status payload in all cases.
    """
    session_id = None
    answer = None
    try:
        if request.method != 'POST':
            # Bug fix: non-POST requests previously fell through and the view
            # returned None, which Django rejects -- answer explicitly instead.
            return JsonResponse({'status': 'error', 'message': 'Method not allowed'}, status=405)

        print('thumbs flag')
        session_id = request.POST.get('session_id')
        index_name = request.POST.get('indexname')
        answer = request.POST.get('answer')
        thumbs_flag = request.POST.get('thumbs_flag')
        # Brief pause before searching.  NOTE(review): presumably this waits
        # for Elasticsearch's near-real-time refresh so a just-indexed answer
        # is visible -- confirm whether an explicit index refresh would be safer.
        sleep(2)

        response = es.search(
            index=index_name,
            body={
                "query": {"term": {"session_id.keyword": session_id}},
                "size": 1000,  # upper bound on documents expected per session
            },
        )

        total_docs = response['hits']['total']['value']
        if total_docs > 0:
            for hit in response['hits']['hits']:
                doc_id = hit['_id']
                if 'answer' in hit['_source']:
                    if hit['_source']['answer'] == answer:
                        es.update(
                            index=index_name,
                            id=doc_id,
                            body={
                                # Feedback flag: 'good', 'bad', or 'no feedback'.
                                "doc": {"feedback": thumbs_flag},
                                "doc_as_upsert": True,  # create the doc if it doesn't exist
                            },
                        )
            return JsonResponse({'status': 'success', 'message': 'Document updated'})
        else:
            return JsonResponse({'status': 'failure', 'message': 'No documents found'}, status=404)

    except NotFoundError:
        # Bug fix: the index does not exist -- previously this handler only
        # printed and returned None; now it reports a proper 404 response.
        print(f"Error: No documents found for session_id {session_id} and answer '{answer}'.")
        return JsonResponse({'status': 'failure', 'message': 'Index not found'}, status=404)

@csrf_exempt
def compforms_lettergen(request):
    """Generate a comparison letter from the ISO-form documents stored for a session.

    Pulls the documents previously uploaded for `session_id` from the
    `forms_index` Elasticsearch index, formats them into a letter-generation
    prompt template, and returns the model's letter as JSON.

    NOTE(review): `org_name` and `mailid` are read but only used inside the
    commented-out token-accounting block below; `user_prompt` is used solely
    for input token counting and is never sent to the model -- the formatted
    `template` is the only text passed to `get_resptable`.  Confirm this is
    intentional.
    """
    if request.method == 'POST':
        session_id = request.POST.get('session_id')
        candidate =request.POST.get('candidate_name')
        org_name=request.POST.get('organization_name')
        mailid=request.session.get('email')      
        
        user_prompt = """Analyze the information provided from both ISO forms and generate a letter emphasizing the key similarities, differences, and insights. Ensure the letter clearly reflects the comparison, maintaining adherence to the formats specified in the ISO forms. Include the filename(s) of the ISO forms within your response where relevant."""


        # Refresh the session's created-at timestamp, then count the prompt's
        # tokens (cl100k_base encoding) for the accounting code further down.
        update_create_at_with_sessionid(es_index_name, session_id)
        input_token_cost_gpt4o = num_tokens_from_string(user_prompt, 'cl100k_base')
        
        template = """Analyze the information provided from both ISO forms and generate a letter emphasizing the key similarities, differences, and insights. Ensure the letter clearly reflects the comparison, maintaining adherence to the formats specified in the ISO forms. Include the filename(s) of the ISO forms within your response where relevant.  

        Context: {context}  

        {chat_history}  
        Assistant:"""
        
        
        
        # Freshly constructed memory: the chat_history loaded below is empty,
        # so the template is effectively filled with context only.
        memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True, output_key='answer')

                   
        # Fetch all uploaded-document records for this session, oldest first.
        result = es.search(
            index=forms_index,
            query={"match": {"session_id": session_id}},
            sort="lastmodified_at:asc",
        )
        
        if result and len(result["hits"]["hits"]) > 0:
            # Each hit stores its document payload as a JSON string in "history".
            doc_items = [
                json.loads(document["_source"]["history"])
                for document in result["hits"]["hits"]
            ]
        else:
            doc_items = []   
            
        uploaded_document_docs = []
        if doc_items:
            # Only the first element of each stored history entry is used.
            for i in doc_items:
                page_content = str(i[0].get('page_content')) if i[0].get('page_content') else ""

                # Ensure metadata is a dictionary
                metadata = i[0].get('metadata')
                if not isinstance(metadata, dict):
                    # Handle the case where metadata is not a dict
                    # Example: convert to dict if it's a stringified JSON
                    try:
                        metadata = json.loads(metadata) if isinstance(metadata, str) else {}
                    except (json.JSONDecodeError, TypeError):
                        metadata = {}

                # NOTE(review): BaseDocument is presumably a Document-like
                # wrapper defined elsewhere in this module -- confirm.
                uploaded_document_docs.append(BaseDocument(page_content=page_content, metadata=metadata))
    
        # combined_docs = uploaded_document_docs + primary_docs_text
        combined_docs = uploaded_document_docs
        # custom_log("info",'combined_docs%s'%combined_docs)


        memory_variables = memory.load_memory_variables({"input": ""})  # Use an empty input since we only need chat history
        chat_history = memory_variables['chat_history']
        formatted_template = template.format(
            context=combined_docs, 
            chat_history=chat_history,
            # question=user_prompt
        )
        
        # Send the fully formatted prompt to the model and count output tokens.
        assistant_response=get_resptable(formatted_template)
        print('assistant_response',assistant_response)
        output_token_cost_gpt4o =  num_tokens_from_string(assistant_response,'cl100k_base') 
        
        # Look up the user's token quota record by username.
        query = {
            "query": {
                "bool": {
                    "must": [
                        # {"term": {"user_id.keyword": alliance_number}},
                        {"term": {"username": candidate}},
                    ]
                }
            }
        }

        # Search the index for matching records
        response = es.search(index=user_details_index, body=query)

        assigned_tokens=0
        overall_tokens=0
        # Process the results
        # NOTE(review): the quotas read here are currently unused because the
        # enforcement block below is commented out.
        for hit in response["hits"]["hits"]:
            assigned_tokens = hit["_source"].get("assigned_tokens", 0)
            overall_tokens = hit["_source"].get("overall_tokens", 0)
        
        # custom_log("info",'assigned_tokens %s'%assigned_tokens)
        # custom_log("info",'overall_tokens %s'%overall_tokens)
        # custom_log("info",'output_token_cost_gpt4o %s'%output_token_cost_gpt4o)

        # if assigned_tokens-(overall_tokens+output_token_cost_gpt4o+input_token_cost_gpt4o) <0:
        #     print('if satisfied',candidate)
        #     print('output_token_cost_gpt4o',output_token_cost_gpt4o)

        #     response = es.search(
        #     index=user_details_index,
        #     body={
        #             "query": {
        #                 "bool": {
        #                     "must": [
        #                         # {"term": {"user_id.keyword": alliance_number}},
        #                         {"term": {"username": candidate}},
        #                     ]
        #                 }
        #             },
        #         }
        #     )
        
        #     if response['hits']['total']['value'] > 0:
        #         userdoc_id = response['hits']['hits'][0]['_id']
        #         print('userdoc_id',userdoc_id)
        #         # Use script to either add or update 'limits_token'
        #         es.update(
        #             index=user_details_index,
        #             id=userdoc_id,
        #             body={
        #                 "script": {
        #                     "source": """
        #                         if (ctx._source.containsKey('limits_token')) {
        #                             // If limits_token exists, update its value
        #                             ctx._source.limits_token = params.limits_token;
        #                         } else {
        #                             // If limits_token doesn't exist, create it and set the value
        #                             ctx._source['limits_token'] = params.limits_token;
        #                         }
        #                     """,
        #                     "params": {
        #                         "limits_token": 0
        #                     }
        #                 }
        #             }
        #         )

            
        #     # Search the index for matching records
        #     response = es.search(index=user_details_index, body=query)

        #     return JsonResponse({'response': 'Your token limit has been reached.','session_id':session_id})
        # else:
            # custom_log("info",'output_token_cost_gpt4o to sys es %s'%output_token_cost_gpt4o)
            #org_token(input_token_cost_gpt4o, output_token_cost_gpt4o, org_name)
            # custom_log("info",'output_token_cost_gpt4o org es %s'%output_token_cost_gpt4o)
            #user_org_tokens(org_name,candidate,input_token_cost_gpt4o,output_token_cost_gpt4o,mailid)
        return JsonResponse({'response': assistant_response,'session_id':session_id})   
    return JsonResponse({'error': 'Invalid request method'}, status=405)

@csrf_exempt
def text_to_speech(request):
    """Convert POSTed text to speech via OpenAI TTS and return base64 MP3 audio.

    POST parameter: 'message' (the text to speak).  Responds with
    {'audio': <base64 string>} on success, a 400 when no text is supplied,
    a 500 on any API/processing failure, and a 405 for non-POST methods.
    """
    # Bug fix: 'base64' is never imported at module level, so the encode call
    # below raised NameError and the view always answered 500.  Import locally
    # to keep the fix self-contained.
    import base64

    if request.method == 'POST':
        input_text = request.POST.get('message')
        if not input_text:
            return JsonResponse({'error': 'No input text provided'}, status=400)

        try:
            # NOTE(review): 'openai' is assumed to be a module-level client
            # initialised elsewhere in this file (the header only imports the
            # OpenAI class) -- confirm it exists before this view runs.
            response = openai.audio.speech.create(
                model="tts-1",
                voice="alloy",
                input=input_text
            )
            # Stream the audio content to memory
            audio_content = response.content
            # Base64-encode so the binary audio can travel inside JSON.
            base64_encoded_audio = base64.b64encode(audio_content).decode('utf-8')

            return JsonResponse({'audio': base64_encoded_audio})
        except Exception as e:
            return JsonResponse({'error': str(e)}, status=500)
    return JsonResponse({'error': 'Invalid request method'}, status=405)


@csrf_exempt
def speech_to_text(request):
    """Transcribe an uploaded audio file with OpenAI Whisper.

    Expects a POST with a 'file' upload; returns {'transcription': <text>}.
    """
    # Guard clauses: method check first, then the presence of the upload.
    if request.method != 'POST':
        return JsonResponse({'error': 'Invalid request method'}, status=405)
    if 'file' not in request.FILES:
        return JsonResponse({'error': 'No file provided'}, status=400)

    uploaded = request.FILES['file']
    try:
        # Pull the in-memory upload into raw bytes for the API call.
        payload = uploaded.read()

        transcription = OpenAI().audio.transcriptions.create(
            model="whisper-1",
            file=(uploaded.name, payload),
            response_format="text",
        )
        return JsonResponse({'transcription': transcription})
    except Exception as e:
        return JsonResponse({'error': str(e)}, status=500)


def today_messages(request) -> JsonResponse:
    """Return the requesting user's chat sessions that were active today (UTC).

    The ``X-Username`` header doubles as an app switch ('image_logs',
    'doc_logs', 'forms_log'; anything else means the default chat log) and
    ``X-Candidate-Name`` names the customer.  Sessions are filtered to those
    whose ``lastmodified_at`` (epoch milliseconds) falls within today, then
    each session's transcript is rebuilt from Elasticsearch.

    Returns:
        JsonResponse: ``{'messages': {'messages': [...]}}`` where each item
        carries ``session_id``, ``chat_history``, ``content`` (chat display
        names) and ``app_switch``; on any exception, ``{'error': ...}`` with
        HTTP 500.
    """
    try:
        t=[]
        username = request.headers.get('X-Username')
        customer = request.headers.get('X-Candidate-Name')

        # Image chats live in their own index; every other app shares the
        # main chat index.
        if username =='image_logs':
            test = get_messages_by_user_and_session(username,imtext_index,customer)
        else:
            test = get_messages_by_user_and_session(username,es_index_name,customer)
        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        # Keep only sessions last touched today (lastmodified_at is epoch millis).
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_today <= lastmodified_at_datetime <= current_datetime:
                t.append(item)
        # print('to',t)
        if t:
            #session_ids = list(set(d["session_id"] for d in t))
            if username =='image_logs':
                # OrderedDict.fromkeys de-duplicates while preserving order;
                # [::-1] below walks sessions newest-first.
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    
                    # Rebuild the multimodal transcript: each stored message
                    # holds a list of typed content parts (text / image_url).
                    tot_hist=[]
                    imtex={}
                    for document in result["hits"]["hits"]:
                        
                        for j in document["_source"]["messages"]:
                                # custom_log("info",'retrived data %s'%j)
                                for i in j['data']['content']:
                                    # custom_log("info",'second retrived %s'%i)
                                    if i['type']=='ai':
                                        imtex={'assistant': i['data'][0]['text']}
                                    elif i['type']=='system':
                                        # NOTE(review): a data URI normally has a comma
                                        # after 'base64' — confirm the stored url
                                        # already supplies it.
                                        imtex={'system': "data:image/jpeg;base64" + i['data'][1]['image_url']['url']}
                                    elif i['type']=='human':
                                        # Human parts encode an uploaded-file marker,
                                        # an inline image, or plain text.
                                        if 'Filename:' in i['data'][0]['text']:
                                            filename=i['data'][0]['text'].split('Filename:')[-1].strip()
                                            imtex = {'file':filename}
                                        elif len(i['data'])==2 and 'image_url' in i['data'][1]:
                                            imtex={'system': "data:image/jpeg;base64" + i['data'][1]['image_url']['url']}
                                        else:
                                            imtex = {'user': i['data'][0]['text']}

                                    tot_hist.append(imtex)
                    # Drop empty placeholders left by unrecognized part types.
                    tot_hist = [item for item in tot_hist if item]
                    

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history':tot_hist,
                        'content': chat_id_names,
                        'app_switch':'image_logs'

                    }
                    
                    items.append(yest_dict)
            
            elif username =='doc_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    
                    # Filenames uploaded in this session come first in the history.
                    result1 = es.search(
                        index=doc_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    # NOTE(review): session_hist1 is only assigned when result1
                    # has hits; an empty result leaves it unbound (the NameError
                    # is swallowed by the outer except).
                    if result1 and len(result1["hits"]["hits"]) > 0:
                        session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                    
                    tot_hist=[]
                    chat_h={}
                    for i in session_hist1:
                        chat_h = {
                            'file': i}
                        tot_hist.append(chat_h)
                    
                    # Then append the actual chat exchange for the session.
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        # NOTE(review): a generator of json.loads() results is fed to
                        # messages_from_dict — this only round-trips correctly if each
                        # 'history' field decodes to a single message dict; confirm
                        # the stored format against the writer side.
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        # tot_hist=[]
                        # chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'doc_logs'

                        }
                        
                        items.append(yest_dict)
            elif username =='forms_log':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    print('id_user',id_user)
                    result1 = es.search(
                        index=forms_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    # NOTE(review): same unbound session_hist1 risk as the
                    # doc_logs branch when result1 has no hits.
                    if result1 and len(result1["hits"]["hits"]) > 0:
                        session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                    
                    tot_hist=[]
                    chat_h={}
                    for i in session_hist1:
                        chat_h = {
                            'file': i}
                        tot_hist.append(chat_h)
                    
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        # tot_hist=[]
                        # chat_h={}
                        # print('session_hist',session_hist)
                        # [1:] skips the first stored message — presumably the
                        # forms system prompt; confirm against the writer side.
                        for i in session_hist[1:]:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == SystemMessage:
                                 chat_h= {
                                     'system': i.content}
                            elif type(i) == AIMessage:
                                  chat_h= {
                                      'assistant': i.content}

                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'forms_log'
                        }
                        items.append(yest_dict)
                        
            else:
                # Default ('alli_log') chat application.
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in t))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        tot_hist=[]
                        chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'alli_log'
                        }
                        
                        items.append(yest_dict)
                

            # Returning the first message content and the session ID for simplicity
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}
        return  JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        # custom_log("info",'today sec error %s'%e)
        # Log the error and return a JSON error response
        return JsonResponse({"error": str(e)}, status=500)

def yesterday_messages(request) -> JsonResponse:
    """Return the requesting user's chat sessions last active yesterday (UTC).

    Mirrors ``today_messages`` but windows ``lastmodified_at`` to
    [start of yesterday, end of yesterday].  ``X-Username`` selects the
    application log ('image_logs', 'doc_logs', 'forms_log', or the default
    chat) and ``X-Candidate-Name`` names the customer.

    Returns:
        JsonResponse: ``{'messages': {'messages': [...]}}`` items with
        ``session_id``, ``chat_history``, ``content`` and ``app_switch``;
        ``{'error': ...}`` with HTTP 500 on any exception.
    """
    try:
        y=[]
        username = request.headers.get('X-Username')
        customer = request.headers.get('X-Candidate-Name')

        # Image chats live in their own index; other apps share the main one.
        if username =='image_logs':
            test = get_messages_by_user_and_session(username,imtext_index,customer)
        else:
            test = get_messages_by_user_and_session(username,es_index_name,customer)
        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_yesterday = start_of_today - timedelta(days=1)
        end_of_yesterday = start_of_today - timedelta(microseconds=1) 
        # Keep only sessions last touched yesterday (lastmodified_at is epoch millis).
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_yesterday <= lastmodified_at_datetime <= end_of_yesterday:
                y.append(item)
        if y:
            if username =='image_logs':
                # De-duplicate preserving order; [::-1] walks newest-first.
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    
                    # Rebuild the multimodal transcript from typed content parts.
                    tot_hist=[]
                    imtex={}
                    for document in result["hits"]["hits"]:
                        for j in document["_source"]["messages"]:
                                for i in j['data']['content']:
                                    if i['type']=='ai':
                                        imtex={'assistant': i['data'][0]['text']}
                                    elif i['type']=='system':
                                        # NOTE(review): a data URI normally has a comma
                                        # after 'base64' — confirm the stored url supplies it.
                                        imtex={'system': "data:image/jpeg;base64" + i['data'][1]['image_url']['url']}
                                    elif i['type']=='human':
                                        if 'Filename:' in i['data'][0]['text']:
                                            filename=i['data'][0]['text'].split('Filename:')[-1].strip()
                                            imtex = {'file':filename}
                                        elif len(i['data'])==2 and 'image_url' in i['data'][1]:
                                            imtex={'system': "data:image/jpeg;base64" + i['data'][1]['image_url']['url']}
                                        else:
                                            imtex = {'user': i['data'][0]['text']}

                                    tot_hist.append(imtex)
                    # Drop empty placeholders left by unrecognized part types.
                    tot_hist = [item for item in tot_hist if item]
                    

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history':tot_hist,
                        'content': chat_id_names,
                        'app_switch':'image_logs'
                    }
                    
                    items.append(yest_dict)
            
            elif username =='doc_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    
                    # Filenames uploaded in the session come first in the history.
                    result1 = es.search(
                        index=doc_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    # NOTE(review): session_hist1 stays unbound when result1 has
                    # no hits (NameError swallowed by the outer except).
                    if result1 and len(result1["hits"]["hits"]) > 0:
                        session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                    
                    tot_hist=[]
                    chat_h={}
                    for i in session_hist1:
                        chat_h = {
                            'file': i}
                        tot_hist.append(chat_h)
                    
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        # NOTE(review): generator of json.loads() results passed to
                        # messages_from_dict — only valid if each 'history' decodes
                        # to a single message dict; confirm the stored format.
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        # tot_hist=[]
                        # chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'doc_logs'
                        }
                        
                        items.append(yest_dict)
            elif username =='forms_log':
                 session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
                 yest_dict = {}
                 items=[]
                 for id_user in session_ids[::-1]:
                     print('id_user',id_user)
                     result1 = es.search(
                         index=forms_index,
                         query={"match": {"session_id": id_user}},  # Use match instead of term
                         sort="lastmodified_at:asc",
                     )
                     # NOTE(review): same unbound session_hist1 risk as doc_logs.
                     if result1 and len(result1["hits"]["hits"]) > 0:
                         session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                     
                     tot_hist=[]
                     chat_h={}
                     for i in session_hist1:
                         chat_h = {
                             'file': i}
                         tot_hist.append(chat_h)
                     
                     result = es.search(
                         index=es_index_name,
                         query={"match": {"session_id": id_user}},  # Use match instead of term
                         sort="lastmodified_at:asc",
                     )
                     if result and len(result["hits"]["hits"]) > 0:
                         session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                           for document in result["hits"]["hits"])
     
                         # tot_hist=[]
                         # chat_h={}
                         # print('session_hist',session_hist)
                         # [1:] skips the first stored message — presumably the
                         # forms system prompt; confirm against the writer side.
                         for i in session_hist[1:]:
                             if type(i) == HumanMessage:
                                 chat_h = {
                                     'user': i.content}
                             elif type(i) == SystemMessage:
                                  chat_h= {
                                      'system': i.content}
                             elif type(i) == AIMessage:
                                   chat_h= {
                                       'assistant': i.content}

                             tot_hist.append(chat_h)
                         
                         chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
     
                         yest_dict = {
                             'session_id': id_user,
                             'chat_history':tot_hist,
                             'content': chat_id_names,
                             'app_switch':'forms_log'
                         }
                         items.append(yest_dict)
            else:
                # session_ids = list(set(d["session_id"] for d in y))
                # Default ('alli_log') chat application.
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in y))
    
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        tot_hist=[]
                        chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'alli_log'

                        }
                        
                        items.append(yest_dict)
                   

            # Returning the first message content and the session ID for simplicity
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}
        return  JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        # Log the error and return a JSON error response
        # custom_log("info",'yesterday sec error %s'%e)

        return JsonResponse({"error": str(e)}, status=500)
    
def previous_7_days(request) -> JsonResponse:
    """Return sessions last active 2–7 days ago (UTC) for the requesting user.

    Same shape as ``today_messages``/``yesterday_messages`` but windows
    ``lastmodified_at`` to [7 days ago, start of yesterday).  ``X-Username``
    selects the application log and ``X-Candidate-Name`` names the customer.

    Returns:
        JsonResponse: ``{'messages': {'messages': [...]}}`` items with
        ``session_id``, ``chat_history``, ``content`` and ``app_switch``;
        ``{'error': ...}`` with HTTP 500 on any exception.
    """
    try:
        s=[]
        username = request.headers.get('X-Username')
        customer = request.headers.get('X-Candidate-Name')

        # Image chats live in their own index; other apps share the main one.
        if username =='image_logs':
            test = get_messages_by_user_and_session(username,imtext_index,customer)
        else:
            test = get_messages_by_user_and_session(username,es_index_name,customer)        # Get current date and time in UTC
        current_datetime = datetime.now(timezone.utc)
        
        # Calculate the start of today and yesterday in UTC
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_yesterday = start_of_today - timedelta(days=1)
        start_of_seven_days_ago = start_of_today - timedelta(days=7)     
        # Keep sessions whose lastmodified_at (epoch millis) falls in the
        # half-open window [7 days ago, start of yesterday).
        for item in test:
            lastmodified_at_datetime = datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            if start_of_seven_days_ago <= lastmodified_at_datetime < start_of_yesterday:
                s.append(item)

        if s:
            if username =='image_logs':
                # De-duplicate preserving order; [::-1] walks newest-first.
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in s))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=imtext_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    
                    # Rebuild the multimodal transcript from typed content parts.
                    tot_hist=[]
                    imtex={}
                    for document in result["hits"]["hits"]:
                        for j in document["_source"]["messages"]:
                                for i in j['data']['content']:
                                    if i['type']=='ai':
                                        imtex={'assistant': i['data'][0]['text']}
                                    elif i['type']=='system':
                                        # NOTE(review): a data URI normally has a comma
                                        # after 'base64' — confirm the stored url supplies it.
                                        imtex={'system': "data:image/jpeg;base64" + i['data'][1]['image_url']['url']}
                                    elif i['type']=='human':
                                        if 'Filename:' in i['data'][0]['text']:
                                            filename=i['data'][0]['text'].split('Filename:')[-1].strip()
                                            imtex = {'file':filename}
                                        elif len(i['data'])==2 and 'image_url' in i['data'][1]:
                                            imtex={'system': "data:image/jpeg;base64" + i['data'][1]['image_url']['url']}
                                        else:
                                            imtex = {'user': i['data'][0]['text']}

                                    tot_hist.append(imtex)
                    # Drop empty placeholders left by unrecognized part types.
                    tot_hist = [item for item in tot_hist if item]
                    

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)

                    yest_dict = {
                        'session_id': id_user,
                        'chat_history':tot_hist,
                        'content': chat_id_names,
                        'app_switch':'image_logs'

                    }
                    
                    items.append(yest_dict)
            
            elif username =='doc_logs':
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in s))
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    
                    # Filenames uploaded in the session come first in the history.
                    result1 = es.search(
                        index=doc_index,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    # NOTE(review): session_hist1 stays unbound when result1 has
                    # no hits (NameError swallowed by the outer except).
                    if result1 and len(result1["hits"]["hits"]) > 0:
                        # custom_log("info",'do ret %s'%result1["hits"]["hits"])
                        session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]

                    #     session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"] if 'filename' in document["_source"]]
                    # else:
                    #     session_hist1=[]
                    

                    tot_hist=[]
                    chat_h={}
                    for i in session_hist1:
                        chat_h = {
                            'file': i}
                        tot_hist.append(chat_h)

                    
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )

                    if result and len(result["hits"]["hits"]) > 0:
                        # NOTE(review): generator of json.loads() results passed to
                        # messages_from_dict — only valid if each 'history' decodes
                        # to a single message dict; confirm the stored format.
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        # tot_hist=[]
                        # chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
    
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'doc_logs'

                        }
                        
                        items.append(yest_dict)

            elif username =='forms_log':
                 session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in s))
                 yest_dict = {}
                 items=[]
                 for id_user in session_ids[::-1]:
                     print('id_user',id_user)
                     result1 = es.search(
                         index=forms_index,
                         query={"match": {"session_id": id_user}},  # Use match instead of term
                         sort="lastmodified_at:asc",
                     )
                     # NOTE(review): same unbound session_hist1 risk as doc_logs.
                     if result1 and len(result1["hits"]["hits"]) > 0:
                         session_hist1 = [document["_source"]["filename"] for document in result1["hits"]["hits"]]
                     
                     tot_hist=[]
                     chat_h={}
                     for i in session_hist1:
                         chat_h = {
                             'file': i}
                         tot_hist.append(chat_h)
                     
                     result = es.search(
                         index=es_index_name,
                         query={"match": {"session_id": id_user}},  # Use match instead of term
                         sort="lastmodified_at:asc",
                     )
                     if result and len(result["hits"]["hits"]) > 0:
                         session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                           for document in result["hits"]["hits"])
     
                         # tot_hist=[]
                         # chat_h={}
                         # print('session_hist',session_hist)
                         # [1:] skips the first stored message — presumably the
                         # forms system prompt; confirm against the writer side.
                         for i in session_hist[1:]:
                             if type(i) == HumanMessage:
                                 chat_h = {
                                     'user': i.content}
                             elif type(i) == SystemMessage:
                                  chat_h= {
                                      'system': i.content}
                             elif type(i) == AIMessage:
                                   chat_h= {
                                       'assistant': i.content}

                             tot_hist.append(chat_h)
                         
                         chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
     
                         yest_dict = {
                             'session_id': id_user,
                             'chat_history':tot_hist,
                             'content': chat_id_names,
                             'app_switch':'forms_log'
                         }
                         items.append(yest_dict)
                         
            else:
                # session_ids = list(set(d["session_id"] for d in y))
                # Default ('alli_log') chat application.
                session_ids=list(OrderedDict.fromkeys(d["session_id"] for d in s))
    
                yest_dict = {}
                items=[]
                for id_user in session_ids[::-1]:
                    result = es.search(
                        index=es_index_name,
                        query={"match": {"session_id": id_user}},  # Use match instead of term
                        sort="lastmodified_at:asc",
                    )
                    if result and len(result["hits"]["hits"]) > 0:
                        session_hist = messages_from_dict(json.loads(document["_source"]["history"])
                                                          for document in result["hits"]["hits"])
    
                        tot_hist=[]
                        chat_h={}
                        for i in session_hist:
                            if type(i) == HumanMessage:
                                chat_h = {
                                    'user': i.content}
                            elif type(i) == AIMessage:
                                 chat_h= {
                                     'assistant': i.content}
                            tot_hist.append(chat_h)
                        chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                        yest_dict = {
                            'session_id': id_user,
                            'chat_history':tot_hist,
                            'content': chat_id_names,
                            'app_switch':'alli_log'

                        }
                        
                        items.append(yest_dict)
                   

            # Returning the first message content and the session ID for simplicity
            formatted_yest_dict = {
                "messages": items
            }
        else:
            formatted_yest_dict = {"messages": []}

        return  JsonResponse({'messages': formatted_yest_dict})

    except Exception as e:
        # Log the error and return a JSON error response
        # custom_log("info",'previous 7 days error %s'%e)
        return JsonResponse({"error": str(e)}, status=500)


@csrf_exempt
def previous_30_days(request):
    """Return chat sessions whose last activity falls between 30 and 7 days ago.

    The caller is identified by the ``X-Username`` and ``X-Candidate-Name``
    request headers.  Special usernames select an application-specific source
    and history layout:

    * ``image_logs`` -- image chat read from ``imtext_index``
    * ``doc_logs``   -- uploaded filenames from ``doc_index`` followed by the
      regular chat history from ``es_index_name``
    * ``forms_log``  -- uploaded filenames from ``forms_index`` followed by the
      chat history including system messages (first stored message skipped)
    * anything else  -- plain chat history from ``es_index_name``

    Returns ``JsonResponse({'messages': {'messages': [
        {'session_id', 'chat_history', 'content', 'app_switch'}, ...]}})``
    or ``{'error': ...}`` with HTTP 500 on any failure.
    """
    try:
        username = request.headers.get('X-Username')
        customer = request.headers.get('X-Candidate-Name')

        # image_logs history lives in its own index; everything else shares es_index_name.
        if username == 'image_logs':
            test = get_messages_by_user_and_session(username, imtext_index, customer)
        else:
            test = get_messages_by_user_and_session(username, es_index_name, customer)

        # Window boundaries anchored at UTC midnight: [30 days ago, 7 days ago).
        current_datetime = datetime.now(timezone.utc)
        start_of_today = current_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_seven_days_ago = start_of_today - timedelta(days=7)
        start_of_thirty_days_ago = start_of_today - timedelta(days=30)

        # lastmodified_at is stored as epoch milliseconds.
        s = [
            item for item in test
            if start_of_thirty_days_ago
            <= datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            < start_of_seven_days_ago
        ]

        if s:
            # De-duplicate session ids while preserving order; iterate reversed
            # so the most recent sessions are emitted first.
            session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in s))
            items = []

            def _search(index, session_id):
                # Oldest-first so the reconstructed history reads chronologically.
                return es.search(
                    index=index,
                    query={"match": {"session_id": session_id}},
                    sort="lastmodified_at:asc",
                )

            def _filenames(index, session_id):
                """Uploaded filenames for a session as ``[{'file': name}, ...]``."""
                result = _search(index, session_id)
                if result and len(result["hits"]["hits"]) > 0:
                    return [{'file': doc["_source"]["filename"]}
                            for doc in result["hits"]["hits"]]
                # BUGFIX: previously the filename list variable was left unset
                # (NameError on the first session) or stale (leaking the prior
                # session's files) when a session had no uploads.
                return []

            def _chat_turns(session_id, include_system=False, skip_first=False):
                """Chat history as ``[{'user'|'assistant'[|'system']: text}, ...]``.

                Returns ``None`` when the session has no stored history, in
                which case the caller omits the session from the response
                (matching the original behavior).
                """
                result = _search(es_index_name, session_id)
                if not (result and len(result["hits"]["hits"]) > 0):
                    return None
                session_hist = messages_from_dict(
                    json.loads(doc["_source"]["history"])
                    for doc in result["hits"]["hits"]
                )
                if skip_first:
                    # forms_log hides the first stored message (presumably a
                    # bootstrap/system prompt -- behavior preserved from the
                    # original implementation; confirm against the writer side).
                    session_hist = session_hist[1:]
                turns = []
                for msg in session_hist:
                    if type(msg) == HumanMessage:
                        turns.append({'user': msg.content})
                    elif type(msg) == AIMessage:
                        turns.append({'assistant': msg.content})
                    elif include_system and type(msg) == SystemMessage:
                        turns.append({'system': msg.content})
                    # BUGFIX: unrecognized message types used to re-append the
                    # previous turn dict; they are now skipped.
                return turns

            if username == 'image_logs':
                for id_user in session_ids[::-1]:
                    result = _search(imtext_index, id_user)
                    tot_hist = []
                    # Image chat documents store langchain-style multimodal
                    # message payloads under _source.messages[*].data.content.
                    for document in result["hits"]["hits"]:
                        for message in document["_source"]["messages"]:
                            for part in message['data']['content']:
                                if part['type'] == 'ai':
                                    tot_hist.append({'assistant': part['data'][0]['text']})
                                elif part['type'] == 'system':
                                    # NOTE(review): a data URI normally has a comma
                                    # after "base64" -- preserved as-is since the
                                    # consumer may compensate; verify with frontend.
                                    tot_hist.append({'system': "data:image/jpeg;base64" + part['data'][1]['image_url']['url']})
                                elif part['type'] == 'human':
                                    if 'Filename:' in part['data'][0]['text']:
                                        filename = part['data'][0]['text'].split('Filename:')[-1].strip()
                                        tot_hist.append({'file': filename})
                                    elif len(part['data']) == 2 and 'image_url' in part['data'][1]:
                                        tot_hist.append({'system': "data:image/jpeg;base64" + part['data'][1]['image_url']['url']})
                                    else:
                                        tot_hist.append({'user': part['data'][0]['text']})
                                # BUGFIX: unknown part types used to re-append the
                                # stale previous entry; they are now skipped.

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                    items.append({
                        'session_id': id_user,
                        'chat_history': tot_hist,
                        'content': chat_id_names,
                        'app_switch': 'image_logs',
                    })

            elif username == 'doc_logs':
                for id_user in session_ids[::-1]:
                    tot_hist = _filenames(doc_index, id_user)
                    turns = _chat_turns(id_user)
                    if turns is None:
                        # No chat history: session omitted (matches original behavior).
                        continue
                    tot_hist.extend(turns)
                    items.append({
                        'session_id': id_user,
                        'chat_history': tot_hist,
                        'content': get_chat_id_names_by_session_id(chatname_index, id_user),
                        'app_switch': 'doc_logs',
                    })

            elif username == 'forms_log':
                for id_user in session_ids[::-1]:
                    tot_hist = _filenames(forms_index, id_user)
                    turns = _chat_turns(id_user, include_system=True, skip_first=True)
                    if turns is None:
                        continue
                    tot_hist.extend(turns)
                    items.append({
                        'session_id': id_user,
                        'chat_history': tot_hist,
                        'content': get_chat_id_names_by_session_id(chatname_index, id_user),
                        'app_switch': 'forms_log',
                    })

            else:
                for id_user in session_ids[::-1]:
                    turns = _chat_turns(id_user)
                    if turns is None:
                        continue
                    items.append({
                        'session_id': id_user,
                        'chat_history': turns,
                        'content': get_chat_id_names_by_session_id(chatname_index, id_user),
                        'app_switch': 'alli_log',
                    })

            formatted_previous30days_dict = {"messages": items}
        else:
            formatted_previous30days_dict = {"messages": []}

        return JsonResponse({'messages': formatted_previous30days_dict})

    except Exception as e:
        # Surface the failure to the client as JSON; consider server-side logging.
        return JsonResponse({"error": str(e)}, status=500)

@csrf_exempt
def older_than_30_days(request):
    """Return a paginated page of chat sessions last modified more than 30 days ago.

    Headers: ``X-Username`` selects the source index (``image_logs`` uses
    ``imtext_index``, everything else ``es_index_name``); ``X-Candidate-Name``
    scopes the lookup to one customer.  Query params ``offset`` (default 0)
    and ``limit`` (default 10) page through the sessions, newest first.

    Returns ``JsonResponse({'messages': {'messages': [...], 'has_more': bool}})``
    or ``{'error': ...}`` with HTTP 500 on failure.
    """
    try:
        username = request.headers.get('X-Username')
        customer = request.headers.get('X-Candidate-Name')

        # image_logs history lives in its own index; everything else shares es_index_name.
        if username == 'image_logs':
            test = get_messages_by_user_and_session(username, imtext_index, customer)
        else:
            test = get_messages_by_user_and_session(username, es_index_name, customer)

        # Cutoff anchored at UTC midnight, 30 days back.
        start_of_today = datetime.now(timezone.utc).replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_thirty_days_ago = start_of_today - timedelta(days=30)

        # lastmodified_at is epoch milliseconds; keep only entries older than the cutoff.
        s = [
            item for item in test
            if datetime.fromtimestamp(item['lastmodified_at'] / 1000, tz=timezone.utc)
            < start_of_thirty_days_ago
        ]

        # Newest first, so pagination walks backwards through time.
        s.sort(key=lambda item: item['lastmodified_at'], reverse=True)

        # Pagination parameters.
        offset = int(request.GET.get('offset', 0))
        limit = int(request.GET.get('limit', 10))

        # Python slicing already clamps out-of-range indices.
        paginated_older_messages = s[offset:offset + limit]
        paginated_older_messages = paginated_older_messages[::-1]  # oldest-first within the page

        # BUGFIX: the previous check `len(s) >= min(offset + limit, len(s))` was
        # always True (min(x, len(s)) <= len(s)), so clients could never detect
        # the last page. True only while entries remain beyond this page.
        has_more = offset + limit < len(s)

        if paginated_older_messages:
            session_ids = list(OrderedDict.fromkeys(d["session_id"] for d in paginated_older_messages))
            items = []
            for id_user in session_ids[::-1]:
                result = es.search(
                    index=es_index_name,
                    query={"match": {"session_id": id_user}},
                    sort="lastmodified_at:asc",  # oldest first -> chronological history
                )
                if result and len(result["hits"]["hits"]) > 0:
                    session_hist = messages_from_dict(
                        json.loads(document["_source"]["history"])
                        for document in result["hits"]["hits"]
                    )

                    # Every non-human message (AI or system) is labelled
                    # 'assistant' here -- preserved from the original.
                    tot_hist = [
                        {'user': m.content} if isinstance(m, HumanMessage) else {'assistant': m.content}
                        for m in session_hist
                    ]

                    chat_id_names = get_chat_id_names_by_session_id(chatname_index, id_user)
                    items.append({
                        'session_id': id_user,
                        'chat_history': tot_hist,
                        'content': chat_id_names
                    })

            formatted_older_dict = {"messages": items, "has_more": has_more}
        else:
            formatted_older_dict = {"messages": [], "has_more": False}

        return JsonResponse({'messages': formatted_older_dict})

    except Exception as e:
        print(f"Error in older_than_30_days view: {e}")
        return JsonResponse({"error": str(e)}, status=500)
