import datetime
import math
import os
import smtplib
import sys

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

import openai
import pymsteams
import requests
import tiktoken
from datasets import Dataset
from decouple import config
from elasticsearch import Elasticsearch
from langchain.chat_models import ChatOpenAI
from office365.runtime.auth.client_credential import ClientCredential
from office365.sharepoint.client_context import ClientContext
from ragas import evaluate
from ragas.metrics import answer_relevancy, faithfulness
from requests.auth import HTTPBasicAuth
# Make the per_logdir package importable before loading the project logger
sys.path.append(os.path.join(os.path.dirname(__file__), 'per_logdir'))

# Teams incoming-webhook connector card used for the low-score alerts below
myTeamsMessage = pymsteams.connectorcard("https://qubespacein.webhook.office.com/webhookb2/4aca0511-270e-4729-a2c0-acffe9e0a9f5@d996ac79-e80a-4fc8-be1a-8f891b569988/IncomingWebhook/8e0b6835e085450d83ddb41ed4f64e7d/b187de49-aa7e-4d44-8967-d6c7f25ae53e/V25Ea18EbPQIjnJ4K-Ju8byBC9W5rANDfcpyFYxAdZpn01")

from percentage_log import LogUtils
logger = LogUtils.getRootLogger()

# Elasticsearch connection and index names, read from .env via python-decouple
es = Elasticsearch(config('ES_URL'), request_timeout=300, retry_on_timeout=True)
es_index_name = config("es_index_name")
organ_token_ind = config("riea_token_index")
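# A minimal .env sketch for the three settings above (values are placeholders,
# not the real deployment values):
#   ES_URL=https://my-cluster.es.us-central1.gcp.cloud.es.io
#   es_index_name=chat-history-index
#   riea_token_index=org-token-usage-index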
def format_file_paths(file_paths_str):
    """Turn a comma-separated string of file paths into a numbered,
    newline-separated list."""
    file_paths = file_paths_str.split(", ")

    # Number each path, then join them one per line
    formatted_paths = [f"{idx}. {path}" for idx, path in enumerate(file_paths, start=1)]
    return '\n'.join(formatted_paths)
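# Illustrative only (the real input arrives as sys.argv[4]):
#   format_file_paths("docs/a.pdf, docs/b.pdf")
#   returns "1. docs/a.pdf\n2. docs/b.pdf"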



print('ind', es_index_name)

# Positional arguments supplied by the calling process:
#   1: user question  2: generated answer  3: retrieved context
#   4: comma-separated source paths  5: chat document ID  6: organisation ID
user_prompt = sys.argv[1]
answer = sys.argv[2]
context = sys.argv[3].split('_lc_kwargs')
meta_data_source = sys.argv[4]
document_id_user = sys.argv[5]
organ_id_user = sys.argv[6]


print('document_id_user', document_id_user)

formatted_file_paths = format_file_paths(meta_data_source)


def split_day_into_intervals(interval_hours):
    """Split a 24-hour day into consecutive intervals of the given length."""
    intervals = []
    start_time = datetime.datetime.strptime("00:00", "%H:%M")
    while start_time < datetime.datetime.strptime("23:59", "%H:%M"):
        end_time = start_time + datetime.timedelta(hours=interval_hours)
        intervals.append((start_time, end_time))
        start_time = end_time
    return intervals

def get_current_interval(interval_hours=2):
    """Return (index, start, end) of the interval containing the current UTC time."""
    current_time = datetime.datetime.utcnow().time()
    for idx, (start, end) in enumerate(split_day_into_intervals(interval_hours)):
        # The last interval ends at midnight of the next day, so its .time()
        # is 00:00; treat that case as end-of-day so late hours still match
        end_time = end.time() if end.time() > start.time() else datetime.time.max
        if start.time() <= current_time <= end_time:
            return idx + 1, start.strftime("%H:%M"), end.strftime("%H:%M")
    return None
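# Illustrative only: with the default 2-hour buckets, a UTC time of 09:30
# falls in the fifth bucket, so
#   get_current_interval(2)  ->  (5, "08:00", "10:00")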


sharp_path = 'https://scic.sharepoint.com/d365integration/Shared%20Documents/Chat%20Bot%20Export/excel/'
sharepoint_var = '\nPlease refer to this SharePoint link for the latest BotCoaching Power BI report:\n' + sharp_path + '\n'

def check_elastic_status():
    """Return True if the Elasticsearch cluster answers an authenticated GET."""
    basic = HTTPBasicAuth('elastic', '8oKIqy312EBsAPzWT64NUzji')
    response = requests.get('https://scic-elasticsearch.es.us-central1.gcp.cloud.es.io', auth=basic)
    return response.status_code == 200
    
def send_email(subject, message, To):
    """Send a plain-text alert email through the Office 365 SMTP relay."""
    from_email = 'allibot@scic.com'
    password = 'IgX8N7ruvFjWU6U!'  # Outlook app password
    msg = MIMEMultipart()
    msg['From'] = from_email
    msg['To'] = ",".join(To)
    msg['Subject'] = subject
    msg.attach(MIMEText(message, 'plain'))

    # Outlook SMTP server settings
    smtp_server = 'smtp.office365.com'
    smtp_port = 587
    server = None
    try:
        server = smtplib.SMTP(smtp_server, smtp_port)
        server.starttls()  # upgrade the connection to TLS before logging in
        server.login(from_email, password)
        server.sendmail(from_email, To, msg.as_string())
    except Exception as e:
        print(f"Error sending email: {str(e)}")
        logger.exception('Exception occurred due to %s' % e)
    finally:
        # Guard against the case where the SMTP connection itself failed
        if server is not None:
            server.quit()
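# Illustrative only (placeholder recipient list):
#   send_email("Allibot alert", "faithfulness score below threshold", ["ops@example.com"])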
            
def check_field_existence(index_name, doc_id, field_name):
    """Return True if the document exists and carries the given field."""
    try:
        document = es.get(index=index_name, id=doc_id)
    except Exception as e:
        logger.exception('Exception occurred in check_field function %s' % e)
        # Retry once if the cluster is reachable; otherwise give up
        if not check_elastic_status():
            return False
        try:
            document = es.get(index=index_name, id=doc_id)
        except Exception as retry_error:
            logger.exception('Retry failed in check_field function due to %s' % retry_error)
            return False

    # The field exists only if the document was found and holds it in _source
    return bool(document) and '_source' in document and field_name in document['_source']
    


def num_tokens_from_string(string: str, model: str) -> int:
    """Returns the number of tokens in a text string."""
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = len(encoding.encode(string))
    return num_tokens
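# Illustrative only: tiktoken resolves the encoding from the model name, so
#   num_tokens_from_string("bottom-up pricing", "gpt-3.5-turbo-1106")
# counts the same BPE tokens the model itself would consume for that string.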


# Ask the model for five paraphrases of the user's question; these are
# appended to the chat document's 'questions' field further below
quest_response = openai.chat.completions.create(
    model='gpt-3.5-turbo-1106',
    n=1,
    messages=[
        {"role": "system", "content": f"Can you provide 5 sentences similar to '{user_prompt}'?"},
        {"role": "user", "content": user_prompt},
    ])

# Token accounting: only the raw prompt and completion strings are counted,
# so these totals approximate the actual API usage
input_token_cost_gpt35 = num_tokens_from_string(user_prompt, 'gpt-3.5-turbo-1106')
output_token_cost_gpt35 = num_tokens_from_string(quest_response.choices[0].message.content, 'gpt-3.5-turbo-1106')
print('output', quest_response.choices[0].message.content)

# The generated paraphrases, as a single text block
questions = quest_response.choices[0].message.content
# Painless script that appends the generated questions to the document's
# 'questions' field, creating the field on first write. The value is wrapped
# in a list so that addAll() receives a collection on later updates.
update_script = {
    "script": {
        "source": """
            if (ctx._source.containsKey('questions')) {
                ctx._source.questions.addAll(params.value);
            } else {
                ctx._source.questions = params.value;
            }
        """,
        "lang": "painless",
        "params": {
            "value": [questions]
        }
    }
}
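# Illustrative only: run against a document whose _source already holds
#   {"questions": ["<earlier paraphrases>"]}
# the script appends the new block to that list, and on a document without
# the field it simply creates it as ["<new paraphrases>"]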

# Assemble a single-row ragas dataset from the question, answer and contexts
data_samples = {
    'question': [user_prompt],
    'answer': [answer],
    'contexts': [context],
}

# Score how faithful the answer is to the retrieved context
langchain_llm = ChatOpenAI(model_name="gpt-3.5-turbo-1106")
dataset = Dataset.from_dict(data_samples)
score = evaluate(dataset, metrics=[faithfulness], llm=langchain_llm)
if math.isnan(score['faithfulness']):
    # evaluate can yield NaN when no verdict could be formed; treat it as 0
    score['faithfulness'] = 0
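# Illustrative only: the evaluate() result supports dict-style access by
# metric name, e.g. a run might give score['faithfulness'] == 0.87, which
# is stored below as a percentage (87.0)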
def persist_evaluation_results():
    """Write the paraphrased questions, faithfulness percentage, token counts
    and source links to the chat document, then accumulate the token usage on
    the organisation's token-tracking document."""
    # Append the generated questions to the chat document
    es.update(index=es_index_name, id=document_id_user, body=update_script)
    existing_doc = es.get(index=es_index_name, id=document_id_user)

    # Update the percentage, token-usage and metadata fields
    existing_doc["_source"]["percentage"] = round(score['faithfulness'] * 100, 2)
    existing_doc["_source"]["input_token_accu"] = input_token_cost_gpt35
    existing_doc["_source"]["output_token_accu"] = output_token_cost_gpt35
    existing_doc["_source"]["gpt35_modelname"] = 'gpt-3.5-turbo-1106'
    existing_doc["_source"]["meta_data_source"] = formatted_file_paths

    # Re-index the updated document under the same ID
    es.index(index=es_index_name, id=document_id_user, body=existing_doc["_source"])

    # Roll the token counts up into the organisation document
    userorg_doc = es.get(index=organ_token_ind, id=organ_id_user)
    doc_source = userorg_doc['_source']
    logger.info('org doc_source %s' % doc_source)

    overall_tokens = doc_source.get('overall_tokens', 0)
    in_tokens = doc_source.get('input_tokens', 0)
    out_tokens = doc_source.get('output_tokens', 0)
    new_tokens = overall_tokens + input_token_cost_gpt35 + output_token_cost_gpt35

    update_doc = {
        'doc': {
            'overall_tokens': new_tokens,
            'input_tokens': in_tokens + input_token_cost_gpt35,
            'output_tokens': out_tokens + output_token_cost_gpt35,
        }
    }
    logger.info('org overall_tokens %s' % new_tokens)
    es.update(index=organ_token_ind, id=organ_id_user, body=update_doc)


try:
    persist_evaluation_results()
except Exception as e:
    logger.exception('Exception in es append function due to %s' % e)
    # Retry once; failures here are assumed to be transient Elasticsearch errors
    persist_evaluation_results()
    logger.info('Exception resolved on retry')

# Alert via Teams (and email below) when faithfulness falls under 0.5
if score['faithfulness'] < 0.5:
    # Create the card section
    myMessageSection = pymsteams.cardsection()

    # Section title
    myMessageSection.title("Warning: answer faithfulness evaluation below threshold")

    # Activity elements
    myMessageSection.activityTitle(f'Question: {user_prompt}')
    myMessageSection.activitySubtitle(f'Answer: {answer}')
    myMessageSection.activityText("The score is " + str(score['faithfulness']) +
                                  ". The percentage was derived from the context and the generated answer.")

    myMessageSection.text(f"Context: {context}")

    text1 = "SharePoint report link"
    url1 = sharp_path
    myTeamsMessage.text(text1)
    myTeamsMessage.addLinkButton(text1, url1)

    myTeamsMessage.addSection(myMessageSection)
    myTeamsMessage.text(" ")
    myTeamsMessage.send()
    
    subject = "Answer below threshold"
    message = f'''Warning: Answer relevancy evaluation below threshold
                  Question: {user_prompt}
                  Answer: {answer}
                  The answer is below threshold 0.5
                  Context: {context}
                  {sharepoint_var}'''
    to_email =['sountharya@qubespace.in'] 
    send_email(subject, message, to_email)
