import openai
from decouple import config
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from elasticsearch import Elasticsearch
import pymsteams
import sys,os
from datasets import Dataset 
from ragas.metrics import answer_relevancy,faithfulness
from ragas import evaluate
from langchain.chat_models import ChatOpenAI
import requests
from requests.auth import HTTPBasicAuth
# myTeamsMessage = pymsteams.connectorcard("https://qubespacein.webhook.office.com/webhookb2/4aca0511-270e-4729-a2c0-acffe9e0a9f5@d996ac79-e80a-4fc8-be1a-8f891b569988/IncomingWebhook/35a3fab06f984bc7ae5665a6dbba50a9/1db04433-eddc-4dcb-9620-12fd2cb80285")
# Make the local percentage_log package importable regardless of the cwd.
sys.path.append(os.path.join(os.path.dirname(__file__))+'/percentage_log/')
# MS Teams incoming-webhook connector card; used at the bottom of this script
# to alert the team when the evaluated score falls below the 0.5 threshold.
myTeamsMessage = pymsteams.connectorcard(
    "https://scic.webhook.office.com/webhookb2/0f876dfb-d297-42b8-bcba-cb2a70ffcb1c@95f69e7d-1811-4ab9-9b5a-eba95d3eba9b/IncomingWebhook/dd73ff8af8b34c1580fd9a36e1292282/38d089c8-84f7-4d5b-8e71-1490da1ce09e"
)
import math
# from office365.runtime.auth.client_credential import ClientCredential
# from office365.sharepoint.client_context import ClientContext
import datetime

from percentage_log import LogUtils
logger = LogUtils.getRootLogger()

# Elasticsearch client; ES_URL is read from the environment/.env via decouple.
es= Elasticsearch(config('ES_URL'),request_timeout=300,retry_on_timeout=True)
# NOTE(review): the value named SECRET_KEY is used as the OpenAI API key —
# confirm the config naming is intentional.
os.environ["OPENAI_API_KEY"] = config('SECRET_KEY')

# Name of the index that is searched and updated throughout this script.
es_index_name =config("es_index_name")
#logger.info('entered logger info')
#logger.info('es_index_name %s'%es_index_name)

def format_file_paths(file_paths_str):
    """Turn a comma-separated list of file paths into a numbered listing.

    Example: "a.pdf, b.pdf" -> "1. a.pdf\\n2. b.pdf".
    The input is split on the exact separator ", ", matching how the
    caller joins the source paths.
    """
    entries = file_paths_str.split(", ")
    numbered = [f"{position}. {entry}" for position, entry in enumerate(entries, start=1)]
    return "\n".join(numbered)



# logger.info(es_index_name)
# --- Command-line inputs --------------------------------------------------
# argv[1]: the user's question; argv[2]: the bot's generated answer;
# argv[3]: context chunks joined by the literal marker '_lc_kwargs'
#          (presumably LangChain serialization residue — TODO confirm with caller);
# argv[4]: comma-separated source file paths backing the answer.
user_prompt=sys.argv[1]
answer=sys.argv[2]
context=sys.argv[3].split('_lc_kwargs')
# contactId=sys.argv[4]
meta_data_source = sys.argv[4]
formatted_file_paths = format_file_paths(meta_data_source)



def split_day_into_intervals(interval_hours):
    """Partition a 24-hour day into consecutive ``interval_hours``-long intervals.

    Returns a list of (start, end) ``datetime.datetime`` pairs anchored on the
    strptime default date (1900-01-01). The final interval's end may roll over
    to 1900-01-02 00:00 when the intervals cover the whole day.
    """
    intervals = []
    start_time = datetime.datetime.strptime("00:00", "%H:%M")
    day_end = datetime.datetime.strptime("23:59", "%H:%M")
    while start_time < day_end:
        end_time = start_time + datetime.timedelta(hours=interval_hours)
        intervals.append((start_time, end_time))
        start_time = end_time
    return intervals

def get_current_interval(interval_hours=2, current_time=None):
    """Return (1-based index, "HH:MM" start, "HH:MM" end) for the interval containing a time.

    ``current_time`` is a ``datetime.time``; it defaults to the current UTC
    time (exposed as a parameter so the function is testable — backward
    compatible with existing callers).

    BUG FIX: the original compared ``end.time()``, which for the day's final
    interval wraps to 00:00, so any time strictly inside the last interval
    (e.g. 23:30 with 2-hour intervals) matched nothing and None was returned.
    Comparing full datetimes anchored on the same 1900-01-01 base date keeps
    the original inclusive-boundary behaviour while fixing the last interval.
    """
    if current_time is None:
        current_time = datetime.datetime.utcnow().time()
    # Anchor the query time on the same base date strptime uses so it is
    # directly comparable with the interval endpoints.
    cur_dt = datetime.datetime.combine(datetime.date(1900, 1, 1), current_time)
    for idx, (start, end) in enumerate(split_day_into_intervals(interval_hours)):
        if start <= cur_dt <= end:
            return idx + 1, start.strftime("%H:%M"), end.strftime("%H:%M")
    return None  # unreachable when the intervals cover the full day

# current_interval = get_current_interval()

# site_url = 'https://scic.sharepoint.com/d365integration'
# app_principal = {
#     'client_id': '5da0927e-1cc7-4538-8947-454b37a13466',
#     'client_secret': 'bkFGEOEi7FBvR2poxhHfYBxUAh2Eerv5q2m4OunUSzE=',
# }

# sp_site = 'https://scic.sharepoint.com/d365integration/'
# relative_url = "/d365integration/Shared Documents/Chat Bot Export/excel"


# client_credentials = ClientCredential(app_principal['client_id'], app_principal['client_secret'])
# ctx = ClientContext(sp_site).with_credentials(client_credentials)

# libraryRoot = ctx.web.get_folder_by_server_relative_path(relative_url)
# list_files = libraryRoot.files
# ctx.load(list_files)
# ctx.execute_query()

# new_list_files=[list_files[i] for i,fy in enumerate(list_files) if (fy.properties["TimeLastModified"]).date() == datetime.date.today()]
# report_file=[]
# for val,i in enumerate(new_list_files):
#     sec_hour=str(i).split('Chat_bot')[-1].split('T')[-1].split('_')[0]
#     if sec_hour==current_interval[2].split(':')[0]:
        # report_file.append(new_list_files[val])

#sharp_path='https://scic.sharepoint.com/d365integration/Shared%20Documents/Chat%20Bot%20Export/excel/'
# SharePoint folder holding the bot-coaching Power BI / exported reports;
# referenced in the low-score alert e-mail and Teams card below.
sharp_path='https://scic.sharepoint.com/d365integration/Shared%20Documents/Forms/AllItems.aspx?id=%2Fd365integration%2FShared%20Documents%2FChat%20Bot%20Export%2FAlliBot%202%2E0%2FCsv%20Export%20to%20Excel'

# sharp_path1=''
# if report_file and len(report_file)==1:
#     sharp_path1=sharp_path+'/'+str(report_file[0]) 
#     sharepoint_var='\n Please refer this sharepoint link for botcoaching powerbi report\n'+sharp_path1+'\n if file not found check with this link for latest report\n'+sharp_path+'\n'

# else:
# Boilerplate paragraph appended to the alert e-mail body.
sharepoint_var='\n Please refer this sharepoint link for botcoaching powerbi latest report\n'+sharp_path+'\n'

def check_elastic_status():
    """Return True when the Elasticsearch HTTP endpoint answers with 200.

    Used by the except-branches in this script to decide whether a failed ES
    call is worth retrying.

    BUG FIX: the original let connection errors propagate out of a health
    check whose only callers invoke it *inside* exception handlers — a down
    cluster crashed the retry logic instead of skipping it. Network failures
    now simply report "not healthy". A timeout is also set so a hung endpoint
    cannot block the script forever.
    """
    # SECURITY(review): credentials are hardcoded — move to config()/env.
    basic = HTTPBasicAuth('elastic', '8oKIqy312EBsAPzWT64NUzji')
    try:
        response = requests.get(
            'https://scic-elasticsearch.es.us-central1.gcp.cloud.es.io',
            auth=basic,
            timeout=30,
        )
    except requests.RequestException:
        return False
    return response.status_code == 200
    
def send_email(subject, message, To):
    """Send a plain-text e-mail via the Office365 SMTP relay.

    Parameters:
        subject: e-mail subject line.
        message: plain-text body.
        To: list of recipient addresses (joined with ',' for the header and
            passed as-is to sendmail for the envelope).

    Errors are printed and logged rather than raised, preserving the
    original best-effort behaviour.

    BUG FIX: the original referenced ``server`` in a ``finally`` block, which
    raised NameError whenever ``smtplib.SMTP(...)`` itself failed (the name
    was never bound). The ``with`` statement closes the connection only when
    it was actually opened.
    """
    from_email = 'allibot@scic.com'  # sender account
    # SECURITY(review): app password is hardcoded — move to config()/env.
    password = 'IgX8N7ruvFjWU6U!'

    msg = MIMEMultipart()
    msg['From'] = from_email
    msg['To'] = ",".join(To)
    msg['Subject'] = subject
    msg.attach(MIMEText(message, 'plain'))

    try:
        # Office365 SMTP with STARTTLS upgrade on port 587.
        with smtplib.SMTP('smtp.office365.com', 587) as server:
            server.starttls()
            server.login(from_email, password)
            server.sendmail(from_email, To, msg.as_string())
    except Exception as e:
        print(f"Error sending email: {str(e)}")
        logger.exception('Exception occured due to %s' % e)
            
def check_field_existence(index_name, doc_id, field_name):
    """Return True when document ``doc_id`` in ``index_name`` has ``field_name``.

    Fetches the document once; on failure, retries a single time if the
    cluster still answers a plain HTTP ping (check_elastic_status). Returns
    False when the document cannot be fetched at all.

    BUG FIX: the original retried the GET after a failure but then
    unconditionally ``return False``, discarding the successfully retried
    document. The retry result is now actually inspected.
    """
    try:
        document = es.get(index=index_name, id=doc_id)
    except Exception as e:
        logger.exception('Exception occured in check_field function %s' % e)
        if not check_elastic_status():
            return False
        try:
            document = es.get(index=index_name, id=doc_id)
        except Exception as retry_err:
            logger.exception('Retry failed in check_field function %s' % retry_err)
            return False

    # The field exists only if the response carries a _source body
    # containing the key.
    if document and '_source' in document:
        return field_name in document['_source']
    return False
    
# Ask the LLM for 5 paraphrases of the user's question; they get stored on
# matching history documents so future lookups can hit reworded questions.
quest_response = openai.chat.completions.create(
    model='gpt-3.5-turbo-1106',
    n=1,
    messages=[
        {"role": "system", "content": f"Can you provide 5 similar sentences  '{user_prompt}'?"},
        {"role": "user", "content": user_prompt},
    ])

# Extracting 'content' from the provided data
extracted_content = quest_response.choices[0].message.content

# NOTE(review): `questions` is the raw completion text (a single string),
# not a list of 5 sentences — downstream consumers should confirm the format.
questions=extracted_content
# logger.info(questions)
# Define your search query
# Match documents whose 'history' field contains ALL terms of the question.
search_query = {
    "query": {
        "match": {
            "history": {
            'query':user_prompt,
            'operator' : 'AND'
            }
        }
    },
   "size": 100
}

# Execute the search
# Execute the search
try:
    results = es.search(index=es_index_name,  body=search_query)
except Exception as e:    
    logger.exception('Exception in es search function due to %s' % e)
    # Retry once if the cluster still answers a plain HTTP ping.
    # NOTE(review): if the cluster is down (or the retry also fails),
    # `results` is never assigned and the loop below raises NameError.
    if check_elastic_status():
        results = es.search(index=es_index_name,  body=search_query)
# logger.info(results)
ids=[]
# Collect the _id of every matching history document.
for hit in results['hits']['hits']:
    ids.append((hit['_id']))
    #print(ids)
# logger.info(ids)   
# Painless script: append the generated questions to an existing 'questions'
# field, or create the field when missing.
# NOTE(review): params.value is a plain string here, while `addAll` expects a
# collection — the append branch likely fails for documents that already have
# a 'questions' field. Verify against the index mapping.
update_script = {
    "script": {
        "source": """
            if (ctx._source.containsKey('questions')) {
                ctx._source.questions.addAll(params.value);
            } else {
                ctx._source.questions = params.value;
            }
        """,
        "lang": "painless",
        "params": {
            "value": questions
        }
    }
}    

# Attach the generated questions to each matched document that does not
# already carry a 'questions' field.
for i in ids:
    if not check_field_existence(es_index_name, i, "questions"):
        try:
            # logger.info('if ques satisfied')

            es.update(index=es_index_name, id=i, body=update_script) 
        except Exception as e:    
            logger.exception('Exception in es update function due to %s' % e)
            # One retry when the cluster still answers a plain HTTP ping.
            if check_elastic_status():
                es.update(index=es_index_name, id=i, body=update_script) 
    else:
        #logger.info('else ques satisfied')
        print('else satisfied')
# logger.info('question updated')      
        # print('success')
        
# Build a single-row RAGAS dataset from the CLI inputs and score the answer's
# faithfulness against the retrieved contexts.
data_samples = {
    'question': [user_prompt],
    # 'answer': ["Bottom-up pricing is a pricing strategy where the final price of a product or service is determined by starting with the cost of production and then adding a markup to ensure profitability. This approach considers all the costs involved in producing the product or service, including materials, labor, and overhead, to calculate the base cost. Then, a desired profit margin is added on top of these costs to set the selling price. Unlike top-down pricing, which starts with the market price and works backward to determine what margins are possible, bottom-up pricing focuses on the costs first to ensure that all expenses are covered and a profit is made."],
    'answer': [answer],
    'contexts' : [context],
}

langchain_llm = ChatOpenAI(model_name="gpt-3.5-turbo-1106")
dataset = Dataset.from_dict(data_samples)
# score = evaluate(dataset,metrics=[answer_relevancy],llm=gpt4)
score = evaluate(dataset,metrics=[faithfulness],llm=langchain_llm)
# logger.info(score)
# RAGAS can return NaN (e.g. when no statements are extracted); treat that as
# 0 so the arithmetic and threshold comparison below stay valid.
if math.isnan(score['faithfulness']):
    # logger.info('NaN found')
    score['faithfulness']=0
    # logger.info(score['faithfulness'])
# Persist the faithfulness percentage and the answer's source-file listing on
# every matched document that does not already carry a 'percentage' field.
# BUG FIX: the original re-indexed each document twice per branch — once with
# only 'percentage' set, then again with 'meta_data_source' added, the first
# write being immediately overwritten. Both fields are now set before a
# single es.index call; the final document state is identical.
for i in ids:
    if not check_field_existence(es_index_name, i, "percentage"):
        try:
            existing_doc = es.get(index=es_index_name, id=i)
            existing_doc["_source"]["percentage"] = round(score['faithfulness']*100,2)
            existing_doc["_source"]["meta_data_source"] = formatted_file_paths
            # existing_doc["_source"]["contactId"] = contactId
            es.index(index=es_index_name, id=i, body=existing_doc["_source"])
        except Exception as e:
            logger.exception('Exception in es append function due to %s' % e)
            # One retry when the cluster still answers a plain HTTP ping.
            if check_elastic_status():
                existing_doc = es.get(index=es_index_name, id=i)
                existing_doc["_source"]["percentage"] = round(score['faithfulness']*100,2)
                existing_doc["_source"]["meta_data_source"] = formatted_file_paths
                # existing_doc["_source"]["contactId"] = contactId
                es.index(index=es_index_name, id=i, body=existing_doc["_source"])
    else:
        #logger.info('else perc satisfied')
        print('update else satisfied')

# logger.info('percentage updated')

                
# logger.info('data_updated')                
# Alert path: when faithfulness < 0.5, post a Teams card and e-mail the
# report owners with the question/answer/context and the SharePoint link.
if score['faithfulness']<0.5:
    # create the section
    myMessageSection = pymsteams.cardsection()

    # Section Title
    # NOTE(review): the title says "Answer relevancy" but the metric evaluated
    # above is faithfulness — confirm the intended wording.
    myMessageSection.title("Warning: Answer relevancy evaluation below threshold")

    # Activity Elements
    myMessageSection.activityTitle(f'Question: {user_prompt}')

    myMessageSection.activitySubtitle(f'Answer: {answer}')
    myMessageSection.activityText("The score is "+str(score['faithfulness'])+" The generated percentage was determined based on context and actual output")


    myMessageSection.text(f"Context: {context}")

    # Link button pointing at the SharePoint report folder.
    text1 = "sharepoint Link1"
    url1 = sharp_path
    myTeamsMessage.text(text1)
    myTeamsMessage.addLinkButton(text1, url1)

    # text2 = "sharepoint Link2"
    # if sharp_path1:
    #     url2 = sharp_path1
    # else:
        # url2=sharp_path
    # myTeamsMessage.text(text2)
    # myTeamsMessage.addLinkButton(text2, url2)
    
    # Add your section to the connector card object before sending
    myTeamsMessage.addSection(myMessageSection)
    myTeamsMessage.text(" ")
    myTeamsMessage.send()
    
    # Mirror the alert over e-mail to the configured recipient list.
    subject = "AlliBot V2.0 Response Accuracy Below Target Threshold"
    message = f'''Warning: Answer relevancy evaluation below threshold
                  Question: {user_prompt}
                  Answer: {answer}
                  The answer is below threshold 0.5
                  Context: {context}
                  {sharepoint_var}'''
    to_email =[
        "sountharya@qubespace.in",
    ]
    send_email(subject, message, to_email)
