import os

# Restrict visible GPUs; set before importing any CUDA-aware framework so the
# device mapping takes effect (presumably torch is initialized via the imports
# below — TODO confirm).
os.environ["CUDA_VISIBLE_DEVICES"] = '2,3'
import jsonlines
import traceback
from modelscope import AutoModelForCausalLM, AutoTokenizer, snapshot_download
from modelscope import GenerationConfig
from langchain.prompts import ChatPromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
import numpy as np

from sentence_transformers import SentenceTransformer

# Model checkpoint directory (alternatives kept below for quick switching).
# model_dir = '/datasets/fengjiahao/nlp/TongyiFinance/Tongyi-Finance-14B'
model_dir = '/datasets/fengjiahao/nlp/TongyiFinance/Tongyi-Finance-14B-Chat/'
# model_dir = '/datasets/fengjiahao/nlp/qwen/Qwen-7B-Chat/'
# model_dir = '/datasets/fengjiahao/nlp/TongyiFinance/Tongyi-Finance-14B-Chat-Int4/'
# Input questions (jsonl); here a previous submit file is re-processed so only
# records still missing an answer are filled in.
# question_json_path = r'/datasets/fengjiahao/nlp/bs_challenge_financial_14b_dataset/question.json'
question_json_path = r'/datasets/fengjiahao/nlp/bs_challenge_financial_14b_dataset/submit_result.jsonl'
# Output jsonl with answers filled in.
answer_path = r'/datasets/fengjiahao/nlp/bs_challenge_financial_14b_dataset/submit_result2.jsonl'
# fund_db_path = r'/public/tmp/fengjiahao/bs_challenge_financial_14b_dataset/dataset/博金杯比赛数据.db'
# Directory of prospectus documents already converted from PDF to .txt.
pdf_root = r'/public/tmp/fengjiahao/bs_challenge_financial_14b_dataset/pdf_txt_file/'
# CSV mapping: company_name,txt_filename (one pair per line, no header).
company_list_path = r'companys.csv'

# Load the chat LLM and its tokenizer; device_map="auto" shards across the
# GPUs selected via CUDA_VISIBLE_DEVICES above.
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True).eval()
model.generation_config = GenerationConfig.from_pretrained(model_dir, trust_remote_code=True)  # generation length, top_p and other hyper-parameters can be customized here

# Sentence embedder used for chunk/question similarity (m3e-base).
embedding_model = SentenceTransformer('/datasets/fengjiahao/nlp/m3e-base/')
# conn.configure("busyTimeout", 6000)


# Load the question records and the company-name -> txt-file mapping.
content = []
with jsonlines.open(question_json_path, "r") as question_file:
    for record in question_file.iter(type=dict, skip_invalid=True):
        content.append(record)

company_list = []
with open(company_list_path, 'r') as csv_file:
    for raw_line in csv_file:
        # Each line is exactly "company_name,filename".
        name, txt_name = raw_line.strip().split(',')
        company_list.append((name, txt_name))

# Prompt for answering a question from retrieved material: the model must
# first state whether the material suffices, then give the answer.
get_info_template = ChatPromptTemplate.from_template(
    "你是一个能精准提取文本信息并回答问题的AI。\n"
    "请根据以下资料的所有内容，首先帮我判断能否依据给定材料回答出问题。"
    "如果能根据给定材料回答，则提取出最合理的答案来回答问题,并回答出完整内容，不要输出表格：\n\n"
    "{text}\n\n"
    "请根据以上材料回答：{q}\n\n"
    "请按以下格式输出：\n"
    "能否根据给定材料回答问题：回答能或否\n"
    "答案：")
# One-shot prompt that extracts a retrieval keyword from the question; the
# keyword is later used to select document chunks.
get_keyword_template = ChatPromptTemplate.from_template(
    "你是一个能精准提取问题中的关键词的AI。\n"
    "关键词用于定位招标书中问题答案的位置"
    "给予如下的问题，提取其中的关键词:\n\n"
    "问题:湖南长远锂科股份有限公司变更设立时作为发起人的法人有哪些?"
    "关键词:法人"
    "问题:{q}"
    "关键词:"
)
# NOTE: disabled alternative prompt that rewrote a raw answer into a full
# sentence; kept for reference.
# answer_prompt_template = ChatPromptTemplate.from_template(
#     "你是一个会组织语言的AI。"
#     "我会给你一个问题，和相应的答案，请为我完整写出答案句\n"
#     "问题：{q}\n"
#     "答案：{a}\n"
#     "答案句为："
# )


def text_split(content):
    """Split a document into overlapping chunks suitable for retrieval.

    Splits preferentially on blank lines, then newlines, then the Chinese
    full stop, producing ~1500-character chunks with 100 characters of
    overlap between neighbours.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1500,
        chunk_overlap=100,
        separators=['\n\n', "\n", "。"],
        keep_separator=False,
    )
    return splitter.split_text(content)

def cosine_similarity(a, b):
    """Return the cosine similarity of two 1-D vectors."""
    denominator = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / denominator

def text_similarity(text, embedding):
    """Cosine similarity between a text chunk and a precomputed embedding."""
    text_vector = embedding_model.encode(text)
    return cosine_similarity(text_vector, embedding)


def ask_llm(ori_question):
    """Answer a question from the matched company's prospectus via the LLM.

    Pipeline: match a company name appearing in the question -> read the
    company's converted-txt prospectus -> ask the model for a retrieval
    keyword -> keep only chunks containing the keyword -> ask the model to
    answer from those chunks.

    Fixes over the original:
      * os.path.join was called before the None check on target_filename,
        raising TypeError when no company matched the question.
      * `content` was unbound (UnboundLocalError) when the txt file was
        missing; it now defaults to an empty string.
      * The debug print used the module-level global `question` instead of
        the `ori_question` parameter.
      * Splitting on '答案：' raised IndexError when the model omitted the
        marker; the raw response is now returned in that case.
    """
    # Find which company (and hence which source document) the question is about.
    target_filename = None
    target_company_name = None
    for company_name, filename in company_list:
        if company_name in ori_question:
            target_filename = filename
            target_company_name = company_name
            break

    # Read the raw document; fall back to an empty string so the pipeline
    # still returns a (likely "cannot answer") response instead of crashing.
    content = ''
    if target_filename is not None:
        pdf_path = os.path.join(pdf_root, target_filename)
        if os.path.exists(pdf_path):
            with open(pdf_path, 'r', encoding='utf-8') as file:
                content = file.read()

    text_list = text_split(content)

    print("!!!Q:", ori_question)
    print('>>>', target_company_name)

    # Step 1: extract a retrieval keyword from the question.
    prompt = get_keyword_template.format_messages(q=ori_question)
    response, history = model.chat(tokenizer, prompt[0].content, history=None)

    # Step 2: keep only chunks that literally contain the keyword.
    target_text = ''
    for text in text_list:
        if response in text:
            target_text += text + '\n'

    # Step 3: answer from the retrieved chunks.
    prompt = get_info_template.format_messages(text=target_text, q=ori_question)
    response, history = model.chat(tokenizer, prompt[0].content, history=None)
    print('!!!response1:', response)
    # Keep only the text after the first '答案：' marker when it is present.
    parts = str(response).split('答案：')
    response = parts[1] if len(parts) > 1 else parts[0]
    print("!!!A:", response)

    return response


for idx, record in enumerate(content):
    # `question` stays module-level on purpose (other code reads the global).
    question = record['question']
    # Only process records that do not yet carry an answer.
    if record.get('answer', '') == '':
        record['answer'] = ask_llm(question)

        # Checkpoint: rewrite the full result set after every new answer so
        # progress survives a crash.
        with jsonlines.open(answer_path, "w") as out_file:
            out_file.write_all(content)
    # Example of the expected answer style: "The P/E ratio is one of the most
    # common indicators for judging whether a share price is reasonable; it is
    # the ratio of share price to earnings per share. ..."


