# OCR pipeline, version 2: automatically identify the question type and run
# the matching post-processing (reading, choice, translation, writing).
# Common output: the question type, the OCR result, and the document id in ES.
# Reading: field + keywords
# Choice: the knowledge point of each question
# Writing: keywords + field
# Translation: keywords + field

# Purpose: run OCR on (choice / reading / translation / writing) questions
# and return the result in the correct format.
import requests
import base64
import re
import os
import json
from openai import OpenAI
import threading
import time
from prompt.choice_prompt import choice_prompt
from prompt.reading_prompt import reading_prompt
from prompt.identify_prompt import identify_prompt
import es_option.es_api as es_option

from es_option.es_api import get_gpt_embedding
import gpt.gpt_key as gpt_key

start_time = time.time()  # pipeline start; used below to report elapsed time
# NOTE(review): hard-coded Baidu OCR access token (these expire; it should be
# loaded from config/env and rotated if it was ever committed).
access_token = "24.9825cfb6262e262a5caa6fef4f531037.2592000.1713240501.282335-56894520"
# NOTE(review): hard-coded absolute path to a local test image.
src = "C:\\Users\\李妃\\Desktop\\xuanze.jpg"
api_key = gpt_key.gpt_key  # OpenAI API key loaded from the project key module
# Route HTTP(S) traffic through a local proxy — presumably needed to reach
# the OpenAI API from this machine; verify before deploying elsewhere.
os.environ["http_proxy"] = "http://127.0.0.1:10808"
os.environ["https_proxy"] = "http://127.0.0.1:10808"

# Globals used by the worker threads to hand results back to add_data;
# each holds a parsed-JSON dict once the corresponding thread finishes.
res_reading = None  # result of reading_ocr_res (parsed JSON)
res_type = None  # result of identify_type (parsed JSON)
res_choice = None  # result of choice_ocr_res (parsed JSON)


# OCR helpers below: run OCR on the image and correct the output format.


def ocr_first(baidu_api, src):
    """
    Run Baidu high-accuracy OCR ("accurate" endpoint, with positions) on an image.

    :param baidu_api: Baidu OCR access token
    :param src: path to the image file
    :return: the raw ``requests.Response`` from the Baidu OCR endpoint
    """
    request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/accurate"
    # Read the image as base64; `with` closes the handle even on error
    # (the original leaked the file object).
    with open(src, "rb") as f:
        img = base64.b64encode(f.read())
    params = {"image": img, "language_type": "CHN_ENG", "result_type": "big"}
    request_url = request_url + "?access_token=" + baidu_api
    headers = {"content-type": "application/x-www-form-urlencoded"}
    response = requests.post(request_url, data=params, headers=headers)
    print(response)
    return response


def ocr_srcond(response, type):
    """
    Post-process the raw Baidu OCR response into question text.

    :param response: raw OCR response; ``response.json()`` must yield Baidu's
        payload with a ``words_result`` list of ``{"words", "location"}`` items
    :param type: one of "reading", "choice", "translation", "writing"
    :return: formatted text (str), or None for an unknown type
    """
    if type == "reading":
        return _join_lines_by_width(response.json()["words_result"])
    if type == "choice":
        words_result = response.json()["words_result"]
        # Lines starting with an option marker such as "A." / "b."
        choice_partten = r"^[abcdABCD]\..*"
        # If the next fragment sits on the same text row and is NOT an option
        # marker, the current fragment ends in a fill-in blank: append "( )".
        # Fragments on different rows are simply separated by a space.
        article = " "
        now_words = words_result[0]
        same_row = 0
        for i, words in enumerate(words_result):
            if i > 0:
                # Fragments within +/-10 px of the previous top are one row.
                if (
                    now_words["location"]["top"] + 10
                    > words["location"]["top"]
                    > now_words["location"]["top"] - 10
                ):
                    same_row = same_row + 1
                    if re.match(choice_partten, words["words"]):
                        # Next fragment is an option: no blank needed.
                        article = article + now_words["words"] + " "
                        same_row = 0
                    else:
                        # Same row, not an option: insert a blank.
                        article = article + now_words["words"] + "( )"
                else:
                    # New row: add a blank only if the previous fragment
                    # started a row and is not itself an option line.
                    if same_row == 0 and not re.match(
                        choice_partten, now_words["words"]
                    ):
                        article = article + now_words["words"] + "( )"
                    else:
                        article = article + now_words["words"] + " "
                now_words = words
        # Append the final fragment, which the loop never emits.
        article = article + words_result[-1]["words"]
        return article
    if type == "translation" or type == "writing":
        # Translation and writing share the reading layout rules
        # (the original duplicated this branch verbatim).
        return _join_lines_by_width(response.json()["words_result"])


def _join_lines_by_width(words_result):
    """Join OCR fragments: near-full-width lines get a trailing space (they
    wrap), shorter lines get a newline plus indent (paragraph/line break)."""
    max_width = 0
    for words in words_result:
        max_width = max(max_width, words["location"]["width"])
    article = " "
    for words in words_result:
        if words["location"]["width"] >= max_width - 50:
            # Within 50 px of the widest line: continuation of the paragraph.
            article = article + words["words"] + " "
        else:
            article = article + words["words"] + "\n" + "  "
    return article

def call_openai(api_key, prompt):
    """
    Thin wrapper around the OpenAI chat-completion endpoint.

    :param api_key: OpenAI API key
    :param prompt: user prompt sent to the model
    :return: the model's reply text, stripped of surrounding whitespace
    """
    client = OpenAI(api_key=api_key)
    messages = [{"role": "user", "content": prompt}]
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    answer = completion.choices[0].message.content
    return answer.strip()


# Question-type classification.
def identify_type(ocr_res, api_key):
    """
    Classify the question type via GPT and store the parsed result in the
    module-level global ``res_type`` (consumed by ``add_data`` after the
    classifier thread joins).

    :param ocr_res: raw Baidu OCR response
    :param api_key: OpenAI API key
    :return: the parsed JSON dict. NOTE(review): the prompt's example schema
        used capitalized keys ("Type", "Filed"), but the consumer in
        ``add_data`` reads lowercase keys: "type", "field", "keyword" and,
        for essays, "genre" — confirm against identify_prompt.
    """
    global res_type
    # The "reading" formatting keeps the line structure, which is sufficient
    # for type identification regardless of the actual question type.
    ocr_res = ocr_srcond(ocr_res, "reading")
    identify_ocr_prompt = identify_prompt.format(ocr_res)
    res = call_openai(api_key=api_key, prompt=identify_ocr_prompt)
    res_type = json.loads(res)
    # Also return the result so direct (non-threaded) callers can use it,
    # matching reading_ocr_res.
    return res_type


# Final result for the reading-comprehension part.
def reading_ocr_res(ocr_res, openai_key):
    """
    Produce the final structured result for a reading question.

    Formats the OCR text, sends it through the reading prompt, parses the
    GPT reply as JSON, stores it in the global ``res_reading`` and returns it.

    :param ocr_res: raw Baidu OCR response
    :param openai_key: OpenAI API key
    :return: dict parsed from the GPT answer
    """
    global res_reading
    formatted_text = ocr_srcond(ocr_res, "reading")
    prompt = reading_prompt.format(formatted_text)
    answer = call_openai(openai_key, prompt)
    res_reading = json.loads(answer)
    return res_reading


# Final result for the multiple-choice part.
def choice_ocr_res(ocr_res, openai_key):
    """
    Produce the final structured result for multiple-choice questions and
    store it in the module-level global ``res_choice``.

    :param ocr_res: raw Baidu OCR response
    :param openai_key: OpenAI API key
    :return: dict shaped like::

        "questions": [
          {
            "question": "What is the capital of France?",
            "options": ["Paris", "Berlin", "London", "Rome"],
            "knowledgePoints": "The knowledge point of this question"
          }]
    """
    global res_choice
    # Normalize the OCR text into choice-question layout (inserts "( )" blanks).
    ocr_res = ocr_srcond(ocr_res, "choice")
    prompt = choice_prompt.format(ocr_res)
    # BUG FIX: use the openai_key parameter instead of the module-level
    # api_key global, so callers can supply their own key.
    res = call_openai(api_key=openai_key, prompt=prompt)
    res_choice = json.loads(res)
    # Return the result for non-threaded callers, matching reading_ocr_res.
    return res_choice


# The final OCR pipeline entry point.
def add_data(index_name, baidu_api, openai_key, src):
    """
    Run the whole pipeline for one question image and store it in ES.

    Steps: Baidu OCR -> spawn GPT worker threads (reading / identify /
    choice) -> branch on the identified question type -> embed the text
    with OpenAI -> write the vectors into Elasticsearch.

    :param index_name: target Elasticsearch index name
    :param baidu_api: Baidu OCR access token
    :param openai_key: OpenAI API key
    :param src: path to the question image
    :return: {"type": <identified type>} for a handled type, else None
    """
    ocr_res = ocr_first(baidu_api=baidu_api, src=src)  # raw Baidu response
    # The GPT calls are network-bound, so run them in parallel threads.
    reading_thread = threading.Thread(
        target=reading_ocr_res, args=[ocr_res, openai_key]
    )
    identify_thread = threading.Thread(target=identify_type, args=[ocr_res, openai_key])
    choice_thread = threading.Thread(target=choice_ocr_res, args=[ocr_res, openai_key])
    reading_thread.start()
    identify_thread.start()
    choice_thread.start()
    # Plain-text variants used directly by the translation/writing branches.
    res_translation = ocr_srcond(ocr_res, "translation")
    res_writing = ocr_srcond(ocr_res, "writing")

    # Only the classifier must finish before branching; the reading/choice
    # threads are joined inside the branch that actually consumes them.
    identify_thread.join()

    print(res_type)
    if res_type["type"] == "reading":
        reading_thread.join()
        print(res_reading)
        return_data = {"type": "reading"}
        question_type = "reading"
        original_text = res_reading["original_text"]
        reading_type = ""
        for item in res_reading["questions"]:
            reading_type = reading_type + " " + item["question_type"]
            for option in item["options"]:
                # BUG FIX: append each option to the accumulated text;
                # previously this overwrote original_text with reading_type.
                original_text = original_text + " " + option["Option"]
        print(f"==========reading_type:{reading_type}=================")
        # BUG FIX (here and in every branch): embeddings now use the
        # openai_key parameter instead of the module-level api_key global.
        original_vector = get_gpt_embedding(openai_key, original_text)
        reading_type_vector = get_gpt_embedding(openai_key, reading_type)
        keyword_text = ""
        for item in res_type["keyword"]:
            keyword_text = keyword_text + " " + item
        print(f"==================key_word:{keyword_text}=============")
        keyword_vector = get_gpt_embedding(openai_key, keyword_text)
        filed = res_type["field"]
        filed_vector = get_gpt_embedding(openai_key, filed)
        add_res = es_option.add_reading_data_es(
            index_name,
            question_type,
            original_vector,
            keyword_vector,
            keyword_text,
            reading_type_vector,
            filed_vector,
            filed,
        )
        print(
            "==========================添加到选择题返回的结果========================"
        )
        print(add_res)
        print(f"res的type{type(add_res)}")
        print("=====================================================================")
        print(f"耗时：{time.time() - start_time}")
        # BUG FIX: return like the translation branch already did; the
        # original fell through and returned None for reading questions.
        return return_data
    else:
        print("不是阅读题")

    if res_type["type"] == "choice":
        choice_thread.join()
        print(res_choice)
        return_data = {"type": "choice"}
        for item in res_choice["questions"]:
            # Concatenate question and options for one embedding per question.
            original_text = item["question"]
            for option in item["options"]:
                original_text = original_text + " " + option
            original_vector = get_gpt_embedding(openai_key, original_text)
            keyword_text = item["knowledgePoints"]
            keyword_vector = get_gpt_embedding(openai_key, keyword_text)
            question_type = "choice"
            add_res = es_option.add_choice_data_es(
                index_name, question_type, original_vector, keyword_vector, keyword_text
            )
            print(
                "==========================添加到选择题返回的结果========================"
            )
            print(add_res)
            print(f"res的type{type(add_res)}")
            print(
                "====================================================================="
            )
        print(f"耗时：{time.time() - start_time}")
        # BUG FIX: the original returned None for choice questions.
        return return_data
    else:
        print("不是选择题")

    if res_type["type"] == "translation":
        # Translation keeps the plain formatted OCR string as the text.
        return_data = {"type": "translation"}
        question_type = "translation"
        original_vector = get_gpt_embedding(openai_key, res_translation)
        keyword = ""
        for item in res_type["keyword"]:
            keyword = keyword + item
        keyword_vector = get_gpt_embedding(openai_key, keyword)
        field = res_type["field"]
        field_vector = get_gpt_embedding(openai_key, field)
        add_res = es_option.add_translation_data_es(
            index_name,
            question_type,
            original_vector,
            keyword_vector,
            keyword,
            field_vector,
            field,
        )
        # NOTE(review): assumes add_translation_data_es returns a JSON string
        # (unlike the reading/choice helpers) — verify against es_api.
        add_res = json.loads(add_res)
        print(add_res)
        return return_data
    else:
        print("不是翻译题")

    if res_type["type"] == "writing":
        return_data = {"type": "writing"}
        question_type = "writing"
        original_vector = get_gpt_embedding(openai_key, res_writing)
        keyword = ""
        for item in res_type["keyword"]:
            keyword = keyword + item
        keyword_vector = get_gpt_embedding(openai_key, keyword)
        field = res_type["field"]
        field_vector = get_gpt_embedding(openai_key, field)
        genre = res_type["genre"]
        genre_vector = get_gpt_embedding(openai_key, genre)
        add_res = es_option.add_writing_data_es(
            index_name,
            question_type,
            original_vector,
            keyword_vector,
            keyword,
            field_vector,
            field,
            genre,
            genre_vector,
        )
        # NOTE(review): same JSON-string assumption as the translation branch.
        add_res = json.loads(add_res)
        print(add_res)
        # BUG FIX: the original returned None for writing questions.
        return return_data
    else:
        print("不是作文题")
    # Unrecognized type: nothing was stored.
    return None


# Run the pipeline only when executed as a script; importing this module
# for its functions must not trigger OCR/network side effects.
if __name__ == "__main__":
    add_data(
        index_name="original_question", baidu_api=access_token, openai_key=api_key, src=src
    )
