
# import pandas as pd
import pyodbc
import json
import re
from openai import OpenAI
import sys
import io

# Force standard output to use UTF-8 encoding (currently disabled)
#sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')

client = OpenAI(api_key="sk-8f9a5d6615864b488d6e0b83bb80e734", base_url="https://api.deepseek.com/v1")
def sanitize_json(json_str: str) -> str:
    """Best-effort repair of common non-standard JSON emitted by the LLM.

    Heuristics applied (in order):
      * unescaped single quotes -> double quotes,
      * strip ``//`` line comments (not legal JSON),
      * wrap bare (unquoted) object keys in double quotes.

    FIX: if the input already parses as JSON it is returned unchanged --
    valid payloads may legitimately contain ``'`` or ``//`` inside string
    values, and the regex passes below would corrupt them.
    """
    try:
        json.loads(json_str)
        return json_str  # already valid -- do not risk mangling it
    except (ValueError, TypeError):
        pass
    # Replace unescaped single quotes with double quotes.
    json_str = re.sub(r"(?<!\\)'", '"', json_str)
    # Remove // line comments.
    json_str = re.sub(r"//.*", "", json_str)
    # Quote bare object keys, e.g. {key: 1} -> {"key": 1}.
    # (The original comment claimed this "fixes unclosed quotes" -- it
    # actually quotes unquoted keys.)
    json_str = re.sub(r'([{,]\s*)(\w+)(\s*:)', r'\1"\2"\3', json_str)
    return json_str


def Range(ans):
    """Aggregate word counts in *ans* into seven difficulty levels.

    ans: list of dicts like ``{'word': str, 'num': int}``.

    Words found in the Access table 词汇总表v2 are levelled from its
    词标引编号 column; the remaining words are sent to the DeepSeek API in
    batches of up to 200 for the model to grade.  The DB-graded
    distribution is also written to ``levels.json`` before the API pass.
    Returns the aggregated ``levels`` list, or None if the vocabulary
    query fails.
    """
    # --- load the graded vocabulary from the Access database -------------
    access_db_path = r'analysis\DB01.accdb'  # path to the Access database
    conn_str = (
        r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
        rf'DBQ={access_db_path};'
    )
    conn = pyodbc.connect(conn_str)
    cursor = conn.cursor()
    try:
        query = "SELECT 词汇,词标引编号 FROM 词汇总表v2"
        try:
            cursor.execute(query)
            # word -> level-index string from the 词标引编号 column
            access_words_dict = {row[0]: row[1] for row in cursor.fetchall()}
        except pyodbc.Error as db_error:
            print(f"执行 SQL 查询时出错: {db_error}")
            return
    finally:
        # FIX: the original never closed the connection.
        cursor.close()
        conn.close()

    # --- split input into DB-matched and unmatched words -----------------
    matched_words = []
    unmatched_words = []
    levels = [{'level': name, 'words': [], 'num': 0}
              for name in ('一级', '二级', '三级', '四级', '五级', '六级', '高级')]
    # Level character -> index into ``levels`` (replaces 7 repeated ifs).
    level_index = {'一': 0, '二': 1, '三': 2, '四': 3, '五': 4, '六': 5, '高': 6}

    for word_atr in ans:
        if word_atr['word'] in access_words_dict:
            matched_words.append({'word': word_atr['word'],
                                  'num': word_atr['num'],
                                  'level': access_words_dict[word_atr['word']]})
        else:
            unmatched_words.append({'word': word_atr['word'],
                                    'num': word_atr['num']})

    # DB level strings carry the grade character at position 1 --
    # TODO confirm against the 词标引编号 column format.
    for elems in matched_words:
        grade = elems['level']
        idx = level_index.get(grade[1]) if len(grade) > 1 else None
        if idx is not None:
            levels[idx]['words'].append(elems['word'])
            levels[idx]['num'] += elems['num']

    # Persist the DB-graded distribution (pre-model state, as before).
    with open('levels.json', 'w', encoding='utf-8') as fp:
        fp.write(json.dumps(levels, ensure_ascii=False))

    # --- grade the unmatched words with the model, 200 at a time ---------
    # FIX: the original started at index 1 and silently dropped
    # unmatched_words[0]; batches now start at 0.  Empty batches are no
    # longer sent to the API.
    total = len(unmatched_words)
    start = 0
    while start < total:
        temp = unmatched_words[start:start + 200]
        start += len(temp)

        unmatched_words_json = json.dumps(temp, ensure_ascii=False)

        system_prompt = """你是一个将词语根据初学者学习难度(笔画多少,词意难易)进行等级划分,划分为一级,二级,三级,四级,五级,六级和高级的帮助者,你会收到一个json格式的数据unmatched_words,unmatched_words中只有词语和对应的数量而没有等级,你要根据词汇的学习难度进行等级划分,对unmatched_words中的词语进行评级,并以键值对的形式添加到unmatched_words中,如果其中有明显不符合语法的词语可将其删去,最终结果只以json形式输出"""

        user_prompt = f'''
            unmatched_words:
            {unmatched_words_json}
            请对unmatched_words中的词语进行评级,例如,如果unmatched_words中的一个词语"谦虚"被你评为高级词汇,那么就在其中加入'level':"高级",注意levels中只有一至六级和高级,你匹配的词只能在这七个等级之中。如果其中有明显不符合语法的词语可将其删去,最终结果只以json形式输出
            '''

        messages = [{"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}]

        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=messages,
            max_tokens=8192,
            response_format={'type': 'json_object'}
        )
        un_ans = json.loads(response.choices[0].message.content)
        # Model entries carry the grade character at position 0 ("一级"...).
        # FIX: .get() guards a missing 'unmatched_words' key; the narrowed
        # except replaces the original bare ``except:``.
        for elems in un_ans.get('unmatched_words', []):
            try:
                idx = level_index.get(elems['level'][0])
                if idx is not None:
                    levels[idx]['words'].append(elems['word'])
                    levels[idx]['num'] += elems['num']
            except (KeyError, IndexError, TypeError):
                # Malformed model entry -- keep the original marker output.
                print(0)
    return levels

def grammar_stats(text):
    """Count grammar-point usage per difficulty level in *text*.

    Loads the graded grammar inventory from the Access table 语法总表v2,
    then asks the DeepSeek model, ten sentences at a time, how many points
    of each level appear.  Returns a dict mapping level name -> total count.
    """
    ans = {'一级语法': 0, '二级语法': 0, '三级语法': 0, '四级语法': 0,
           '五级语法': 0, '六级语法': 0, '七——九级语法': 0}

    # --- load graded grammar points from the Access database -------------
    access_db_path = r"analysis\DB01.accdb"  # path to the Access database
    conn_str = (
        r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
        rf'DBQ={access_db_path};'
    )
    conn = pyodbc.connect(conn_str)
    cursor = conn.cursor()
    grammar = {'一级语法': ['方位名词'], '二级语法': [], '三级语法': [],
               '四级语法': [], '五级语法': [], '六级语法': [],
               '七——九级语法': []}
    try:
        cursor.execute("SELECT 分级, 三级分类 FROM 语法总表v2")
        # Skip consecutive duplicates of 三级分类; the seed value '方位名词'
        # is pre-inserted above and suppressed on first sight.  NOTE(review):
        # this only dedups adjacent rows -- assumes the table is grouped by
        # 三级分类; confirm against the DB.
        temp = '方位名词'
        for row in cursor.fetchall():
            if row[1] != temp:
                grammar[row[0]].append(row[1])
                temp = row[1]
    finally:
        # FIX: the original never closed the connection.
        cursor.close()
        conn.close()

    num = 10  # sentences per model request
    for paragraph in text.split('\n'):
        all_sentence = paragraph.split('。')
        # Chunk the paragraph into groups of ``num`` sentences, including
        # the final partial group.  FIX: the original also sent an empty
        # trailing chunk when the count divided evenly -- skipped now.
        for begin in range(0, len(all_sentence), num):
            sentence = ''.join(all_sentence[begin:begin + num])
            if not sentence:
                continue
            system_prompt = '''
            你必须严格遵循以下要求：
            1. 输出必须是 **标准JSON格式**
            2. 所有键名必须用英文双引号包裹
            3. 所有字符串值必须用英文双引号包裹
            4. 禁止包含注释、多余空格或换行符
            5. 数值直接写数字（不要加双引号）

            示例正确格式：
            {
              "一级语法": 1,
              "二级语法": 2,
              "三级语法": 0,
              "四级语法": 0,
              "五级语法": 0,
              "六级语法": 0,
              "七——九级语法": 0
            }

            现在请分析以下句子：
            '''

            user_prompt = f'''
                sentence:
                {sentence}
                grammar:
                {grammar}
                请严格按照上述格式要求输出 JSON
                '''

            messages = [{"role": "system", "content": system_prompt},
                        {"role": "user", "content": user_prompt}]

            response = client.chat.completions.create(
                model="deepseek-chat",
                messages=messages,
                max_tokens=8192,
                response_format={'type': 'json_object'}
            )
            raw_response = response.choices[0].message.content
            print("DEBUG_RAW_RESPONSE:", raw_response)  # debug output
            # Clean the response before parsing.
            clean_response = sanitize_json(raw_response)

            try:
                un_ans = json.loads(clean_response)
            except json.JSONDecodeError:
                print(f"最终清洗后内容仍无效: {clean_response}")
                un_ans = {
                    "一级语法": 0, "二级语法": 0, "三级语法": 0,
                    "四级语法": 0, "五级语法": 0, "六级语法": 0,
                    "七——九级语法": 0
                }
            # FIX: only fold in keys we track -- an unexpected key from the
            # model used to raise KeyError on ``ans[i] += un_ans[i]``.
            for key, count in un_ans.items():
                if key in ans:
                    ans[key] += count
    return ans

def semantic(text):
    """Summarise *text* with the DeepSeek model.

    Asks for the text's thematic (主题), content (内容) and textual (文本)
    characteristics and returns the model's JSON reply parsed into a dict.
    """
    # System message: task description plus an embedded example of the
    # expected JSON shape.
    brief = '''
    我将为你提供一段文本text,请总结它的文本的主题特征,内容特征,文本特征,结果只以json格式输出,例如
    {
    "主题特征": "话题既涉及个人生活、也包括社会生活和自然环境。话题内容和学习者生活工作相关，可以包括人际关系、生活方式、学习 方法、自然环境、社会现象等。",
    "内容特征": "   内容具有一定深度，有助于读者发展逻辑思维能力并对事物形成不同的 分析视角，具备较强的跨文化特征。情节发展曲折，可构建悬念。人物性格更加复杂，具备多面性",
    "文本特征": "以记叙文为主，辅以一定比例的说明文和议论文，包括适量的非连续性 文本。如现实生活故事、传统文学等故事类文体；科普文章、传记等非 故事类文体；简单的诗歌等。",
    }
    '''

    # User message: the text itself plus a restatement of the task.
    request = f'''
    text:
    {text}
    我将为你提供一段文本,请总结它的文本的主题特征,内容特征,文本特征,结果只以json格式输出
    '''

    completion = client.chat.completions.create(
        model="deepseek-chat",
        messages=[
            {"role": "system", "content": brief},
            {"role": "user", "content": request},
        ],
        max_tokens=8192,
        response_format={'type': 'json_object'}
    )
    return json.loads(completion.choices[0].message.content)


def suggest(vocabulary_level, grammar_stats):
    """Ask the DeepSeek model for a free-form analysis of the vocabulary
    and grammar level distributions; returns the reply text verbatim.
    """
    role_text = '''
    我将为你提供两个json格式的文件,分别是vocabulary_level,grammar_stats,其中vocabulary_level是词汇等级分布,grammar_stats是语法等级分布
    请根据这两个json的信息对其中的数据进行分析
    '''

    query_text = f'''
    我将为你提供两个json格式的文件,分别是vocabulary_level,grammar_stats,其中vocabulary_level
    词汇等级分布,grammar_stats是语法等级分布
    vocabulary_level:{vocabulary_level}
    grammar_stats:{grammar_stats}
    请根据这两个json的信息对其中的数据进行分析
    '''

    # Plain-text completion (no response_format) -- the caller consumes
    # the analysis as prose, not JSON.
    reply = client.chat.completions.create(
        model="deepseek-chat",
        messages=[
            {"role": "system", "content": role_text},
            {"role": "user", "content": query_text},
        ],
        max_tokens=8192,
    )
    return reply.choices[0].message.content

#         # 检查列表长度
#         print(f"Matched Words 长度: {len(matched_words)}")
#         print(f"Matched Word Indices 长度: {len(matched_word_indices)}")
#         print(f"Unmatched Words 长度: {len(unmatched_words)}")

#         # 创建结果 DataFrame
#         # 使用 zip 函数处理不同长度列表的情况
#         # result_data = []
#         # for word, matched, index in zip(excel_df['Words'].tolist(), [1 if w in matched_words else 0 for w in excel_df['Words'].tolist()],
#         #                            [access_words_dict.get(word, None) if word in matched_words else None for word in excel_df['Words'].tolist()]):
#         #     result_data.append((word, matched, index))

#         # result_df = pd.DataFrame(result_data, columns=['Words from Excel', 'Matched', 'Matched Word Indices'])

#         # # 将结果保存到新的 Excel 文件
#         # result_df.to_excel('comparison_result.xlsx', index=False)
#         matched_df = pd.DataFrame({
#         'Matched Words': matched_words,
#         'Indices': matched_word_indices
#         })

#         # 创建一个 DataFrame 来保存 unmatched_words
#         unmatched_df = pd.DataFrame({
#             'Unmatched Words': unmatched_words
#         })

#         # 使用 ExcelWriter 来将 DataFrame 保存到不同的工作表
#         with pd.ExcelWriter('pkuseg.xlsx', engine='openpyxl') as writer:
#             matched_df.to_excel(writer, sheet_name='Matched Words', index=False)
#             unmatched_df.to_excel(writer, sheet_name='Unmatched Words', index=False)

#     except FileNotFoundError as fnf_error:
#         print(f"文件未找到错误: {fnf_error}")
#     except pyodbc.Error as db_error:
#         print(f"数据库连接或查询错误: {db_error}")
#     except KeyError as key_error:
#         print(f"键错误: {key_error}")
#     except Exception as e:
#         print(f"其他错误: {e}")
#     finally:
#         # 关闭数据库连接
#         if cursor:
#             cursor.close()
#         if conn:
#             conn.close()

