
from api import Kimi
import  os
import json
from datetime import  datetime
from pathlib import Path
import pandas as pd
import pymysql
from  random import randint
from wanna import vn
from log import log

def to_database(sql, data, flag):
    """Execute a parameterized statement against the `llm` database.

    :param sql: SQL text with %s placeholders
    :param data: one parameter sequence (flag == 1) or a sequence of
        parameter sequences for a batch write
    :param flag: 1 -> cursor.execute, anything else -> cursor.executemany
    :return: cursor.fetchall() result (empty tuple for non-SELECT
        statements), or '' when the statement failed and was rolled back
    """
    # NOTE(review): credentials are hard-coded; move them to config/env.
    conn = pymysql.connect(host='192.168.35.221', user='root', password='xxtc.mysql2@147258', db='llm', port=3307,
                           charset='utf8mb4')
    cursor = conn.cursor()
    try:
        if flag == 1:
            cursor.execute(sql, data)
        else:
            # Batch write.
            cursor.executemany(sql, data)
        conn.commit()
        results = cursor.fetchall()
    except Exception as e:
        log.error(f"err:{e}, 操作失败的sql：{sql}及data：{data}")
        print(f'Error: {e}')
        conn.rollback()
        print(f'操作失败的sql：{sql}及data：{data}')
        # Callers only check len(...) > 0, so '' is a safe failure marker.
        results = ''
    finally:
        cursor.close()  # fix: cursor was previously left open
        conn.close()
    return results


def get_new_sql(sql):
    """Make the table name in a CREATE TABLE statement unique.

    Appends a "YYYYmmdd_HHMMSS_<rand>" suffix to the (unquoted) table name
    and returns the rewritten statement together with the new name.

    :param sql: a CREATE TABLE statement containing "TABLE " and "("
    :return: (new_sql, new_table_name)
    """
    suffix = datetime.now().strftime("%Y%m%d_%H%M%S_") + str(randint(0, 1000))
    table_pos = sql.find("TABLE ")
    paren_pos = sql.find("(")
    prefix = sql[:table_pos]
    # Second token of "TABLE `name` ..." is the quoted table name.
    quoted_name = sql[table_pos:paren_pos].split(" ")[1]
    renamed = quoted_name.replace("`", "") + suffix
    column_part = sql[paren_pos:]
    rewritten = f"{prefix} TABLE {renamed} {column_part}"
    return rewritten, renamed

def get_cols_name(sql):
    """Extract column names from a CREATE TABLE statement.

    Splits the column list on commas (dropping the final fragment, which
    carries the closing parenthesis), takes the first token of each
    definition, and filters out the auto-managed columns.

    :param sql: a CREATE TABLE statement
    :return: list of data column names
    """
    column_section = sql[sql.find("(") + 1:]
    fragments = column_section.split(",")[:-1]
    names = []
    for fragment in fragments:
        cleaned = fragment.replace("\n", "").replace("`", "").strip()
        names.append(cleaned.split(" ")[0])
    # Drop columns the database maintains by itself.
    for auto_col in ['更新时间', 'id', 'ID', '序号']:
        if auto_col in names:
            names.remove(auto_col)
    return names

def get_table_name(sql):
    """Return the bare table name from a CREATE TABLE statement.

    :param sql: statement containing "TABLE " before the column list "("
    :return: table name with backticks and surrounding whitespace removed
    """
    segment = sql[sql.find("TABLE "):sql.find("(")].strip()
    bare = segment.replace("TABLE", "").replace("`", "")
    return bare.strip()

def find_sql_table_name(col_list):
    """Look up an already-created table whose columns are covered by col_list.

    Parses <cwd>/create_table_line.txt, where CREATE TABLE statements are
    separated by lines containing "||||", and returns the table name of the
    first statement whose entire column set appears in col_list; returns
    False when no statement matches.

    :param col_list: column names proposed by the LLM for the new upload
    :return: matching table name, or False
    """
    txt_path = os.path.join(os.getcwd(), "create_table_line.txt")
    statements = []
    pending = ""
    with open(txt_path, "r", encoding="utf-8") as f:
        for line in f:
            if "||||" in line:
                # Separator line: the accumulated text is one full statement.
                statements.append(pending)
                pending = ""
            elif len(line) > 0:
                pending += line
    # First statement whose every column is present in col_list wins.
    for statement in statements:
        table_cols = get_cols_name(statement)
        covered = [c for c in col_list if c in table_cols]
        if len(covered) == len(table_cols):
            return get_table_name(statement)
    return False


def insert_db(execl_path, table_name: str, col_list: list, skip_rows: int):
    """Insert rows of an Excel sheet into `table_name`, skipping duplicates.

    :param execl_path: path of the Excel file to read
    :param table_name: target MySQL table (without backticks)
    :param col_list: columns to insert; auto-managed columns are removed
        in place (callers rely on positional pairing with the sheet)
    :param skip_rows: number of leading rows to skip (header offset)
    """
    df = pd.read_excel(execl_path, skiprows=skip_rows)
    # Auto-generated columns are never inserted explicitly.
    for auto_col in ["id", "ID", "序号", "更新时间"]:
        if auto_col in col_list:
            col_list.remove(auto_col)
    filter_df = df[col_list]
    # Round numeric cells to 2 decimals, then blank out NaN cells.
    new_df = filter_df.round(2)
    new_df_filter = new_df.where(pd.notna(filter_df), "")
    rows = [tuple(row) for _, row in new_df_filter.iterrows()]
    placeholders = ["%s"] * len(col_list)
    # The INSERT statement is row-invariant; build it once.
    insert_sql = ("insert into `" + table_name + "` (`" + "`,`".join(col_list)
                  + "`)" + "values (" + ",".join(placeholders) + ")")
    for item in rows:
        # Empty cells become NULL.
        values = [str(v) if len(str(v).strip()) > 0 else None for v in item]
        # Duplicate check. Fix: `col = %s` with a NULL parameter never
        # matches in SQL (NULL comparisons yield UNKNOWN), so rows with an
        # empty cell were re-inserted on every run; NULLs must use IS NULL.
        where_parts = []
        where_params = []
        for col, val in zip(col_list, values):
            if val is None:
                where_parts.append("`" + col + "` IS NULL")
            else:
                where_parts.append("`" + col + "`=%s")
                where_params.append(val)
        check_sql = ("select * from `" + table_name + "` where "
                     + " and ".join(where_parts))
        db_data = to_database(check_sql, where_params, 1)
        if len(db_data) > 0:
            log.info(f"{values} 已经存在数据库中")
            print("已经存在")
            continue
        to_database(insert_sql, values, 1)

def write_to_txt(txt_path, llm_sql):
    """Append one CREATE TABLE statement plus a "||||" separator to txt_path.

    Fix: the separator must end with a newline. find_sql_table_name() drops
    every line containing "||||"; without the trailing newline the next
    appended statement was glued onto the separator line and its first line
    was silently discarded by the parser. (The redundant f.close() inside
    the `with` block is also removed.)

    :param txt_path: path of the statement log file (opened in append mode)
    :param llm_sql: the CREATE TABLE statement to record
    """
    with open(txt_path, 'a', encoding="utf-8") as f:
        f.write(llm_sql)
        f.write("\n")
        f.write("||||\n")


def create_table_line(excel_file):
    """Ingest one Excel file from <cwd>/user_file/ into MySQL.

    Asks the Kimi LLM for a CREATE TABLE statement (returned as JSON) for
    the sheet, reuses an existing table when create_table_line.txt already
    records one with the same column set, otherwise creates a freshly named
    table and records its DDL, and finally inserts the sheet's data rows.

    :param excel_file: file name located under <cwd>/user_file/
    :return: a {"state": ...} dict — "done" on success, an error message
        ("生成建表语句失败" / "插入数据失败") on failure
    """
    is_create_table = True
    question = "请根据上述表格文件生成一个mysql的建表语句，要求1、增加一个字段为更新时间，使用mysql触发器；\
2、如果建表语句里面没有序号字段，就增加一个字段名为序号的字段，同时设置成PRIMARY KEY auto_increment，输入格式为json，内容为：\
{\"建表语句\": \"XXX\",\"建表所使用字段\":[XXX,XXX],\"建表名字\":XXX} 只用输出json，其他都不用输出"
    kimi = Kimi()
    path = Path(os.path.join(os.getcwd(), "user_file" , excel_file))
    try:
        # The model is told to answer with pure JSON; strip the markdown code
        # fences it sometimes wraps around the payload before parsing.
        k = kimi.kimi_file(file_path=path,question=question)
        r_k = k.replace("```json","").replace("```","")
        k_json = json.loads(r_k)
        llm_sql = k_json["建表语句"]
        llm_table_name = k_json["建表名字"]
        llm_col_list = k_json["建表所使用字段"]
    except Exception as e:
        print(e)
        return {"state": "生成建表语句失败"}
    # Make the table name unique (timestamp + random suffix).
    llm_sql, llm_table_name = get_new_sql(llm_sql)
    # Record the DDL in create_table_line.txt so a later upload with the same
    # column set reuses the existing table instead of creating a new one.
    txt_path = os.path.join(os.getcwd(), "create_table_line.txt")
    if not Path(txt_path).exists():
        Path(txt_path).touch()
        write_to_txt(txt_path, llm_sql)
    else:
        table_name = find_sql_table_name(llm_col_list)
        if table_name is not False:
            llm_table_name=table_name
            is_create_table = False
        else:
            write_to_txt(txt_path,llm_sql)
    # Write the sheet data into MySQL: first locate the header row — the
    # first row whose cells contain at least half of the LLM-proposed column
    # names — and skip everything up to and including it.
    df = pd.read_excel(path)
    rows = [tuple(row) for _, row in df.iterrows()]
    n = 1
    for idx ,row in enumerate(rows):
        len_n = [i for i in llm_col_list if i in row]
        if  len(len_n) >= len(llm_col_list)/2:
            n = idx+1
            break
    if is_create_table:
        to_database(llm_sql,(),1)
        # When the table is created, also vectorize the DDL into the vanna
        # (chromadb) store so the text-to-SQL side can reference this table.
        vn.train(ddl=llm_sql)

    try:
        insert_db(execl_path=path,table_name=llm_table_name.replace("`",""),col_list=llm_col_list,skip_rows=n)
    except Exception as e:
        log.error(f"插入数据失败:err:{e}")
        print(e)
        return {"state": "插入数据失败"}
    print({"state":"done"})
    return {"state":"done"}


if __name__ == '__main__':
    # Manual smoke test: run the full ingestion pipeline on user_file/a.xlsx.
    create_table_line("a.xlsx")
    # Sample column list kept for the commented-out debugging calls below.
    llm_col_list = ['序号', '研究领域', '项目名称', '项目负责人', '项目类型', '计划开始时间', '计划结束时间', '周期（天）', '进度核算时间', '序时进度（自动生成）', '实际进度', '本周工作内容', '下周计划', '更新时间']
    # insert_db(execl_path=r"E:\llm\total_api\add_table_wanna\user_file\3月3日-7日周工作-寄递研究所.xlsx",table_name='寄递研究所工作安排',col_list=llm_col_list,skip_rows=1)
    # find_sql(col_list)
    # sql = "CREATE  TABLE `寄递研究所工作安排20250306_163725_928` ("
    # print(get_table_name(sql))
    # sql = "insert into `寄递研究所工作安排`(研究领域,项目名称,项目负责人,项目类型,计划开始时间,计划结束时间,周期（天）,进度核算时间,序时进度（自动生成）,实际进度,本周工作内容,下周计划)values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    # # data = ['行业研究', '2025年寄递行业形势分析与趋势展望', '董晓珺', '研究专报', '2025-04-01 00:00:00','2025-12-30 00:00:00', 273, '2025-03-07 00:00:00', '', '', '4月启动', '']
    # data = ('行业研究', '2025年寄递行业形势分析与趋势展望', '董晓珺', '研究专报', '2025-04-01 00:00:00', '2025-12-30 00:00:00', 273, '2025-03-07 00:00:00', '', '', '4月启动', '')
    # to_database(sql,data,1)
    # sql = 'select * from `寄递研究所工作安排` where `研究领域`=%s and `计划开始时间`=%s and `计划结束时间`=%s'
    # sql = 'select * from `寄递研究所工作安排` where `研究领域`=%s and `项目名称`=%s and `项目负责人`=%s and `项目类型`=%s and `计划开始时间`=%s and `计划结束时间`=%s and `周期（天）`=%s and `进度核算时间`=%s  and `实际进度` =%s and `本周工作内容`=%s and `下周计划`=%s '
    # # data = ('行业研究', '2025-04-01 00:00:00', '2025-12-30 00:00:00')
    # data = ['规范化管理', '规范化管理工作', '马雪鹏', '集团委托', '2024-03-04 00:00:00', '2025-05-31 00:00:00', '453', '2025-03-07 00:00:00','0.81','1.县及县以下邮政运营规范化管理研究项目组联系河南省公司，针对手册内容征求意见，讨论并确定10个修改内容，完成了终稿校对工作；流程穿越项目组根据院领导要求调整报告架构，突出方法论的指导性作用，建立更加适用于邮政企业的四阶段7步骤的方法体系、流程穿越原则等内容。同时搜集整理收入补录、丢损防控相关制度文件，为该场景点后续流程穿越做准备；规范化教材配合出版社，整理修改教材中的图表，增加领导最新的要求和流程穿越方法论相关内容，同时对教材全文进行审核，进一步修改完善教材。', '流程穿越项目组对丢损防控场景开展流程穿越，确定穿越目标、范围及人员，梳理流程及规范，并制定流程穿越计划。']
    # data_sql = to_database(sql,data,1)
    # print("ok")