import copy
import os
import time
import openpyxl
import ujson as json
from enums.report.report_export_file_type import DataCacheEnum
from enums.report.report_status_enum import ReportStatusEnum
import model.report.p_report_model as model
from utils.analysis.common.mediation_common_class import SEMRelationConfig
from utils.analysis.describe_analysis import describe
from utils.common.db_connection import db
from exception.api_exception import APIException
import tempfile
import pandas as pd
import io
from utils.analysis import validity_analysis, reliability_analysis, correlation_analysis, regressive_analysis, \
    frequency_analysis, dcxy_analysis
from utils.analysis.mediation import mediation_model1, mediation_model4, mediation_model6, mediation_model7, \
    mediation_model8, mediation_model14, mediation_model15
from utils.report import word_mediation_model1, word_mediation_model4, word_mediation_model14, word_mediation_model15, \
    word_mediation_model6, word_mediation_model7, word_mediation_model8, word_base_algorithm, word_cfa, word_sem, \
    word_describe, word_dcxy
from utils.analysis.cfa import cfa_r
from utils.analysis.sem import sem_r
from docx import Document

from utils.report.base import TableCount, set_base_style


def title_tid(record_id: int):
    """Return the stored title list and saved user config for one record.

    :param record_id: id of the upload record
    :return: dict with 'title_config' (list of title dicts, empty list when
        no data row exists) and 'user_config' (dict, empty when no config row)
    :raises APIException: when the database lookup itself fails
    """
    try:
        title_config = db.session.query(model.PReportDatas).filter_by(record_id=record_id).first()
        user_config = db.session.query(model.PReportConfig).filter_by(record_id=record_id).first()
    except Exception as e:
        # Chain the original exception so the real DB failure is not lost.
        raise APIException(message="查询配置信息出错") from e
    finally:
        db.session.close()
    # NOTE(review): attributes are read after session.close(); this relies on
    # the columns already being loaded — confirm session expire_on_commit config.
    res = dict()
    res['title_config'] = []
    res['user_config'] = {}
    if title_config:
        res['title_config'] = title_config.titles
    if user_config:
        res['user_config'] = user_config.config
    return res


def analysis(record_id: int, config: list):
    """Run every checked analysis section and render a Word report.

    :param record_id: id of the upload record whose rows are analysed
    :param config: list of section dicts, each with 'name', 'is_checked'
        and an optional 'config' payload; persisted to PReportConfig as a
        side effect
    :return: tuple (in-memory BytesIO of the rendered .docx, "123")
    :raises APIException: when the config, or a required part of it, is missing
    """
    # 1. Load the raw rows and the tid -> title-name mapping.
    db_obj = model.PReportDatas.query.filter_by(record_id=record_id).first()
    title_config = db_obj.titles
    title_tid_dict = dict()
    for i in title_config:
        title_tid_dict[i.get('tid')] = i.get('name')
    # 2. Index the section configs by section name. Sections are accessed
    #    below via d['...'], so a missing section raises KeyError.
    d = dict()
    if not config:
        raise APIException(message="配置为空，无法生成报告")
    for i in config:
        d[i.get('name')] = i
    # Upsert the submitted config into the database.
    db_config_obj = model.PReportConfig.query.filter_by(record_id=record_id).first()
    now = int(time.time() * 1000)
    if db_config_obj:
        db_config_obj.config = config
        db_config_obj.update_time = now
    else:
        db_config_obj = model.PReportConfig(record_id=record_id, config=config, create_time=now, update_time=now)
    db.session.add(db_config_obj)
    db.session.commit()
    global_config = d['全局'].get('config')
    if not global_config:
        raise APIException(message="全局默认配置为空，无法生成报告")
    global_groups = global_config.get('groups')
    global_groups_uuid_dict = dict()
    for i in global_groups:
        global_groups_uuid_dict[i.get('uuid')] = i.get('name')
    # Resolve the uuid lists in the global config to group display names.
    va_name_arr = [global_groups_uuid_dict.get(i) for i in global_config.get('va')]
    da_name_arr = [global_groups_uuid_dict.get(i) for i in global_config.get('da')]
    ma_name_arr = [global_groups_uuid_dict.get(i) for i in global_config.get('ma')]
    wa_name_arr = [global_groups_uuid_dict.get(i) for i in global_config.get('wa')]
    ca_name_arr = []
    for i in global_config.get('ca'):
        ca_name_arr.append(i.get('name'))
    # NOTE(review): 'va' and 'da' are deliberately crossed here per the
    # original comments (global_va comes from config key 'da' and vice
    # versa) — confirm the key-to-role mapping is intended.
    global_va = da_name_arr  # dependent variables
    global_da = va_name_arr  # independent variables
    global_ma = ma_name_arr  # mediator variables
    global_wa = wa_name_arr  # moderator variables
    global_ca = ca_name_arr  # control variables
    df = pd.DataFrame(db_obj.datas)
    df = df.rename(columns=title_tid_dict)
    # Build one per-row mean column for each group.
    mean_df = pd.DataFrame()  # group-mean columns only
    total_df = copy.deepcopy(df)  # raw columns plus the group-mean columns
    choose_group_df = copy.deepcopy(df)  # narrowed below to grouped columns only
    cur_clos = []
    global_groups_dict = dict()  # group name -> list of its item dicts
    for relation_i in range(len(global_groups)):
        cur_group = global_groups[relation_i]
        group_name = cur_group['name']  # column name the group mean is stored under
        cur_arr = cur_group['items']  # item dicts whose 'name' fields are df columns
        tids = []
        for i in cur_arr:
            tids.append(i.get('name'))
            cur_clos.append(i.get('name'))
        cur_arr_loc = tids
        global_groups_dict[group_name] = cur_arr
        group_df = df.loc[:, cur_arr_loc]  # this group's columns
        group_df = group_df.astype(float)  # coerce all values to float
        row_mean = group_df.mean(axis=1)  # per-row mean across the group's columns
        row_mean_df = pd.DataFrame({group_name: row_mean})
        total_df[group_name] = row_mean_df
        if mean_df.empty:
            mean_df = row_mean_df
        else:
            mean_df = mean_df.join(row_mean_df)
    choose_group_df = choose_group_df[cur_clos]
    # 3. Run each checked analysis and append its section to one document.
    doc = Document()
    set_base_style(doc)  # base font family and size
    table_count_obj = TableCount(1)
    # a. Reliability analysis.
    if d['信度'].get('is_checked') and bool(d['信度'].get('is_checked')):
        cur_df = copy.deepcopy(choose_group_df)
        cur_df = cur_df.astype(float)  # coerce all values to float
        ra_res = reliability_analysis.report_do_analysis(cur_df, mean_df, global_groups)
        word_base_algorithm.re_report(document=doc, reliability_analysis_obj=ra_res, table_count=table_count_obj.count)
    # b. Validity analysis.
    if d['效度'].get('is_checked') and bool(d['效度'].get('is_checked')):
        cur_df = copy.deepcopy(choose_group_df)
        cur_df = cur_df.astype(float)  # coerce all values to float
        # NOTE(review): the trailing 0, 1 arguments are undocumented Validity
        # options — confirm their meaning against validity_analysis.
        validity_obj = validity_analysis.Validity(cur_df, global_groups, 0, 1)
        va_res = validity_obj.report_do_analysis()
        word_base_algorithm.va_report(va_obj=va_res, document=doc, table_count=table_count_obj.count)
    # c. Correlation analysis (the section also reports mean / std dev).
    if d['相关'].get('is_checked') and bool(d['相关'].get('is_checked')):
        correlation_obj = correlation_analysis.Correction(mean_df)
        ca_res = correlation_obj.calculate_p()  # correlation with p-values
        describe_res = describe(mean_df)  # mean and standard deviation
        word_base_algorithm.co_report(co_arr=ca_res, document=doc, describe_res=describe_res)
    # d. Regression analysis.
    if d['回归'].get('is_checked') and bool(d['回归'].get('is_checked')):
        obj = regressive_analysis.RegressiveAnalysis(mean_df, global_da, global_va, 1)
        regressive_res = obj.report_analysis()
        word_base_algorithm.regressive_report(regressive_obj=regressive_res, document=doc,
                                              table_count=table_count_obj.count)
    # e. Frequency analysis (optionally restricted to configured columns).
    if d['频数'].get('is_checked') and bool(d['频数'].get('is_checked')):
        fre_df = copy.deepcopy(df)
        fre_config = d['频数'].get('config')
        if fre_config:
            if fre_config.get('items'):
                fre_items = fre_config.get('items')
                cur_names = []
                for i in fre_items:
                    cur_names.append(i.get('name'))
                fre_df = fre_df[cur_names]
        fa_obj = frequency_analysis.ReportFrequencyAnalysis(fre_df)
        fa = fa_obj.do_analysis()
        word_base_algorithm.frequency_report(document=doc, frequency_obj=fa, table_count=table_count_obj.count)
    # f. Descriptive statistics (optionally restricted to configured columns).
    if d['描述'].get('is_checked') and bool(d['描述'].get('is_checked')):
        describe_df = copy.deepcopy(df)
        des_config = d['描述'].get('config')
        if des_config:
            if des_config.get('items'):
                fre_items = des_config.get('items')
                cur_names = []
                for i in fre_items:
                    cur_names.append(i.get('name'))
                describe_df = describe_df[cur_names]
        src_data = describe(describe_df)
        word_describe.generate(src_data=src_data, doc=doc, table_count=table_count_obj.count)
    # g. Multiple-response analysis.
    if d['多重响应'].get('is_checked') and bool(d['多重响应'].get('is_checked')):
        mult_config = d['多重响应'].get('config')
        res = dcxy_analysis.dcxy_analysis(df, mult_config.get('items'))
        word_dcxy.generate(res, doc, table_count=table_count_obj.count)
        table_count_obj.add_table_count()
    # h. Mediation analysis (runs on raw data + group means).
    if d['中介'].get('is_checked') and bool(d['中介'].get('is_checked')):
        zj_df = total_df.copy()
        zj_config = d['中介'].get('config')
        intermediary_type = zj_config.get('intermediary_type')  # selected mediation model
        intermediary_count = zj_config.get('intermediary_count')  # bootstrap resample count
        if not intermediary_type:
            raise APIException(message="中介参数-中介类型-未填写")
        if not intermediary_count:
            raise APIException(message="中介参数-bootstrap抽样次数-未填写")
        if 1 == intermediary_type:  # Model 4
            obj = mediation_model4.MediationModel4(zj_df, global_da, global_va, global_ma, global_ca,
                                                   n_boot=intermediary_count)
            al_res = obj.analysis()
            doc = word_mediation_model4.generate(al_res, doc=doc)
        elif 2 == intermediary_type:  # Model 6 (serial mediation, needs >= 2 mediators)
            if len(global_ma) < 2:
                raise APIException(message="链式中介的中介变量个数必须大于等于两个")
            obj = mediation_model6.MediationModel6(zj_df, global_da, global_va, global_ma, global_ca,
                                                   n_boot=intermediary_count)
            al_res = obj.analysis()
            doc = word_mediation_model6.generate(al_res, doc=doc)
    # i. Moderation / moderated-mediation analysis.
    if d['调节'].get('is_checked') and bool(d['调节'].get('is_checked')):
        tj_df = total_df.copy()
        zj_config = d['调节'].get('config')
        model_type = zj_config.get('model_type')  # selected model number (1/7/8/14/15)
        # Model 1 needs no moderator list; all other models also require wa.
        if 1 == model_type:
            if not global_va or not global_da or not global_ma:
                raise APIException("调节变量参数缺失")
        else:
            if not global_va or not global_da or not global_ma or not global_wa:
                raise APIException("调节变量参数缺失")
        if 1 == model_type:
            me4_obj = mediation_model1.MediationModel1(tj_df, global_da, global_va, global_wa, global_ca)
            me4_al_res = me4_obj.analysis()
            doc = word_mediation_model1.generate(me4_al_res, doc=doc)
        elif 7 == model_type:
            obj = mediation_model7.MediationModel7(tj_df, global_da, global_va, global_ma, global_wa, global_ca)
            al_res = obj.analysis()
            doc = word_mediation_model7.generate(al_res, doc=doc)
        elif 8 == model_type:
            obj = mediation_model8.MediationModel8(tj_df, global_da, global_va, global_ma, global_wa, global_ca)
            al_res = obj.analysis()
            doc = word_mediation_model8.generate(al_res, doc=doc)
        elif 14 == model_type:
            obj = mediation_model14.MediationModel14(tj_df, global_da, global_va, global_ma, global_wa, global_ca)
            al_res = obj.analysis()
            doc = word_mediation_model14.generate(al_res, doc=doc)
        elif 15 == model_type:
            obj = mediation_model15.MediationModel15(tj_df, global_da, global_va, global_ma, global_wa, global_ca)
            al_res = obj.analysis()
            doc = word_mediation_model15.generate(al_res, doc=doc)
    # j. Confirmatory factor analysis (CFA).
    if d['验证性分析'].get('is_checked') and bool(d['验证性分析'].get('is_checked')):
        cfa_df = copy.deepcopy(df)
        # Build the per-group column dict plus the two name maps: internal
        # "sys"-prefixed keys feed the algorithm, and name_dict maps them
        # back to the user-entered names for report rendering.
        cur_group_dict = dict()
        name_dict = dict()  # internal key -> user-entered group name
        name_to_sys_name_dict = dict()  # user-entered group name -> internal key
        cfa_config = d['验证性分析'].get('config')
        for k, v in global_groups_dict.items():
            v_arr = []
            sys_ana_key = "sys" + str(k)
            for vi in v:
                v_arr.append(vi.get('name'))
            cur_group_dict[sys_ana_key] = v_arr
            name_dict[sys_ana_key] = k
            name_to_sys_name_dict[k] = sys_ana_key
        # Collect the selected inter-group relations (translated to internal keys).
        relations = list()
        if cfa_config:
            config_relations = cfa_config.get("relations")
            for ci in config_relations:
                if bool(ci.get('is_selected')):
                    from_name = name_to_sys_name_dict.get(ci.get('fromName'))
                    to_name = name_to_sys_name_dict.get(ci.get('toName'))
                    relations.append(SEMRelationConfig(from_name, to_name))
        obj = cfa_r.CFA(cfa_df, cur_group_dict, relations)
        cfa_res = obj.analysis_v2()
        doc = word_cfa.generate(cfa_res, doc=doc, name_dict=name_dict)
    # k. Structural equation model (SEM).
    if d['结构模型'].get('is_checked') and bool(d['结构模型'].get('is_checked')):
        sem_df = copy.deepcopy(df)
        # Same internal-key mapping scheme as the CFA section above.
        cur_group_dict = dict()
        name_dict = dict()  # internal key -> user-entered group name
        name_to_sys_name_dict = dict()  # user-entered group name -> internal key
        for k, v in global_groups_dict.items():
            v_arr = []
            sys_ana_key = "sys" + str(k)
            for vi in v:
                v_arr.append(vi.get('name'))
            cur_group_dict[sys_ana_key] = v_arr
            name_dict[sys_ana_key] = k
            name_to_sys_name_dict[k] = sys_ana_key
        # Collect the selected relations between groups.
        relations = list()
        sem_config = d['结构模型'].get('config')
        if sem_config:
            config_relations = sem_config.get("relations")
            for ci in config_relations:
                if bool(ci.get('is_selected')):
                    from_name = name_to_sys_name_dict.get(ci.get('fromName'))
                    to_name = name_to_sys_name_dict.get(ci.get('toName'))
                    relations.append(SEMRelationConfig(from_name, to_name))
        obj = sem_r.SEM(sem_df, cur_group_dict, relations)
        sem_res = obj.analysis()
        doc = word_sem.generate(sem_res, doc=doc, name_dict=name_dict)
    # Serialize the finished document into an in-memory byte stream and
    # rewind it so callers can read from position 0.
    stream_file = io.BytesIO()
    doc.save(stream_file)
    stream_file.seek(0)
    # NOTE(review): "123" looks like a placeholder second return value —
    # confirm what callers expect here (file name? record id?).
    return stream_file, "123"


def check_valid_analysis():
    """Validate that stored data can participate in analysis.

    Currently a stub with no checks implemented; always returns ``None``.
    """
    return None


def export_report(record_id, export_type: str):
    """Export a record's raw data for the requested file type.

    :param record_id: id of the upload record
    :param export_type: one of the DataCacheEnum values (excel/word/pdf)
    :return: tuple (closed NamedTemporaryFile holding the bytes, file name)
    :raises APIException: on unknown export type, missing data, or export failure

    NOTE(review): the per-type branches are placeholders — every recognized
    type currently falls through to the same Excel-style export below.
    """
    if DataCacheEnum.EXCEL.value == export_type:
        pass
    elif DataCacheEnum.WORD.value == export_type:
        pass
    elif DataCacheEnum.PDF.value == export_type:
        pass
    else:
        # Unknown export type: fail fast before touching the database.
        raise APIException(message="导出文件类型未知")

    title_config = db.session.query(model.PReportDatas).filter_by(record_id=record_id).first()
    record = model.PReportRecord.query.get(record_id)
    if not record or not title_config:
        raise APIException(message="未查询到数据")

    file_name = record.file_name or '未知文件'
    datas = title_config.datas  # raw data rows
    titles = title_config.titles  # header metadata: list of {'name', 'tid'} dicts
    # Build the sheet: header row (when present) followed by the data rows.
    try:
        data_arr = datas
        if titles:
            title_arr = []
            for i in titles:
                # BUG FIX: titles are dicts (see upload_data), so the former
                # attribute access `i.name` raised AttributeError and every
                # export with titles failed with the generic error below.
                # Use .get('name'), consistent with download_data.
                title_arr.append(i.get('name'))
            data_arr = [title_arr] + data_arr  # header becomes the first row
        df = pd.DataFrame(data_arr)
        excel_data = io.BytesIO()
        with pd.ExcelWriter(excel_data, engine='xlsxwriter') as writer:
            df.to_excel(writer, index=False, header=False)
        excel_data.seek(0)
        res = excel_data.getvalue()
        excel_data.close()
    except Exception as e:
        print(e)
        # Chain the cause so the real failure is preserved in the traceback.
        raise APIException(message="导出数据出错") from e
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.write(res)
    temp_file.close()
    return temp_file, file_name


def search_data(record_id: int):
    """Fetch the raw rows, title config, and file name for one record.

    :param record_id: id of the upload record
    :return: dict with 'datas', 'titles', and 'file_name'
    :raises APIException: when either the data row or the record is missing
    """
    data_row = db.session.query(model.PReportDatas).filter_by(record_id=record_id).first()
    record_row = db.session.query(model.PReportRecord).get(record_id)
    if not data_row or not record_row:
        raise APIException(message="未查询到数据")
    return {
        'datas': data_row.datas,
        'titles': data_row.titles,
        'file_name': record_row.file_name,
    }


def modify_file_name(record_id: int, file_name: str):
    """Rename an uploaded record's file.

    :param record_id: id of the record to rename
    :param file_name: new (non-empty) file name
    :return: success message string
    :raises APIException: when the new name is empty or the record is missing
    """
    if not file_name:
        # Message typo fixed: "未空" ("not empty") -> "为空" ("is empty"),
        # matching the actual condition being reported.
        raise APIException(message="新文件名称为空，无法修改")
    item = model.PReportRecord.query.get(record_id)
    if not item:
        raise APIException(message="文件不存在")
    item.file_name = file_name
    db.session.add(item)
    db.session.commit()
    return "修改成功"


def record_config(record_id: int):
    """Return the previously saved analysis config for a record, if any.

    :param record_id: id of the upload record
    :return: dict with 'has_config' (bool) and 'config' (saved config
        dict, or empty dict when none has been stored)
    """
    row = model.PReportConfig.query.filter_by(record_id=record_id).first()
    if row:
        return {'has_config': True, 'config': row.config}
    return {'has_config': False, 'config': {}}


def record_list(page_num, page_size):
    """Page through non-deleted upload records, newest first.

    :param page_num: page index (NOTE(review): offset is page_num * page_size,
        i.e. page_num is assumed 0-based — confirm against callers)
    :param page_size: records per page
    :return: dict with 'list' (page of {upload_time, file_name, record_id})
        and 'total_size' (total matching records)
    """
    items = (db.session.query(model.PReportRecord).order_by(model.PReportRecord.create_time.desc()).
             filter_by(status=ReportStatusEnum.USING.value).offset(page_num * page_size).limit(page_size).all())
    cur_list = [{'upload_time': i.upload_time, 'file_name': i.file_name, 'record_id': i.id} for i in items]
    # PERF FIX: use SQL COUNT(*) instead of fetching every matching row
    # into memory just to len() it.
    total_size = (db.session.query(model.PReportRecord)
                  .filter_by(status=ReportStatusEnum.USING.value).count())
    return {'list': cur_list, 'total_size': total_size}


def delete_file(record_id: int):
    """Soft-delete an upload record by flipping its status flag.

    :param record_id: id of the record to delete
    :return: success message string
    :raises APIException: when no record exists for the id
    """
    record = model.PReportRecord.query.get(record_id)
    if record is None:
        raise APIException(message="文件不存在")
    # Logical delete only — the row stays, its status changes.
    record.status = ReportStatusEnum.DELETED.value
    db.session.add(record)
    db.session.commit()
    db.session.close()
    return "删除成功"


def download_data(record_id):
    """Package a record's raw rows (plus header row) into an xlsx temp file.

    :param record_id: id of the upload record
    :return: tuple (closed NamedTemporaryFile holding the xlsx bytes, file name)
    :raises APIException: when the record/data is missing or export fails
    """
    # 1. Load the data row and the record metadata.
    data_row = db.session.query(model.PReportDatas).filter_by(record_id=record_id).first()
    record = model.PReportRecord.query.get(record_id)
    if not record or not data_row:
        raise APIException(message="未查询到数据")
    file_name = record.file_name or '未知文件'
    datas = data_row.datas  # raw data rows
    titles = data_row.titles  # header metadata: list of {'name', 'tid'} dicts
    # 2. Write header + rows into an in-memory Excel workbook.
    try:
        rows = datas
        if titles:
            header = [t.get('name') for t in titles]
            rows = [header] + rows  # header becomes the first row
        frame = pd.DataFrame(rows)
        buffer = io.BytesIO()
        with pd.ExcelWriter(buffer, engine='xlsxwriter') as writer:
            frame.to_excel(writer, index=False, header=False)
        buffer.seek(0)
        payload = buffer.getvalue()
        buffer.close()
    except Exception as e:
        print(e)
        raise APIException(message="导出数据出错")
    # 3. Persist the bytes to a temp file the caller can serve/clean up.
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.write(payload)
    temp_file.close()
    return temp_file, file_name


def upload_data(file_data):
    """Parse an uploaded Excel file and persist its rows and title config.

    :param file_data: uploaded file object exposing .read() and .filename
    :return: dict with 'datas' (data rows), 'file_name', 'record_obj'
        (new record id), and 'title_config' (list of {'name', 'tid'} dicts)
    :raises APIException: when the workbook cannot be read
    """
    # 1. Read the first worksheet of the uploaded workbook.
    try:
        stream = io.BytesIO(file_data.read())
        book = openpyxl.load_workbook(stream, data_only=True)  # values only, no formulas
        sheet1 = book.worksheets[0]
    except Exception as e:
        raise APIException(message="读取文件数据失败") from e
    # 2. Single pass over the sheet (the original iterated it twice):
    #    row 0 becomes the tid->name title config, all later rows are data.
    data_array = []
    title_config = []
    for index, row_tuple in enumerate(sheet1.iter_rows(values_only=True)):
        row = list(row_tuple)
        if index == 0:
            for tid, name in enumerate(row):
                title_config.append({'name': name, 'tid': tid})
        else:
            data_array.append(row)
    # 3. Persist the record, then the data keyed on the new record id.
    now = int(time.time() * 1000)
    file_name = file_data.filename
    record_obj = model.PReportRecord(upload_time=now, file_name=file_name, status=ReportStatusEnum.USING.value,
                                     create_time=now, update_time=now)
    db.session.add(record_obj)
    db.session.flush()  # populate record_obj.id before the data row references it
    data_obj = model.PReportDatas(record_id=record_obj.id, datas=data_array, titles=title_config,
                                  create_time=now, update_time=now)
    db.session.add(data_obj)
    db.session.commit()
    # 4. Return everything the client needs to render the upload result.
    return {
        'datas': data_array,
        'file_name': file_name,
        'record_obj': record_obj.id,
        'title_config': title_config,
    }
