import os
import math
import time
import redis
import pymysql
import pandas as pd
from ast import literal_eval
from models import sess, Models
from chardet import UniversalDetector
from logger import logInfo, logWarning
from db_con import credibleIndex, dataIndex

# https://blog.csdn.net/wld914674505/article/details/81431128?utm_medium=distribute.pc_relevant_t0.none-task-blog-OPENSEARCH-1.channel_param&depth_1-utm_source=distribute.pc_relevant_t0.none-task-blog-OPENSEARCH-1.channel_param
# 导出pip安装的所有放入一个文件中，并把通过这个安装所有的包
# 导出pip安装的所有的包：
# pip freeze > install.txt
# 在新的环境中安装导出的包
# 需要安装 pandas chardet redis toml yaml pymysql，使用 pip install -r install.txt 安装
# pip install -r install.txt -i https://pypi.tuna.tsinghua.edu.cn/simple

# Shared chardet detector, reused by get_file_encoding().
detector = UniversalDetector()
# Local redis connection with responses decoded to str.
# NOTE(review): despite the comments below, the import path accumulates rows
# in the in-process dict `dictData`, not in redis — confirm whether this
# connection is actually still used anywhere.
redisCon = redis.Redis(host='127.0.0.1', port=6379, decode_responses=True, password='')
# Global accumulator shared by import_data()/import_mysql():
# {credible-column key: {header: value}} rows merged across all csv files.
dictData = {}


# Import every csv file in the current directory into dictData, then load it
# into MySQL via import_mysql().
def import_data():
    """Scan the working directory for .csv files, normalise each row into a
    {header: value} dict keyed by a trusted column's value, merge rows that
    share the same key across files, then hand off to import_mysql().

    Side effects: populates the module-level ``dictData`` and ultimately
    writes to MySQL. Only utf-8 encoded files are supported; files that fail
    to decode are logged and skipped.
    """
    logInfo('开始读取文件。')
    path = os.path.abspath('.')
    fileArr = []
    # Collect every .csv file under the current directory tree.
    for root, dirs, files in os.walk(path, topdown=False):
        for name in files:
            if os.path.splitext(name)[1] == '.csv':
                fileArr.append(name)
    for file in fileArr:
        # Skip priority-claim tables for now.
        if '优先权' in file:
            print('先跳过优先权表')
            continue
        print('开始处理：' + file + '文件')
        logInfo('开始处理：' + file + '文件')
        # Read in 100000-row chunks as utf-8; skip the file on any
        # decode/parse error.
        try:
            df = pd.read_csv(file, chunksize=100000, encoding='utf-8', header=0, low_memory=False,
                             error_bad_lines=False, na_filter=False)
        # BUGFIX: encoding problems raise UnicodeDecodeError/ValueError (not
        # KeyError), and concatenating the exception object itself to a str
        # raised TypeError — wrap it in str().
        except (UnicodeDecodeError, ValueError) as e:
            logInfo('错误：' + file + '文件保存编码不为utf-8,现版本仅支持utf-8:' + str(e))
            continue

        for table in df:
            columnsList = table.columns.tolist()
            if '商品序号' in columnsList:
                # Skip registered goods/services tables for now.
                print('先跳过注册商品服务信息')
                logInfo('先跳过注册商品服务信息')
                break
            elif '共有人中文名称' in columnsList:
                # Skip co-owner tables for now.
                print('先跳过共有人信息')
                logInfo('先跳过共有人信息')
                break
            dataList = table.values.tolist()
            # credibleColumns: the first header that appears in
            # credibleIndex — its value becomes part of the dict key.
            credibleColumns = ''
            for data in dataList:
                columnsKey = 0
                # Position of the trusted column within the row.
                credibleKey = 0
                # This row rendered as {header: value-as-string}.
                tableResDataResData = {}
                for columns in columnsList:
                    # Replace NaN cells with the literal string 'null'.
                    if not isinstance(data[columnsKey], str) and math.isnan(data[columnsKey]):
                        data[columnsKey] = 'null'
                    tableResDataResData.update({columns: str(data[columnsKey])})
                    if credibleColumns == '' and columns in credibleIndex:
                        credibleColumns = columns
                        credibleKey = columnsKey
                    columnsKey = columnsKey + 1

                dictKey = credibleColumns + '__' + str(data[credibleKey])
                if '国际分类' in tableResDataResData:
                    # Append the international class so the same mark in
                    # different classes gets distinct keys.
                    index = columnsList.index('国际分类')
                    dictKey = dictKey + '__' + '国际分类__' + str(data[index])

                if dictKey in dictData:
                    # Another file already produced this key: merge the rows,
                    # logging any conflicting values (the new value wins).
                    resOther = dictData[dictKey]
                    for resData in list(resOther):
                        if resData != credibleColumns and resData != '国际分类':
                            if (resData in tableResDataResData) and (resOther[resData] == tableResDataResData[resData]):
                                resOther.pop(resData)
                            elif resData in tableResDataResData and resOther[resData] != tableResDataResData[resData]:
                                msg = '数据冲突，在csv文件为：' + file + dictKey + '项中：原值为' + resData + '->' + str(
                                    resOther[resData])
                                msg += '新值为' + resData + '->' + str(tableResDataResData[resData]) + ',其中新值生效。'
                                logWarning(msg)
                                resOther.pop(resData)
                    # BUGFIX: the merge used to run only inside the conflict
                    # branch, so rows whose overlapping values were all equal
                    # (or disjoint) silently lost the new row's data and the
                    # popped keys. Merge unconditionally here.
                    dictData[dictKey] = Merge(resOther, tableResDataResData)
                else:
                    dictData[dictKey] = tableResDataResData
            logInfo('处理' + file + ' 100000条数据到distData中，完成')
        # Drop the reader before moving to the next (potentially large) file.
        del df
    import_mysql()


# Flush the rows accumulated in dictData into the MySQL trademarks table.
def import_mysql():
    """Insert every entry of the module-level ``dictData`` into
    tradesource.tm_trademarks, mapping csv headers to column names via
    dataIndex and converting date strings to unix timestamps.

    Side effects: connects to a local MySQL server and commits one insert
    per dictData entry. The connection is always closed, even on error.
    """
    logInfo('开始redis导入到数据库')
    db = pymysql.connect(host='localhost', user='root', password='root', database='')
    try:
        for item in dictData:
            data = dictData[item]
            mysqlImportData = {}
            for da in data:
                data[da] = str(data[da])
                if data[da] == 'null':
                    data[da] = ''
                if da in dataIndex:
                    # Date columns become unix timestamps (0 when empty).
                    if da.find('日期') != -1:
                        if data[da] != '':
                            data[da] = str(time.mktime(time.strptime(data[da], '%Y-%m-%d %H:%M')))
                        else:
                            data[da] = 0
                    mysqlImportData.update({dataIndex[da]: data[da]})
                if da == '商标名称':
                    mysqlImportData.update({'cn': data['商标名称']})
                elif da == '代理机构代码':
                    # NOTE(review): orgData is a {header: value} dict, so
                    # orgData[0] / orgData[1] look up the integer keys 0 and 1
                    # — this comparison almost certainly never matches and
                    # orgName ends up 0. Kept as-is pending confirmation of
                    # the intended agency lookup.
                    for org in dictData:
                        orgData = dictData[org]
                        if data[da] == orgData[0]:
                            mysqlImportData.update({'orgName': orgData[1]})
                        else:
                            mysqlImportData.update({'orgName': 0})
            columns = []
            values = []
            for mysqlDataKey in list(mysqlImportData):
                if mysqlDataKey == '':
                    mysqlImportData.pop(mysqlDataKey)
                    continue
                if mysqlImportData[mysqlDataKey] == '':
                    mysqlImportData[mysqlDataKey] = 0
                columns.append(str(mysqlDataKey))
                values.append(mysqlImportData[mysqlDataKey])
            # Nothing mapped for this entry — skip rather than emit an
            # invalid empty INSERT.
            if not columns:
                continue
            # BUGFIX: build a parameterized insert instead of concatenating
            # double-quoted values into the SQL string, which broke on values
            # containing quotes and was injectable.
            placeholders = ','.join(['%s'] * len(values))
            query = ('insert into tradesource.tm_trademarks (' + ','.join(columns) +
                     ') values (' + placeholders + ')')
            with db.cursor() as cursor:
                cursor.execute(query, values)
            db.commit()
    finally:
        db.close()
    logInfo('redis导入到数据库完成')


# Combine two dicts into a fresh dict (dict2 wins on duplicate keys).
def Merge(dict1, dict2):
    """Return a new dict with dict1's entries overlaid by dict2's."""
    combined = dict(dict1)
    combined.update(dict2)
    return combined


# Detect the text encoding a csv file was saved with.
def get_file_encoding(file):
    """Feed the file's raw lines to the module-level chardet detector and
    return its result dict (e.g. ``{'encoding': ..., 'confidence': ...}``).

    BUGFIX: the original opened the file without a context manager (the
    handle leaked if ``feed`` raised) and used ``readlines()``, loading the
    whole file into memory; iterate the handle lazily instead.
    """
    detector.reset()
    with open(file, 'rb') as bigdata:
        for line in bigdata:
            detector.feed(line)
            # Stop as soon as the detector is confident.
            if detector.done:
                break
    detector.close()
    return detector.result


if __name__ == '__main__':
    # Script entry point: ingest the csv files, then load them into MySQL
    # (import_data() calls import_mysql() when done).
    import_data()
