import ceshi

from utils.mysqlUtils.db_utils import DB
from utils.mysqlUtils.sql_server_utils import MSSQL
from utils.readFileUtils.excel_control import write_excle_data
import ast
import copy

class Testishotdata:
    """Utilities for comparing / de-duplicating rows between a SQL Server
    ("production") database, a MySQL ("cold") database, and flat text files.

    NOTE(review): relies on project helpers DB / MSSQL / write_excle_data.
    Their row format is not visible here — presumably one dict per row
    (Hot_data calls `.get("OrderSourceSKU")` on rows) — confirm against the
    utils.mysqlUtils implementations.
    """

    def data_comparison(self, mysql_sql, sqlserver_sql, mysql_db, sqlserver_db, filedir, whether_circulation=False):
        """Compare SQL Server query results against MySQL query results and
        write the rows missing on the smaller side to an Excel report.

        :param mysql_sql: SQL text executed against MySQL.
        :param sqlserver_sql: SQL text, or an iterable of SQL texts when
            whether_circulation is True.
        :param mysql_db: MySQL config name passed to DB().
        :param sqlserver_db: SQL Server config name passed to MSSQL().
        :param filedir: destination passed to write_excle_data.
        :param whether_circulation: when True, execute every statement in
            sqlserver_sql, combine the results and de-duplicate them.
        :return: (error_list, error_data) — the missing rows and a
            human-readable summary string.
        """
        # --- SQL Server side ---
        if whether_circulation:
            # BUG FIX: the original re-wrapped the connection inside the loop
            # (MSSQL(MSSQL(...)) from the 2nd iteration), called set() on a
            # list of result *lists* (unhashable → TypeError), and then never
            # used the de-duplicated result. Connect once, flatten, and
            # de-duplicate order-preservingly without requiring hashable rows.
            conn = MSSQL(sqlserver_db)
            combined = []
            for statement in sqlserver_sql:
                combined.extend(conn.ExecQuery(statement))
            sqlserver_res = []
            for row in combined:
                if row not in sqlserver_res:
                    sqlserver_res.append(row)
        else:
            sqlserver_res = MSSQL(sqlserver_db).ExecQuery(sqlserver_sql)
        sum_sqlserver_res = len(sqlserver_res)

        # --- MySQL side ---
        mysql_res = DB(mysql_db).execute_sql_many(mysql_sql)
        sum_mysql_res = len(mysql_res)

        # Collect the rows present on the larger side but absent on the other.
        error_list = []
        if sum_sqlserver_res > sum_mysql_res:
            for row in sqlserver_res:
                if row not in mysql_res:
                    error_list.append(row)
        else:
            for row in mysql_res:
                if row not in sqlserver_res:
                    error_list.append(row)

        # Persist the mismatching rows and build the summary.
        write_excle_data(error_list, filedir)
        error_data = f'生产库数据有{sum_sqlserver_res}条数据,冷数据库表有{sum_mysql_res}条数据,冷数据库表缺失{len(error_list)}条数据'

        return error_list, error_data

    def data_duplicate_checking(self, db_name, mysql_sql, sqlserver_sql, mysql_db=False, sqlserver_db=False):
        """Count duplicate rows returned by a query and write them to Excel.

        Exactly one of mysql_db / sqlserver_db should be truthy; it selects
        which backend db_name refers to.

        :return: summary string, or None when neither backend flag is set.
        """
        unique_rows = []
        repetition_data = []
        duplicate_count = 0  # renamed from `sum` — shadowed the builtin

        if mysql_db:
            rows = DB(db_name).execute_sql_many(mysql_sql)
        elif sqlserver_db:
            rows = MSSQL(db_name).ExecQuery(sqlserver_sql)
        else:
            return None
        total = len(rows)

        # BUG FIX: the original tested membership against the (always empty)
        # duplicate list, so no duplicate was ever detected. Test against the
        # rows already seen instead.
        for row in rows:
            if row not in unique_rows:
                unique_rows.append(row)
            else:
                repetition_data.append(row)
                duplicate_count += 1

        write_excle_data(repetition_data, filedir="D:\web_autotest\zhcx-api-autotest\common\\")
        error_data = f'文件有{total}条数据,重复数据有{duplicate_count}条数据'
        # BUG FIX: the SQL Server branch originally fell through without a
        # return; both backends now return the summary.
        return error_data

    def read_file_data(self, filedir, filename):
        """Read a comma-separated text file and report duplicate entries.

        :param filedir: path of the file to read (UTF-8).
        :param filename: destination passed to write_excle_data.
        :return: (repetition_data, error_data) — the duplicate entries and a
            summary string.
        """
        with open(filedir, encoding='utf-8') as file:
            content = file.read()

        # file.read() always yields str, so split directly into entries.
        entries = content.split(',')
        seen = []
        repetition_data = []
        for entry in entries:
            if entry not in seen:
                seen.append(entry)
            else:
                repetition_data.append(entry)

        write_excle_data(repetition_data, filename)
        # BUG FIX: the original summary referenced `res`, which was unbound
        # whenever the (dead) non-str branch ran; count the entries directly.
        error_data = f'文件有{len(entries)}条数据,重复数据有{len(repetition_data)}条数据'

        return repetition_data, error_data

    # Database - file comparison
    def file_duplicate_checking(self, db_name, mysql_sql, filedir, file_name):
        """Compare database rows with a newline-separated SKU file.

        Each file line is wrapped as {"OrderSourceSKU": line} so it can be
        compared directly against the dict rows the database returns.

        :param db_name: MySQL config name passed to DB().
        :param mysql_sql: query whose rows are compared against the file.
        :param filedir: path of the SKU file (UTF-8, one SKU per line).
        :param file_name: destination passed to write_excle_data.
        :return: (repetition_data, error_data) — the rows missing on the
            smaller side and a summary string.
        """
        # Execute the caller-supplied SQL.
        mysql_res = DB(db_name).execute_sql_many(mysql_sql)
        sum_mysql_res = len(mysql_res)

        # Parse the file into de-duplicated dict records.
        with open(filedir, encoding='utf-8') as file:
            lines = file.read().split('\n')
        content_list = []
        surplus_data = []
        for line in lines:
            record = {"OrderSourceSKU": line}
            if record not in content_list:
                content_list.append(record)
            else:
                surplus_data.append(record)
        sum_file_data = len(content_list)

        # Whichever side is larger, collect its rows missing on the other.
        repetition_data = []
        if sum_mysql_res > sum_file_data:
            for row in mysql_res:
                if row not in content_list:
                    repetition_data.append(row)
        else:
            for record in content_list:
                if record not in mysql_res:
                    repetition_data.append(record)
        error_data = f'数据库有{sum_mysql_res}条数据,文件有{sum_file_data}条数据,文件缺少{len(repetition_data)}条数据'

        # Write the mismatches to the report file.
        write_excle_data(repetition_data, file_name)

        return repetition_data, error_data

    def Hot_data(self, db_name, filedir, file_name):
        """Run every SQL statement listed in filedir against SQL Server and
        collect the distinct OrderSourceSKU values of the combined results.

        :param db_name: SQL Server config name passed to MSSQL().
        :param filedir: path of a file with one SQL statement per line.
        :param file_name: destination passed to write_excle_data.
        :return: summary string describing how many SKUs qualified.
        """
        with open(filedir, encoding='utf-8') as file:
            raw = file.read()
        # An empty file yields no statements (matches the original, which
        # iterated over the empty string and executed nothing).
        statements = raw.split('\n') if raw else []

        conn = MSSQL(db_name)
        all_hot_data = []
        for statement in statements:
            all_hot_data.extend(conn.ExecQuery(statement))

        # De-duplicate the SKU column, preserving first-seen order.
        hot_data_OrderSourceSKU = []
        for row in all_hot_data:
            sku = row.get("OrderSourceSKU")
            if sku not in hot_data_OrderSourceSKU:
                hot_data_OrderSourceSKU.append(sku)

        hot_data = f'符合热数据条件有{len(hot_data_OrderSourceSKU)}条数据'
        write_excle_data(hot_data_OrderSourceSKU, file_name)

        return hot_data


if __name__ == '__main__':

    # Database-vs-file comparison (currently the only active run).
    sql = "SELECT OrderSourceSKU FROM aml_amazon_product_base_dep86 WHERE OrderSourceID = 5406 AND IsHotData = 1;"
    res = Testishotdata().file_duplicate_checking(db_name="mysql_db_code_base", mysql_sql=sql,filedir="D:\web_autotest\zhcx-api-autotest\Common\TESTDATA-20221129092448.csv",file_name="D:\web_autotest\zhcx-api-autotest\common\\")
    print(res)

    # Fetch the OrderSourceSKU values that satisfy the hot-data condition.
    # res = Testishotdata().Hot_data(db_name="mysql_db_erp_base",filedir="D:\web_autotest\zhcx-api-autotest\commonUtils\热数据sql.csv",file_name="D:\web_autotest\zhcx-api-autotest\commonUtils\\")
    # print(res)

    # Database-to-database comparison (MySQL vs SQL Server).
    # mysql_sql = "SELECT OrderSourceSKU FROM aml_amazon_product_base_dep86 WHERE OrderSourceID = 5406;"
    # sqlserver_sql = "SELECT OrderSourceSKU FROM RB_Amazon_Product_Base WHERE CustomerID = 1 AND OrderSourceID = 5406 AND OrderSourceSKUID > 0 AND AddTime < '2022-11-14'"
    #
    # mysql_db = "mysql_db_code_base"
    # sqlserver_db = "mysql_db_erp_base"
    # res = Testishotdata().data_comparison(mysql_sql=mysql_sql, sqlserver_sql=sqlserver_sql, mysql_db=mysql_db,sqlserver_db=sqlserver_db,filedir="D:\web_autotest\zhcx-api-autotest\commonUtils\\",whether_circulation=False)
    # print(res)

    # Database-vs-file comparison (earlier data file).
    # sql = "SELECT OrderSourceSKU FROM aml_amazon_product_base_dep86 WHERE OrderSourceID = 5406 AND IsHotData = 1;"
    # res = Testishotdata().file_duplicate_checking(db_name="mysql_db_code_base", mysql_sql=sql,filedir="D:\web_autotest\zhcx-api-autotest\commonUtils\TESTDATA-20221122162050.csv",file_name="D:\web_autotest\zhcx-api-autotest\commonUtils\\")
    # print(res)

    # File duplicate check.
    # res = Testishotdata().read_file_data("D:\web_autotest\zhcx-api-autotest\commonUtils\冷数据-ordersourceskuid.txt",filename="D:\web_autotest\zhcx-api-autotest\commonUtils\\")
    # print(res)
    # # list_res = res.split("\n")

    # # Database duplicate check.
    # NOTE(review): the commented call below passes `mysql_aaa=sql`, which is
    # not a parameter of data_duplicate_checking — looks like a typo for
    # `mysql_sql`; fix before re-enabling.
    # sqlserver_name = "mysql_db_code_base"
    # sql = "SELECT OrderSourceSKU FROM RB_Amazon_Product_Base_ColdHotData WHERE OrderSourceID = 5005;"
    # mysql_sql = "SELECT OrderSourceSKUID FROM aml_amazon_product_base_dep86 WHERE OrderSourceID = 5406;"
    # filedata = Testishotdata().data_duplicate_checking(db_name=sqlserver_name,mysql_aaa=sql, sqlserver_sql=sql, sqlserver_db=True)
