# -*- coding: utf-8 -*-
import time

import webview
import pandas as pd
import os
import chardet
import string
from hashlib import md5
from re import escape


class Api:
    def read_csv(self, data):
        header_num = data["head_rows"]
        csv_files = data["selectedFolder"]
        num = 0
        file_tasks = [suffix for suffix in os.listdir(csv_files) if suffix.endswith(".csv")]
        self.set_log(f"当前任务总数：{len(file_tasks)}")
        if data["key_words"]:
            data["keys_set"] = [val.strip() for val in Api.read_keys(data["selectedFile"]) if val.strip()]
        chunksize = 1000000
        self.hash_set = set()
        temp_df = pd.DataFrame()
        data["file_sum"] = 1
        first_s = True
        for file in file_tasks:
            num += 1
            self.set_log(f"当前读取进度：{num}，任务名称：{file}")
            file = os.path.join(csv_files, file)
            encoding = Api.get_encoding(file)
            if self.headers_df is None and header_num > 1 and num == 1:
                self.headers_df = pd.read_csv(file, nrows=header_num - 1, encoding=encoding, encoding_errors="replace",
                                              on_bad_lines='warn')
            if header_num > 1:
                df = pd.read_csv(file, skiprows=[1, header_num - 1], encoding=encoding, encoding_errors="replace",
                                 on_bad_lines='warn', chunksize=chunksize, iterator=True)
            elif header_num == 0:
                df = pd.read_csv(file, encoding=encoding, encoding_errors="replace", on_bad_lines='warn', header=None,
                                 chunksize=chunksize, iterator=True)
            else:
                df = pd.read_csv(file, encoding=encoding, encoding_errors="replace", on_bad_lines='warn',
                                 chunksize=chunksize, iterator=True)
            for chunk in df:
                if chunk.empty: break
                chunk = self.batch_df(data, chunk)
                self.set_log("正在进行数据导出···")
                if data["per_rows"] and data["per_rows_num"] > 0:
                    temp_df = pd.concat([temp_df, chunk], ignore_index=True)
                    rows_per_file = data["per_rows_num"]
                    while len(temp_df) >= rows_per_file:
                        self.export_file(temp_df.iloc[:rows_per_file], data, False)
                        temp_df.drop(temp_df.index[:rows_per_file], inplace=True)
                        data["file_sum"] += 1
                else:
                    data["file_sum"] = 0
                    self.export_file(chunk, data, first_s)
                    first_s = False
        if not temp_df.empty:
            self.export_file(temp_df, data, False)
        return None

    def batch_df(self, data, chunk):
        try:
            if data["rm_repeat"]:
                self.set_log("正在对指定列去重···")
                chunk = self.rm_repeat(chunk, data["rm_repeat_cols"])
            if data["key_words"] and data.get("keys_set"):
                self.set_log("正在过滤关键词···")
                chunk = self.rm_cols(chunk, data["key_words_cols"], data["keys_set"])
        except IndexError as e:
            self.set_log(f"输入的列标记{e}超出范围")
            return {"code": -1, "msg": f"输入的列标记{e}超出范围"}
        except KeyError as e:
            self.set_log(f"输入的列名{e}不存在")
            return {"code": -1, "msg": f"输入的列名{e}不存在"}
        if data["sample"]:
            self.set_log("正在进行数据随机打乱···")
            chunk = self.sample_data(chunk)
        return chunk

    @staticmethod
    def get_encoding(file):
        with open(file, 'rb') as f:
            result = chardet.detect(f.read(4096))
            encoding = result['encoding']
            if encoding is None or encoding == 'ascii':
                encoding = 'utf-8'
        return encoding

    @staticmethod
    def sample_data(df):
        return df.sample(frac=1).reset_index(drop=True)

    @staticmethod
    def excel_column_to_index(col_name):
        col_name = col_name.upper()
        result = 0
        for char in col_name:
            if char in string.ascii_uppercase:
                result = result * 26 + (ord(char) - ord('A') + 1)
        return result - 1

    @staticmethod
    def convert_column(df, col):
        col = col.strip()
        if col.isalpha():
            col_idx = Api.excel_column_to_index(col)
            if col_idx >= len(df.columns):
                raise IndexError(f"列标记 '{col}' 超出范围")
            return df.columns[col_idx]
        elif col.isdigit():
            col_idx = int(col) - 1
            if col_idx >= len(df.columns):
                raise IndexError(f"列索引 '{col}' 超出范围")
            return df.columns[col_idx]
        else:
            if col not in df.columns:
                raise KeyError(f"列名 '{col}' 不存在")
            return col

    @staticmethod
    def read_keys(path):
        encoding = Api.get_encoding(path)
        with open(path, "r", encoding=encoding) as f:
            f1 = f.readlines()
        return f1

    @staticmethod
    def rm_cols(chunk, key, keywords):
        key = Api.convert_column(chunk, key)
        chunk[key] = chunk[key].astype(str, errors="ignore")
        keyword_pattern = '|'.join(escape(val) for val in keywords)
        mask = ~chunk[key].str.contains(keyword_pattern, na=False, regex=True)
        return chunk[mask]

    def rm_repeat(self, chunk, cols):
        sub = [Api.convert_column(chunk, col.strip()) for col in cols.split(",")]
        chunk_copy = chunk.copy()  # 创建 chunk 的副本
        chunk_copy["tmp_hash_001"] = chunk_copy[sub].apply(
            lambda row: md5(','.join(row.astype(str)).encode()).hexdigest(), axis=1)
        chunk_copy = chunk_copy[~chunk_copy["tmp_hash_001"].isin(self.hash_set)]
        self.hash_set.update(chunk_copy["tmp_hash_001"])
        chunk_copy.drop_duplicates(subset="tmp_hash_001", inplace=True)
        chunk_copy.drop(columns=["tmp_hash_001"], inplace=True)
        return chunk_copy

    def export_file(self, df, data, first_s):
        path = data["out_path"]
        num = data["head_rows"]
        sum = data["file_sum"]
        if sum > 0:
            if self.headers_df is not None:
                df = pd.concat([self.headers_df, df], ignore_index=True)
            return df.to_csv(os.path.join(path, f'result_{data["file_sum"]}.csv'), index=False,
                             header=False if num == 0 else True,
                             errors='replace')
        header = False
        if self.headers_df is not None:
            df = pd.concat([self.headers_df, df], ignore_index=True)
            self.headers_df = None
            header = False if num == 0 else True
        if first_s:
            df.to_csv(os.path.join(path, "result.csv"), index=False, errors='replace',
                      header=False if num == 0 else True)
        else:
            df.to_csv(os.path.join(path, "result.csv"), index=False, header=header, mode="a", errors='replace')

    def get_task_msg(self):
        if not self.status_list:
            return ''
        return self.status_list.pop(0)

    def set_log(self, msgs):
        print(msgs)
        self.status_list.append(msgs)

    def sub_main(self, data):
        print(data)
        self.headers_df = None
        self.status_list = []
        self.file_lines = 0
        self.file_index = 1
        self.set_log("正在读取数据···")
        out_path = os.path.join(data["selectedFolder"], "result")
        if not os.path.exists(out_path):
            os.mkdir(out_path)
        data["out_path"] = out_path
        try:
            self.read_csv(data)
        except Exception as e:
            print(e)
            self.set_log("文件读取失败！文件编码无法识别")
            return {"code": -1, "msg": "文件读取失败！文件编码无法识别"}
        self.set_log('导出目录：' + out_path)
        self.set_log("数据处理完成！")
        return {"code": 0, "msg": "数据处理完成！"}

    def select_folder(self):
        folder_path = window.create_file_dialog(webview.FOLDER_DIALOG)
        if folder_path:
            return folder_path[0]
        return None

    def select_file(self):
        file_types = [
            "Text Files (*.txt)",
        ]
        file_path = window.create_file_dialog(
            webview.OPEN_DIALOG, allow_multiple=True, file_types=file_types
        )
        if file_path:
            return file_path[0]
        return None


if __name__ == "__main__":
    # Launch the desktop GUI: a fixed-size pywebview window whose JS bridge is
    # the Api instance (the HTML front-end calls its methods via js_api).
    api = Api()
    window = webview.create_window("CSV批处理工具", "dist/batch_csv.html", js_api=api, width=460, height=750,
                                   resizable=False)
    webview.start()
    # Build command (Nuitka standalone build for Windows, console disabled):
    # python -m nuitka --standalone --include-data-dir=dist=./dist --jobs=5 --windows-icon-from-ico=icon.ico --windows-console-mode=disable main.py
    # NOTE(review): the two lines below look like sample keyword-file entries
    # ("keywords, product link" / "title: electric heater, title: graphene")
    # — confirm their purpose with the author.
