import xlrd
from collections import OrderedDict
import json
import pandas as pd
import os
import numpy as np


class ParseData:
    """Parse customs-declaration ``.xls`` files under a directory, merge
    them into one JSON/Excel dataset, and produce a grouped summary workbook.

    Pipeline (see :meth:`main`):
      1. Walk ``xls_path_dir`` collecting ``.xls`` files.
      2. Parse each file with :meth:`process_xls_table2` into JSON records.
      3. Dump all records to ``json_path`` and convert them to ``excel_path``.
      4. Aggregate ``excel_path`` into ``result_path`` via :meth:`process_excel`.
    """

    def __init__(self, xls_path_dir, json_path="all_item.json", excel_path="all_item.xlsx", result_path="result.xlsx"):
        """
        :param xls_path_dir: directory scanned (recursively) for .xls inputs
        :param json_path: path of the intermediate merged-JSON output
        :param excel_path: path of the intermediate merged-Excel output
        :param result_path: path of the final grouped/summed result workbook
        """
        self.xls_path_dir = xls_path_dir
        self.json_path = json_path
        self.excel_path = excel_path
        self.result_path = result_path

    def process_excel(self, input_path, output_path):
        """Aggregate the merged workbook into per-group sums.

        Import rows keep a positive value, all other rows are negated, and
        rows are grouped by the record number (column I) with column AC as
        the fallback grouping key.

        :param input_path: input Excel file path (first row is the header)
        :param output_path: output Excel file path for the grouped sums
        """
        df = pd.read_excel(input_path, header=0)

        # Column positions (0-based); adjust if the sheet layout changes.
        COL_TYPE = 0    # column A: import/export marker
        COL_RECORD = 8  # column I: record number
        COL_VALUE = 17  # column R: numeric value
        COL_ALT = 28    # column AC: fallback grouping key

        # Keep only rows where at least one grouping column is present
        # (i.e. drop rows where BOTH column I and column AC are empty).
        valid_mask = df.iloc[:, COL_RECORD].notna() | df.iloc[:, COL_ALT].notna()
        df = df[valid_mask].copy()

        # Sign-normalise column R: imports positive, everything else negative.
        # NOTE(review): only the exact marker "进口保税核注清单" counts as an
        # import; any other marker (or NaN) is treated as export — confirm.
        df.iloc[:, COL_VALUE] = np.where(
            df.iloc[:, COL_TYPE] == "进口保税核注清单",
            abs(df.iloc[:, COL_VALUE]),
            -abs(df.iloc[:, COL_VALUE])
        )

        # Grouping key: record number, falling back to the AC column.
        df['分组依据'] = df.iloc[:, COL_RECORD].fillna(df.iloc[:, COL_ALT])

        # Sum the value column per group and give it a readable name.
        value_col = df.columns[COL_VALUE]
        result = (
            df.groupby('分组依据', as_index=False)
              .agg({value_col: 'sum'})
              .rename(columns={value_col: '合计值'})
        )

        result.to_excel(output_path, index=False)
        print(f"处理完成，结果已保存至：{output_path}")

    def get_merged_value(self, sheet, merged_cells, row_idx, col_idx):
        """Return the value at (row_idx, col_idx), resolving merged cells.

        If the coordinate falls inside a merged range, the value stored in
        the range's top-left anchor cell is returned instead; xlrd stores a
        merged range's value only in that anchor cell.
        """
        for rlo, rhi, clo, chi in merged_cells:
            # xlrd ranges are half-open: rows [rlo, rhi), cols [clo, chi).
            if rlo <= row_idx < rhi and clo <= col_idx < chi:
                return sheet.cell_value(rlo, clo)
        return sheet.cell_value(row_idx, col_idx)

    def process_xls_table(self, file_path):
        """Parse one .xls file with a FIXED layout: header on sheet row 18,
        data from row 19 until the first fully empty row.

        :param file_path: path of the .xls file to parse
        :return: JSON string — a list of row dicts, each also carrying the
                 sheet-level info fields and the source ``file_path``
        """
        workbook = xlrd.open_workbook(file_path)
        sheet = workbook.sheet_by_index(0)

        # Merged-cell ranges, resolved once for the whole sheet.
        merged_cells = sheet.merged_cells

        # Sheet-level info fields: (field name, 1-based row, 0-based column).
        info_config = [
            ('标题', 2, 18),
            ('预录入统一编号', 5, 11),
            ('清单编号', 5, 31),
            ('手(账)册编号', 5, 51),
            ('核扣标志', 11, 31),
            ('报关标志', 12, 31)
        ]

        info_data = OrderedDict()
        for name, src_row, src_col in info_config:
            # Rows in info_config are 1-based; convert to 0-based here.
            value = self.get_merged_value(sheet, merged_cells, src_row - 1, src_col)
            info_data[name] = value.strip() if isinstance(value, str) else value

        # Header row is fixed at sheet row 18 (0-based index 17).
        headers = [self.get_merged_value(sheet, merged_cells, 17, col_idx)
                   for col_idx in range(sheet.ncols)]

        data = []
        for row_idx in range(18, sheet.nrows):  # data starts at sheet row 19
            # An entirely empty row terminates the table body.
            if all(cell.value == "" for cell in sheet.row(row_idx)):
                break

            record = info_data.copy()  # each row carries the info fields
            for col_idx, header in enumerate(headers):
                value = self.get_merged_value(sheet, merged_cells, row_idx, col_idx)
                record[header] = value if value != "" else None

            record["file_path"] = file_path
            data.append(record)

        return json.dumps(data, ensure_ascii=False, indent=2)

    def process_xls_table2(self, file_path):
        """Parse one .xls file, locating the header row DYNAMICALLY: the
        header is the row immediately after the first cell equal to '表体'.

        :param file_path: path of the .xls file to parse
        :return: JSON string — a list of row dicts, each also carrying the
                 sheet-level info fields and the source ``file_path``
        :raises ValueError: when no '表体' marker cell is found
        """
        workbook = xlrd.open_workbook(file_path)
        sheet = workbook.sheet_by_index(0)

        # Merged-cell ranges, resolved once for the whole sheet.
        merged_cells = sheet.merged_cells

        # Sheet-level info fields: (field name, 1-based row, 0-based column).
        info_config = [
            ('标题', 2, 18),
            ('预录入统一编号', 5, 11),
            ('清单编号', 5, 31),
            ('手(账)册编号', 5, 51),
            ('核扣标志', 11, 31),
            ('报关标志', 12, 31)
        ]

        info_data = OrderedDict()
        for name, src_row, src_col in info_config:
            # Rows in info_config are 1-based; convert to 0-based here.
            value = self.get_merged_value(sheet, merged_cells, src_row - 1, src_col)
            info_data[name] = value.strip() if isinstance(value, str) else value

        # Scan for the '表体' marker; the header row is the row after it.
        header_row = None
        for row_idx in range(sheet.nrows):
            for col_idx in range(sheet.ncols):
                cell_value = self.get_merged_value(sheet, merged_cells, row_idx, col_idx)
                if cell_value == '表体':
                    header_row = row_idx + 1
                    break
            if header_row is not None:
                break

        if header_row is None:
            raise ValueError("文件中未找到'表体'字样，无法确定表头行位置")

        headers = [self.get_merged_value(sheet, merged_cells, header_row, col_idx)
                   for col_idx in range(sheet.ncols)]

        data = []
        for row_idx in range(header_row + 1, sheet.nrows):
            row_values = [self.get_merged_value(sheet, merged_cells, row_idx, col_idx)
                          for col_idx in range(sheet.ncols)]
            # An entirely empty row terminates the table body.
            if all(value in (None, "") for value in row_values):
                break

            record = info_data.copy()  # each row carries the info fields
            for header, value in zip(headers, row_values):
                record[header] = value if value != "" else None

            record["file_path"] = file_path
            data.append(record)

        return json.dumps(data, ensure_ascii=False, indent=2)

    def json_to_excel(self, json_file, excel_file):
        """Convert a JSON file (a list of flat objects) to an Excel file.

        On a JSON syntax error the location is printed and the conversion
        is aborted; any other failure is caught and reported.
        """
        try:
            with open(json_file, 'r', encoding="utf-8") as f:
                try:
                    data = json.load(f)
                    print("JSON验证成功！")
                except json.JSONDecodeError as e:
                    print(f"错误位置：第 {e.lineno} 行，第 {e.colno} 列")
                    print(f"错误详情：{e.msg}")
                    # BUG FIX: previously fell through with 'data' undefined,
                    # raising NameError masked by the outer handler.
                    return

            df = pd.DataFrame(data)
            df.to_excel(excel_file, index=False, engine='openpyxl')
            print(f"转换成功！Excel 文件已保存至：{excel_file}")

        except Exception as e:
            print(f"转换失败: {e}")

    def list_all_files(self, directory):
        """Recursively collect the absolute paths of all ``.xls`` files
        under *directory* and return them as a list."""
        file_paths = []
        for root, _dirs, files in os.walk(directory):
            for filename in files:
                full_path = os.path.join(root, filename)
                # Only classic .xls workbooks are parseable by xlrd here.
                if full_path.endswith(".xls"):
                    file_paths.append(full_path)
        return file_paths

    def main(self):
        """Run the full pipeline: parse every .xls file, merge the records
        into one JSON file, convert it to Excel, then aggregate the result."""
        json_str_list = []
        for path in self.list_all_files(self.xls_path_dir):
            json_output = self.process_xls_table2(path)
            # Drop entries with empty keys and turn JSON nulls into "".
            # NOTE(review): the textual 'null' replacement would also rewrite
            # data values containing the literal substring "null" — confirm
            # that never occurs in the source sheets.
            data = json_output.replace('"": null,', '').replace('null', '""')
            json_str_list.extend(json.loads(data))

        # BUG FIX: write via a context manager so the handle is closed even
        # if parsing or dumping raises (was a bare open()/close() pair held
        # open for the whole run).
        with open(self.json_path, "w", encoding="utf-8") as f:
            json.dump(json_str_list, f, ensure_ascii=False, indent=2)

        self.json_to_excel(self.json_path, self.excel_path)
        self.process_excel(self.excel_path, self.result_path)

# # 使用示例
# if __name__ == "__main__":
#     xls_path_dir = "/Users/yezhian/YT/YT/dy/flask_server/static/unzip/xlsfile"
#
#     p = ParseData(xls_path_dir)
#     p.main()
