import json
import os
import pandas as pd
from typing import Dict, Any, Union, List
from io import StringIO, BytesIO
from tqdm import tqdm
from V1.structure import Strut


class ConverData(object):
    """Convert lists of ``Strut`` records into csv/json/xlsx/sql payloads.

    Each record is expected to expose ``.fields``, an iterable of objects
    with ``field`` (column name), ``value``, and ``type`` attributes.
    Call ``_set_args_dict`` before ``_convert_sql`` so the target database
    dialect is known; ``convert_map`` exposes one converter per format.
    """

    def convert_structure(self, strut: "Strut") -> Dict[str, Any]:
        """Flatten one record into a plain ``{field: value}`` dict."""
        return {item.field: item.value for item in strut.fields}

    def _set_args_dict(self, args_dict: Dict[str, Any]) -> None:
        # Stored for converters that need extra options (e.g. dbType for SQL).
        self._args_dict = args_dict

    def _convert_csv(self, structure_name: str, data: List["Strut"]) -> str:
        """Render records as CSV text (header row + one line per record)."""
        rows = [self.convert_structure(record) for record in data]
        buffer = StringIO()
        pd.DataFrame(rows).to_csv(buffer, index=False, encoding="utf-8")
        # getvalue() returns the full buffer regardless of cursor position.
        return buffer.getvalue()

    def _convert_json(self, structure_name: str, data: List["Strut"]) -> str:
        """Render records as a JSON document of shape ``{"RECORDS": [...]}``."""
        rows = [self.convert_structure(record) for record in data]
        return json.dumps({"RECORDS": rows}, ensure_ascii=False, indent=4)

    def _convert_xlsx(self, structure_name: str, data: List["Strut"]) -> bytes:
        """Render records as a single-sheet xlsx workbook, returned as bytes."""
        rows = [self.convert_structure(record) for record in data]
        buffer = BytesIO()  # xlsx is a binary format, so use BytesIO
        with pd.ExcelWriter(buffer, engine="xlsxwriter") as writer:
            pd.DataFrame(rows).to_excel(writer, index=False, sheet_name="Sheet1")
        return buffer.getvalue()

    def _convert_sql(self, structure_name: str, data: List["Strut"]) -> str:
        """Render records as one INSERT statement per record.

        Requires ``_set_args_dict`` to have been called with a "dbType"
        entry ("mysql" or "oracle"); raises KeyError otherwise.
        """
        db_type = self._args_dict["dbType"]
        statements = []
        for record in data:
            columns = ", ".join(item.field for item in record.fields)

            values = []
            for item in record.fields:
                if item.type == "str":
                    # Escape embedded single quotes for SQL string literals.
                    # (Escaping hoisted out of the f-string: same-quote nesting
                    # is a syntax error before Python 3.12, PEP 701.)
                    escaped = str(item.value).replace("'", "''")
                    values.append(f"'{escaped}'")
                elif item.type == "num":
                    # Numbers are emitted unquoted.
                    values.append(str(item.value))
                elif item.type == "date":
                    if db_type == "oracle":
                        values.append(
                            f"TO_DATE('{item.value}', 'yyyy-MM-dd hh24:mi:ss')"
                        )
                    else:
                        escaped = str(item.value).replace("'", "''")
                        values.append(f"'{escaped}'")
                else:
                    # Any other type (e.g. None) becomes SQL NULL.
                    values.append("NULL")

            statements.append(
                f"INSERT INTO {structure_name} ({columns}) VALUES ({', '.join(values)});"
            )
        return "\n".join(statements)

    def convert_map(self):
        """Map each supported format name to its converter callable.

        Each callable takes ``(structure_name, data)`` — bound methods are
        used directly instead of redundant lambda wrappers.
        """
        return {
            "json": self._convert_json,
            "csv": self._convert_csv,
            "xlsx": self._convert_xlsx,
            "sql": self._convert_sql,
        }


def convert_stream(
    args_dict: Union[Dict[str, Any], None] = None,
    data: Union[List[Dict[str, Any]], None] = None,
    structure_name: str = "example",
    convertData: "ConverData" = None,
) -> Union[str, bytes, None]:
    """Convert *data* into the format named by ``args_dict["format"]``.

    Parameters
    - args_dict:
      format: output type json|csv|xlsx|sql, default "json"
      dbType: database dialect for sql output, mysql|oracle, default "mysql"
    - data: record contents (list)
    - structure_name: table/structure name used by the sql converter
    - convertData: optional ConverData instance; a fresh one is created
      when omitted

    Returns the converted payload — ``str`` for json/csv/sql, ``bytes``
    for xlsx — or ``None`` for an unsupported format.
    """
    # Mutable/stateful defaults are built per call instead of at import
    # time, so callers can no longer mutate a shared dict/list/instance.
    if args_dict is None:
        args_dict = {"format": "json", "dbType": "mysql"}
    if data is None:
        data = []
    if convertData is None:
        convertData = ConverData()

    convertData._set_args_dict(args_dict)

    # "output_format" rather than "format" to avoid shadowing the builtin.
    output_format = args_dict.get("format", "json")
    converters = convertData.convert_map()

    converter = converters.get(output_format)
    if converter is None:
        print("目前仅支持转换 json, csv, xlsx, sql")
        return None
    return converter(structure_name, data)


def convert(
    datas: List[tuple[str, str, List[Dict[str, Any]], Dict[str, Any]]] = None,
) -> List[tuple[str, str, Union[str, bytes]]]:
    """Convert a batch of datasets into their requested formats.

    Parameters
    - datas: list of 4-tuples:
      index 0 file name, index 1 structure name, index 2 record contents,
      index 3 args_dict
    - args_dict:
      format: output type json|csv|xlsx|sql, default "json"
      dbType: database dialect for sql output, mysql|oracle, default "mysql"

    Returns a list of (file name, "<structure>.<format>", payload) tuples;
    an empty list when *datas* is empty or None.
    """
    # Avoid a mutable default ([]) shared across calls; treat None/[] alike.
    if not datas:
        print("数据集为空，跳过执行")
        return []

    # One shared converter instance per batch is fine: convert_stream
    # resets its args_dict for every dataset.
    convert_data = ConverData()

    return [
        (
            file_name,
            # .get keeps the default consistent with convert_stream instead
            # of raising KeyError when "format" is omitted.
            f"{structure_name}.{args_dict.get('format', 'json')}",
            convert_stream(args_dict, rows, structure_name, convert_data),
        )
        for file_name, structure_name, rows, args_dict in datas
    ]


def output_file(
    stream_list: List[tuple[str, str, Union[str, bytes]]], out_path: str = None
):
    """Write each converted stream to ``<out_path>/results/<dir>/<file>``.

    Parameters
    - stream_list: tuples of (directory name, file name, payload); xlsx
      payloads must be bytes, every other format utf-8 text.
    - out_path: base output directory; defaults to the current working
      directory at call time (not frozen at import time).
    """
    if stream_list is None:
        print("未解析出有效结果，跳过生成")
        return
    if out_path is None:
        # Resolved per call so the default tracks the caller's cwd.
        out_path = os.getcwd()

    for dir_name, file_name, payload in tqdm(
        stream_list, desc="文件写入进度", unit="秒"
    ):
        output_dir = os.path.join(out_path, "results", dir_name)
        os.makedirs(output_dir, exist_ok=True)  # no error if it already exists
        target = os.path.join(output_dir, file_name)

        # xlsx payloads are raw workbook bytes; everything else is text.
        if target.endswith("xlsx"):
            with open(target, "wb") as f:
                f.write(payload)
        else:
            with open(target, "w", encoding="utf-8") as f:
                f.write(payload)


def print_file(
    stream_list: List[tuple[str, str, Union[str, bytes]]], out_path: str = None
):
    """Write converted streams to disk and print a preview of each file.

    Same directory layout as ``output_file`` — each payload is written to
    ``<out_path>/results/<dir>/<file>`` — and, in addition, up to five data
    rows per file are echoed to stdout (six lines for csv so the header
    row is included). xlsx previews are rendered via pandas.

    Parameters
    - stream_list: tuples of (directory name, file name, payload)
    - out_path: base output directory; defaults to the current working
      directory at call time.
    """
    if stream_list is None:
        print("未解析出有效结果，跳过打印")
        return
    if out_path is None:
        out_path = os.getcwd()

    complete_path = []
    for dir_name, file_name, payload in tqdm(
        stream_list, desc="文件写入进度", unit="秒"
    ):
        output_dir = os.path.join(out_path, "results", dir_name)
        os.makedirs(output_dir, exist_ok=True)  # no error if it already exists
        filename_path = os.path.join(output_dir, file_name)

        # Write the payload: bytes for xlsx, utf-8 text otherwise.
        if file_name.endswith("xlsx"):
            with open(filename_path, "wb") as f:
                f.write(payload)
        else:
            with open(filename_path, "w", encoding="utf-8") as f:
                f.write(payload)

        complete_path.append(filename_path)

        # Preview the content just written.
        if file_name.endswith("xlsx"):
            print(f"以表格形式展示{filename_path}文件5行内容:")
            # Re-read the workbook bytes through pandas for a tabular view.
            with BytesIO(payload) as xlsx_stream:
                df = pd.read_excel(xlsx_stream)
                print(df.head(5))
        else:
            print(f"展示{filename_path}文件5行内容:")
            if isinstance(payload, bytes):
                content = payload.decode("utf-8")
            else:
                content = payload
            # csv gets one extra line so the header plus five rows show.
            # ("max_lines" rather than "len": never shadow the builtin.)
            max_lines = 6 if file_name.endswith("csv") else 5
            for line in content.splitlines()[:max_lines]:
                print(line)
        print()

    print("更多内容请跳转以下目录进行查看：")
    print("\n".join(complete_path))


# Names exported by ``from <module> import *`` — the module's public API.
__all__ = ["convert", "convert_stream", "output_file", "print_file"]


if __name__ == "__main__":
    # Example usage.
    # NOTE(review): ``convert`` documents 4-tuples of (file name, structure
    # name, rows, args_dict) and indexes data[3]; the previous example
    # passed 3-tuples, which raised IndexError. A structure name is now
    # supplied for each dataset.
    # NOTE(review): these rows are plain dicts, while ConverData's
    # converters read ``.fields`` on each record — presumably Strut
    # instances are expected here; confirm against V1.structure before
    # relying on this demo end-to-end.
    sample_rows = [
        {"name": f"Alice{suffix}", "age": 30}
        for suffix in ["", 2, 3, 4, 5, 6, 7]
    ]

    json_output = convert(
        [
            (
                f"test{index}",
                f"test{index}",
                list(sample_rows),
                {"format": fmt, "dbType": "mysql"},
            )
            for index, fmt in enumerate(["sql", "xlsx", "csv", "json"], start=1)
        ]
    )
    print_file(json_output)
