import os
import oss2
import chardet
from pathlib import Path
import pandas as pd
import logging as logger
from typing import List, Union


# ================== Configuration ==================
# OSS connection settings.
# SECURITY NOTE(review): access keys are hard-coded in source; move them to
# environment variables or an OSS credentials provider before sharing/committing.
ACCESS_KEY_ID = 'LTAI5t89pT1kuBLHBzb5RpsT'
ACCESS_KEY_SECRET = 'wD9kV91Kmpy3jsoc6qOv8e5f5lvnXl'
ENDPOINT = 'https://oss-cn-wulanchabu.aliyuncs.com'  # replace with your region's endpoint
BUCKET_NAME = 'xm-stock-data'



# Local root directory to upload from (absolute path)
ROOT_DIR = '/Users/mac/Downloads/2025/save/jsonl'

# Start the OSS key at the N-th directory level (N=1 means directly under ROOT_DIR)
N = 1  # e.g. /a/b/c/d/e.txt → with N=3 the OSS key starts at c/d/e.txt

# File encodings allowed for upload (lowercase)
ALLOWED_ENCODINGS = {'utf-8', 'gbk', 'gb2312'}

# Optional: only process files with these suffixes (e.g. .json, .txt)
ALLOWED_SUFFIXES = {'.jsonl', '.txt', '.log', '.csv'}

# Whether to skip binary files (images, archives, ...)
SKIP_BINARY = True



class OSSDataProcessor():
    """Wrapper around an Aliyun OSS bucket.

    Provides helpers to load tabular objects from the bucket into pandas
    DataFrames, normalize timestamp columns, and mirror a local directory
    tree into the bucket.

    NOTE(review): ``auto_detect_and_load`` and ``load_multiple_files`` call
    ``file_exists``, ``list_files`` and several ``read_*`` helpers that are
    not defined in this file — confirm they exist elsewhere before using
    those methods.
    """

    def __init__(self):
        # Build the authenticated bucket handle once and keep it on the
        # instance so every method operates on the same bucket.
        # SECURITY NOTE(review): credentials come from module-level
        # constants; prefer environment variables or a credentials store.
        auth = oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET)
        self.bucket = oss2.Bucket(auth, ENDPOINT, BUCKET_NAME)

    def auto_detect_and_load(self, file_path: str, **kwargs) -> pd.DataFrame:
        """Detect the file format from its extension and load it as a DataFrame.

        Args:
            file_path: object key inside the OSS bucket.
            **kwargs: forwarded to the matching ``read_*`` helper.

        Returns:
            The loaded DataFrame, or an empty DataFrame when the object is
            missing or its extension is unsupported.
        """
        if not self.file_exists(file_path):
            logger.error(f"文件不存在: {file_path}")
            return pd.DataFrame()

        # Extension only (text after the last dot), lower-cased.
        file_ext = file_path.lower().split('.')[-1]

        if file_ext in ['csv', 'txt']:
            return self.read_csv(file_path, **kwargs)
        elif file_ext in ['xlsx', 'xls']:
            return self.read_excel(file_path, **kwargs)
        elif file_ext == 'json':
            return self.read_json(file_path, **kwargs)
        elif file_ext in ['parquet', 'pq']:
            return self.read_parquet(file_path, **kwargs)
        else:
            logger.error(f"不支持的文件格式: {file_ext}")
            return pd.DataFrame()

    def load_multiple_files(self, file_patterns: List[str], concat: bool = True, **kwargs) -> Union[pd.DataFrame, dict]:
        """Load every bucket object whose key contains one of the patterns.

        Args:
            file_patterns: substrings to match against object keys.
            concat: when True, concatenate all loaded frames into one.
            **kwargs: forwarded to ``auto_detect_and_load``.

        Returns:
            A single concatenated DataFrame when ``concat`` is True and at
            least one file loaded; otherwise a dict mapping object key to
            its DataFrame.
        """
        all_dfs = {}

        for pattern in file_patterns:
            # Substring match against the full object listing.
            matching_files = [f for f in self.list_files() if pattern in f]

            for file_path in matching_files:
                logger.info(f"加载文件: {file_path}")
                df = self.auto_detect_and_load(file_path, **kwargs)

                # Empty frames (missing / unsupported files) are dropped.
                if not df.empty:
                    all_dfs[file_path] = df

        if concat and all_dfs:
            combined_df = pd.concat(all_dfs.values(), ignore_index=True)
            logger.info(f"合并了 {len(all_dfs)} 个文件，总形状: {combined_df.shape}")
            return combined_df
        else:
            return all_dfs

    def process_timestamp_columns(self, df: pd.DataFrame, timestamp_cols: List[str]) -> pd.DataFrame:
        """Normalize epoch timestamp columns to 10-digit (second) resolution.

        Nanosecond (19-digit), microsecond (16-digit) and millisecond
        (13-digit) values are floor-divided down to seconds; values that
        cannot be parsed as numbers become NA. The input frame is not
        modified.

        Args:
            df: input DataFrame.
            timestamp_cols: names of columns to normalize; names absent
                from ``df`` are silently ignored.

        Returns:
            A copy of ``df`` with the listed columns converted to nullable
            Int64 second-resolution timestamps.
        """
        df_processed = df.copy()

        for col in timestamp_cols:
            if col in df_processed.columns:
                logger.info(f"处理时间戳列: {col}")

                # Non-numeric values become NaN instead of raising.
                df_processed[col] = pd.to_numeric(df_processed[col], errors='coerce')

                # Classify magnitude; NaN compares False so it passes through.
                mask_19 = df_processed[col] >= 1e18  # nanoseconds
                mask_16 = df_processed[col] >= 1e15  # microseconds
                mask_13 = df_processed[col] >= 1e12  # milliseconds

                df_processed.loc[mask_19, col] = df_processed.loc[mask_19, col] // 10 ** 9
                df_processed.loc[mask_16 & ~mask_19, col] = df_processed.loc[mask_16 & ~mask_19, col] // 10 ** 6
                df_processed.loc[mask_13 & ~mask_16, col] = df_processed.loc[mask_13 & ~mask_16, col] // 1000

                # Nullable integer type keeps NA for unparseable entries.
                df_processed[col] = df_processed[col].astype('Int64')

        return df_processed

    @staticmethod
    def detect_encoding(file_path: Path) -> str:
        """Detect a file's text encoding from its first 1 KB.

        Returns the lower-cased encoding name, or one of the sentinel
        strings ``'empty'`` (zero-byte file), ``'unknown'`` (low-confidence
        or undetectable) or ``'error'`` (read failure).
        """
        # BUG FIX(review): this and is_text_file were plain functions inside
        # the class body (no self); @staticmethod makes them safe to call on
        # both the class and instances.
        try:
            with open(file_path, 'rb') as f:
                raw_data = f.read(1024)  # first 1 KB is enough for detection
                if not raw_data:
                    return 'empty'
                result = chardet.detect(raw_data)
                encoding = result['encoding']
                confidence = result['confidence']
                # Low-confidence guesses are treated as undetectable.
                if confidence < 0.7:
                    return 'unknown'
                return encoding.lower() if encoding else 'unknown'
        except Exception as e:
            print(f"⚠️ 编码检测失败 {file_path}: {e}")
            return 'error'

    @staticmethod
    def is_text_file(file_path: Path) -> bool:
        """Heuristically decide whether a file is text, by suffix only."""
        suffix = file_path.suffix.lower()
        if suffix in {'.jpg', '.png', '.gif', '.zip', '.gz', '.pdf', '.mp4', '.exe'}:
            return False
        return True

    def upload_directory_to_oss(self, root_dir: str, n_level: int, code_list):
        """Recursively upload eligible files under ``root_dir`` to OSS.

        The OSS key is the file's path relative to ``root_dir``, starting
        at directory level ``n_level``. A file is skipped when it sits
        above ``n_level``, has a suffix outside ALLOWED_SUFFIXES, looks
        binary (when SKIP_BINARY), or its encoding cannot be detected.

        Args:
            root_dir: local root directory to scan.
            n_level: 1-based directory level where the OSS key starts.
            code_list: allowed stock codes; files exactly two levels deep
                whose first path component is not listed are skipped.

        Raises:
            ValueError: when ``root_dir`` is not an existing directory.
        """
        root = Path(root_dir).resolve()
        if not root.is_dir():
            raise ValueError(f"根目录不存在: {root}")

        for file_path in root.rglob('*'):
            if not file_path.is_file():
                continue

            # Path relative to the scan root; anything outside is skipped.
            try:
                rel_path = file_path.relative_to(root)
            except ValueError:
                continue

            parts = rel_path.parts
            if len(parts) < n_level:
                print(f"跳过层级不足的文件: {file_path}")
                continue

            # Files exactly two levels deep are per-code history files:
            # only upload the codes that were explicitly requested.
            if len(parts) == 2:
                if parts[0] not in code_list:
                    continue

            # Build the OSS key from level n_level onward (parts[0] is level 1).
            oss_key_parts = parts[n_level - 1:]
            oss_key = '/'.join(oss_key_parts)

            # Suffix filter.
            if file_path.suffix.lower() not in ALLOWED_SUFFIXES:
                print(f"跳过非目标后缀文件: {oss_key}")
                continue

            # Optionally skip files that look binary.
            if SKIP_BINARY and not OSSDataProcessor.is_text_file(file_path):
                print(f"跳过疑似二进制文件: {oss_key}")
                continue

            # Skip files whose encoding cannot be established.
            encoding = OSSDataProcessor.detect_encoding(file_path)
            if encoding in ('error', 'unknown', 'empty'):
                print(f"跳过无法识别编码的文件: {oss_key} (detected: {encoding})")
                continue

            try:
                print(f"📤 正在上传: {oss_key} (编码: {encoding})")
                # BUG FIX(review): was ``x.bucket`` — a module-level global —
                # which broke the method for any instance not named ``x``.
                self.bucket.put_object_from_file(oss_key, str(file_path))
                print(f"✅ 上传成功: {oss_key}")
            except Exception as e:
                print(f"❌ 上传失败: {oss_key} - {e}")



if __name__ == '__main__':
    # ETF codes whose historical data should be uploaded; files exactly two
    # directory levels deep are skipped unless their top-level directory
    # name is in this list (see upload_directory_to_oss).
    code_list = ['515220.SH', '159798.SZ', '159611.SZ', '160416.SZ', '159735.SZ', '516160.SH', '515790.SH', '515030.SH',
                 '515210.SH', '512400.SH', '518880.SH', '516780.SH', '159870.SZ', '159866.SZ', '159745.SZ', '159928.SZ',
                 '516960.SH', '159516.SZ', '159883.SZ', '512660.SH', '512550.SH', '159800.SZ', '516110.SH', '159565.SZ',
                 '159996.SZ', '159997.SZ', '512980.SH', '159869.SZ', '159766.SZ', '159986.SZ', '515170.SH', '512690.SH',
                 '159843.SZ', '159863.SZ', '159949.SZ', '159825.SZ', '512170.SH', '159846.SZ', '515120.SH', '159570.SZ',
                 '588370.SH', '159938.SZ', '561510.SH', '512880.SH', '512800.SH', '512070.SH', '159851.SZ', '512480.SH',
                 '515230.SH', '159890.SZ', '515880.SH', '513050.SH', '513040.SH', '159819.SZ', '512720.SH', '159994.SZ',
                 '515050.SH', '512580.SH', '159767.SZ', '159788.SZ', '512200.SH', '159746.SZ', '159810.SZ', '159885.SZ',
                 '159763.SZ', '562030.SH', '516330.SH', '159882.SZ', '159881.SZ', '159876.SZ', '516800.SH', '562500.SH',
                 '159845.SZ', '516610.SH', '518800.SH', '159880.SZ', '510210.SH', '159992.SZ', '512290.SH', '159865.SZ',
                 '159827.SZ', '562510.SH', '167301.SZ', '512760.SH', '159995.SZ', '516510.SH', '515400.SH', '159755.SZ',
                 '159976.SZ', '516150.SH', '159945.SZ', '515060.SH', '159619.SZ', '159954.SZ', '561910.SZ']

    # NOTE: the instance must keep the name ``x`` — upload_directory_to_oss
    # historically referenced the module-level global ``x`` when uploading.
    x = OSSDataProcessor()

    # FIX: dropped the needless f-prefix on placeholder-free strings and
    # removed dead commented-out experiments; output is unchanged.
    print("上传历史数据。。。。。")
    x.upload_directory_to_oss(ROOT_DIR, N, code_list)
    print("历史数据全部上传完毕。。。。。")