import os
import random
from collections import defaultdict
from datetime import datetime
from typing import List

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader

from config import config
from mydataset import GeoStyleDataset, FITDataset

# Run on the GPU when one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")


def get_time_features(window_dates: List[datetime]) -> np.ndarray:
    """
    Build cyclical time features from a window of datetimes.

    For each datetime the feature vector is:
        [month / 12, weekday / 6, sin(2*pi*doy/365), cos(2*pi*doy/365)]
    where doy is the 1-based day of year. The constant 365 denominator is
    deliberately kept (not adjusted for leap years) so existing normalized
    data stays comparable.

    Parameters:
        window_dates: datetimes covering one lookback window.

    Returns:
        np.ndarray of shape (len(window_dates), 4). An empty input yields
        shape (0, 4) so downstream concatenation along axis=1 still works
        (a bare np.array([]) would have shape (0,) and break it).
    """
    if not window_dates:
        return np.empty((0, 4))

    features = []
    for dt in window_dates:
        day_of_year = dt.timetuple().tm_yday  # 1..365 (366 in leap years)
        features.append([
            dt.month / 12.0,                         # month, normalized
            dt.weekday() / 6.0,                      # 0 = Monday .. 6 = Sunday
            np.sin(2 * np.pi * day_of_year / 365),   # cyclical position in year
            np.cos(2 * np.pi * day_of_year / 365),
        ])

    return np.array(features)


class MyDataLoader:
    """
    数据加载和预处理类，用于加载并预处理不同来源的时尚数据。
    """

    def __init__(self):
        self._source_list = [GeoStyleDataset(os.path.join(config.DATA_DIR, "GeoStyle")), FITDataset(os.path.join(config.DATA_DIR, "FIT"))]
        self.batch_size = 1024
        self.lookback = 52
        self.num_workers = 4

        def unify_city_ids():
            """
            将数据源的城市名称统一编码为新的城市 ID，并返回城市名称到新 ID 的映射。
            返回:
            - city_name_to_new_id: dict，统一城市名称到新 ID 的映射
            - new_id_to_city_name: dict，新 ID 到城市名称的映射（可选）
            """
            # 合并所有城市名，去重并排序
            s = set()
            for source in self._source_list:
                for city in source.get_city_name_list():
                    s.add(city)
            cities = sorted(list(s))

            # 为所有城市分配统一 ID（从 0 开始）
            city_name_to_new_id = {city: idx for idx, city in enumerate(cities)}
            new_id_to_city_name = {idx: city for city, idx in city_name_to_new_id.items()}

            return city_name_to_new_id, new_id_to_city_name

        def get_attributes_to_id_dict():
            s = set()
            for source in self._source_list:
                for attr in source.get_attributes_list():
                    s.add(attr)
            all_attr = sorted(list(s))

            res = {attr: idx for idx, attr in enumerate(all_attr)}

            return res

        # 整合城市
        self._city_name_to_id_dict, self._city_id_to_name_dict = unify_city_ids()
        # 整合属性
        self._attributes_to_id_dict = get_attributes_to_id_dict()

    def _extract_sequences(self, ts_data):

        sequences, targets = [], []

        if len(ts_data) < self.lookback + 1:
            return sequences, targets

        for i in range(len(ts_data) - self.lookback):
            # 趋势值和时间特征
            trend_values = [v for _, v in ts_data[i:i + self.lookback]]
            time_features = get_time_features([t for t, _ in ts_data[i:i + self.lookback]])

            # 拼接所有特征
            combined = np.concatenate([
                np.array(trend_values)[:, np.newaxis],  # (lookback, 1)
                time_features,  # (lookback, 4)
            ], axis=1)

            # 目标值为趋势差
            target_delta = ts_data[i + self.lookback][1] - ts_data[i + self.lookback - 1][1]

            sequences.append(combined)
            targets.append([target_delta])
        return sequences, targets

    def get_features(self, preprocessed_data, target_city_name: str = None, target_attribute_name: str = None):
        """
        获取特征，包括趋势序列、目标值、和对应的城市 ID 与属性 ID
        """
        all_sequences, all_targets, all_meta = [], [], []

        for (city_name, attr), ts_data in preprocessed_data.items():
            if target_city_name is not None and target_city_name != city_name:
                continue
            if target_attribute_name is not None and target_attribute_name != attr:
                continue

            sequences, targets = self._extract_sequences(ts_data)

            city_id = self._city_name_to_id_dict[city_name]
            attr_id = self._attributes_to_id_dict[attr]

            for i in range(len(sequences)):
                all_sequences.append(sequences[i])
                all_targets.append(targets[i])
                all_meta.append({
                    "city_id": city_id,
                    "attr_id": attr_id
                })

        return all_sequences, all_targets, all_meta

    def get_train_dataset(self, target_city_name: str = None, target_attribute_name: str = None):
        combined_sequences, combined_targets, combined_meta = [], [], []

        # === 收集所有数据 ===
        for source in self._source_list:
            sequences, targets, metas = self.get_features(source.get_preprocessed_normed(), target_city_name, target_attribute_name)
            combined_sequences.extend(sequences)
            combined_targets.extend(targets)
            combined_meta.extend(metas)

        # === 按 (city_id, attr_id, age_bin, gender) 分组 ===
        group_dict = defaultdict(list)
        for i, meta in enumerate(combined_meta):
            key = (meta["city_id"], meta["attr_id"])
            group_dict[key].append(i)

        train_idx, test_idx = [], []

        # === 分层划分每个组合 ===
        random.seed(42)
        for indices in group_dict.values():
            if len(indices) < 2:
                train_idx.extend(indices)
                continue
            random.shuffle(indices)
            split = int(0.8 * len(indices))
            train_idx.extend(indices[:split])
            test_idx.extend(indices[split:])

        # === 构建 TensorDataset ===
        def to_tensor(data_list):
            return torch.tensor(np.array(data_list), dtype=torch.float32)

        X = to_tensor([combined_sequences[i] for i in range(len(combined_sequences))])
        y = to_tensor([combined_targets[i] for i in range(len(combined_targets))])

        meta_city = torch.tensor([combined_meta[i]['city_id'] for i in range(len(combined_meta))], dtype=torch.long)
        meta_attr = torch.tensor([combined_meta[i]['attr_id'] for i in range(len(combined_meta))], dtype=torch.long)

        train_dataset = TensorDataset(
            X[train_idx], y[train_idx],
            meta_city[train_idx],
            meta_attr[train_idx]
        )
        test_dataset = TensorDataset(
            X[test_idx], y[test_idx],
            meta_city[test_idx],
            meta_attr[test_idx]
        )

        test_X = X[test_idx].to(device)
        test_y_true = y[test_idx].cpu().numpy().flatten()

        return (
            DataLoader(train_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers),
            DataLoader(test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers),
            len(train_idx),
            test_X,
            test_y_true
        )

    def get_cities_length(self):
        return len(self._city_id_to_name_dict.keys())

    def get_attributes_length(self):
        return len(self._attributes_to_id_dict.keys())

    def get_sources_length(self):
        return len(self._source_list)

    def get_available_city_list(self):
        return list(self._city_name_to_id_dict.keys())

    def get_available_attributes(self, target_city_name: str | None):
        s = set()
        for source in self._source_list:
            for attr in source.get_attributes_list(target_city_name):
                s.add(attr)
        return sorted(list(s))

    def denormalize_ts_data(self, target_city_name, attribute, ts_data):
        """
        将归一化后的趋势还原为原始值
        参数：
        - ts_data: 归一化后的趋势 [[datetime, trend_value]]
        """
        for source in self._source_list:
            for (city_name, attr), [min_val, max_val, epsilon] in source.get_preprocessed_norm().items():
                if city_name == target_city_name and attr == attribute:
                    new_ts_data = []
                    for (date, value) in ts_data:
                        new_ts_data.append([date, value * (max_val - min_val + epsilon) + min_val])
                    return new_ts_data
        return []

    def denormalize_trend_list(self, target_city_name, attribute, data):
        """
        将归一化后的趋势还原为原始值
        参数：
        - ts_data: 归一化后的趋势 [[datetime, trend_value]]
        """
        for source in self._source_list:
            for (city_name, attr), [min_val, max_val, epsilon] in source.get_preprocessed_norm().items():
                if city_name == target_city_name and attr == attribute:
                    new_data = []
                    for value in data:
                        new_data.append(value * (max_val - min_val + epsilon) + min_val)
                    return new_data
        return []

    def get_normed_history_data(self, target_city_name: str, attribute: str):
        for source in self._source_list:
            for (city_name, attr), ts_data in source.get_preprocessed_normed().items():
                if city_name == target_city_name and attr == attribute:
                    city_id = self._city_name_to_id_dict[city_name]
                    attr_id = self._attributes_to_id_dict[attr]
                    return city_id, attr_id, ts_data

    def get_predict_data(self, target_city_name: str, attribute: str):
        city_id, attr_id, ts_data = self.get_normed_history_data(target_city_name, attribute)
        seqs, _ = self._extract_sequences(ts_data)
        seqs = torch.tensor(np.array(seqs), dtype=torch.float32).to(device)

        return ts_data, city_id, attr_id, seqs


if __name__ == '__main__':
    # Quick manual smoke check: list the attributes available for Austin.
    loader = MyDataLoader()
    print(loader.get_available_attributes('Austin'))