# -*- coding: utf-8 -*-
"""
Stock Data Tokenizer for Stock-GPT
将股票数据转换为类似NLP的Token序列，为Transformer模型提供输入
"""

import pandas as pd
import numpy as np
from typing import Dict, List, Tuple, Optional
import pickle
import os


class StockTokenizer:
    """Tokenizer that converts numeric stock data into discrete token sequences.

    Core idea (NLP-style discretization):
    - prices            -> PRICE_<TYPE>_XXX tokens
    - technical values  -> RSI_XXX, MACD_XXX, ... tokens
    - stock codes       -> STOCK_XXXXXX tokens
    - dates             -> DATE_YYYY_MM_DD tokens

    The vocabulary is built lazily: tokens are assigned ids in the order
    they are first seen during encoding.
    """

    def __init__(self):
        # token -> id and id -> token lookup tables.
        self.vocab: Dict[str, int] = {}
        self.id_to_token: Dict[int, str] = {}
        self.next_id: int = 0

        # Reserved special tokens (BERT-style); ids are fixed.
        self.special_tokens = {
            '<PAD>': 0,
            '<UNK>': 1,
            '<MASK>': 2,
            '<CLS>': 3,
            '<SEP>': 4
        }

        # Seed the vocabulary with the special tokens.
        for token, token_id in self.special_tokens.items():
            self.vocab[token] = token_id
            self.id_to_token[token_id] = token
            self.next_id = max(self.next_id, token_id + 1)

    def _add_token(self, token: str) -> int:
        """Return the id for ``token``, adding it to the vocabulary if new."""
        if token not in self.vocab:
            self.vocab[token] = self.next_id
            self.id_to_token[self.next_id] = token
            self.next_id += 1
        return self.vocab[token]

    def _quantize_value(self, value: float, bins: int = 100,
                        min_val: Optional[float] = None,
                        max_val: Optional[float] = None) -> str:
        """Quantize a continuous value into one of ``bins`` buckets.

        Returns a zero-padded 3-digit bucket label (e.g. ``"042"``), or
        ``"NULL"`` for NaN input.

        NOTE(review): when BOTH bounds are defaulted, the +/-50% fallback
        range always normalizes the value to 0.5, so every input lands in
        the middle bucket. Per-value defaults are only meaningful when at
        least one explicit bound is supplied — consider passing dataset
        statistics instead.
        """
        if pd.isna(value):
            return "NULL"

        # Fallback range: +/-50% around the value itself.
        if min_val is None:
            min_val = value * 0.5
        if max_val is None:
            max_val = value * 1.5

        # Bugfix: for negative values the derived bounds come out inverted
        # (value*0.5 > value*1.5); swap so the mapping stays monotonic.
        if min_val > max_val:
            min_val, max_val = max_val, min_val

        # Guard against division by zero on a degenerate range.
        range_val = max_val - min_val
        if range_val == 0:
            # Bugfix: return the middle bucket of *this* bin count; the old
            # hard-coded "050" was only (roughly) correct for bins=100.
            return f"{(bins - 1) // 2:03d}"

        # Map the value onto [0, bins-1].
        normalized = (value - min_val) / range_val
        normalized = np.clip(normalized, 0, 1)
        bin_id = int(normalized * (bins - 1))

        return f"{bin_id:03d}"  # 3-digit label, e.g. "042"

    def _price_to_token(self, price: float, price_type: str = "CLOSE") -> str:
        """Price -> token, e.g. ``PRICE_CLOSE_042``."""
        # Prices get a finer grid (200 buckets) than indicators.
        quantized = self._quantize_value(price, bins=200)
        return f"PRICE_{price_type}_{quantized}"

    def _indicator_to_token(self, value: float, indicator_name: str) -> str:
        """Technical indicator -> token, e.g. ``RSI_065``.

        Bounded oscillators use fixed ranges; everything else falls back to
        the adaptive quantization.
        """
        if indicator_name in ['RSI', 'Williams_%R', 'CCI']:
            # Indicators conventionally scaled to a 0..100 range.
            quantized = self._quantize_value(value, bins=100, min_val=0, max_val=100)
        elif indicator_name in ['MACD', 'Signal_Line', 'MACD_Histogram']:
            # Indicators that can go negative; clamp to [-2, 2].
            quantized = self._quantize_value(value, bins=100, min_val=-2, max_val=2)
        else:
            # Generic indicator: adaptive range.
            quantized = self._quantize_value(value, bins=100)

        return f"{indicator_name}_{quantized}"

    def _stock_to_token(self, stock_code: str) -> str:
        """Stock code -> token, e.g. ``STOCK_000001``."""
        return f"STOCK_{stock_code}"

    def _date_to_token(self, date_str: str) -> str:
        """Date string -> token, e.g. ``DATE_2025_01_15``.

        Unparseable dates map to the sentinel ``DATE_UNK``.
        """
        try:
            date_obj = pd.to_datetime(date_str)
            return f"DATE_{date_obj.year}_{date_obj.month:02d}_{date_obj.day:02d}"
        except (ValueError, TypeError):
            # Bugfix: narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. pandas parse failures surface
            # as ValueError subclasses (DateParseError) or TypeError.
            return "DATE_UNK"

    def encode_stock_data(self, df: pd.DataFrame, stock_code: str) -> List[List[int]]:
        """Encode a stock DataFrame into token-id sequences, one per row.

        Each row becomes a sequence shaped like:
        [STOCK_000001, DATE_2025_01_15, PRICE_OPEN_042, PRICE_CLOSE_045,
         RSI_065, MACD_012, ...]

        Columns that are absent or NaN in a row are simply skipped, so
        sequences may vary in length.
        """
        sequences = []

        for _, row in df.iterrows():
            sequence = []

            # 1. Stock-code token.
            stock_token = self._stock_to_token(stock_code)
            sequence.append(self._add_token(stock_token))

            # 2. Date token (column name is the Chinese word for "date").
            if '日期' in row:
                date_token = self._date_to_token(str(row['日期']))
                sequence.append(self._add_token(date_token))

            # 3. OHLCV price tokens.
            price_columns = ['open', 'high', 'low', 'close', 'volume']
            for col in price_columns:
                if col in row and not pd.isna(row[col]):
                    price_token = self._price_to_token(row[col], col.upper())
                    sequence.append(self._add_token(price_token))

            # 4. Technical-indicator tokens.
            indicator_columns = ['RSI', 'MACD', 'Signal_Line', 'MACD_Histogram',
                                 'Williams_%R', 'CCI', 'EMA_12', 'EMA_26', 'SMA_5', 'SMA_10']

            for col in indicator_columns:
                if col in row and not pd.isna(row[col]):
                    indicator_token = self._indicator_to_token(row[col], col)
                    sequence.append(self._add_token(indicator_token))

            sequences.append(sequence)

        return sequences

    def decode_tokens(self, token_ids: List[int]) -> List[str]:
        """Decode token ids back to readable strings (unknown ids -> <UNK>)."""
        return [self.id_to_token.get(token_id, '<UNK>') for token_id in token_ids]

    def create_masked_sequence(self, sequence: List[int], mask_ratio: float = 0.15) -> Tuple[List[int], List[int]]:
        """Build Masked-LM training data from one token sequence.

        Randomly selects ``mask_ratio`` of positions; of those, 80% become
        <MASK>, 10% a random non-special token, 10% stay unchanged.
        Returns ``(masked_sequence, labels)`` where unselected label
        positions are -100 (ignored by the loss).
        """
        masked_sequence = sequence.copy()
        labels = [-100] * len(sequence)  # -100 = position excluded from loss

        n_special = len(self.special_tokens)
        for i in range(len(sequence)):
            if np.random.random() < mask_ratio:
                labels[i] = sequence[i]  # keep the original token as label

                rand = np.random.random()
                if rand < 0.8:
                    # 80%: replace with <MASK>.
                    masked_sequence[i] = self.special_tokens['<MASK>']
                elif rand < 0.9:
                    # 10%: replace with a random non-special token.
                    # Bugfix: if the vocab holds only special tokens,
                    # randint(low, low) would raise ValueError — fall back
                    # to <MASK> in that case.
                    if self.next_id > n_special:
                        masked_sequence[i] = np.random.randint(n_special, self.next_id)
                    else:
                        masked_sequence[i] = self.special_tokens['<MASK>']
                # remaining 10%: keep the original token.

        return masked_sequence, labels

    def save(self, filepath: str):
        """Serialize the tokenizer state to ``filepath`` with pickle."""
        tokenizer_data = {
            'vocab': self.vocab,
            'id_to_token': self.id_to_token,
            'next_id': self.next_id,
            'special_tokens': self.special_tokens
        }
        with open(filepath, 'wb') as f:
            pickle.dump(tokenizer_data, f)

    def load(self, filepath: str):
        """Restore tokenizer state from a pickle file.

        SECURITY NOTE: pickle can execute arbitrary code on load — only
        load files produced by a trusted `save()` call.
        """
        with open(filepath, 'rb') as f:
            tokenizer_data = pickle.load(f)

        self.vocab = tokenizer_data['vocab']
        self.id_to_token = tokenizer_data['id_to_token']
        self.next_id = tokenizer_data['next_id']
        self.special_tokens = tokenizer_data['special_tokens']

    @property
    def vocab_size(self) -> int:
        """Current vocabulary size (special + learned tokens)."""
        return len(self.vocab)


def test_tokenizer():
    """Smoke-test the StockTokenizer end to end: encode, decode, mask."""
    print("🧪 测试Stock Tokenizer...")

    # Two days of synthetic OHLCV + indicator data.
    rows = {
        '日期': ['2025-01-15', '2025-01-16'],
        'open': [11.5, 11.8],
        'high': [11.9, 12.1],
        'low': [11.2, 11.6],
        'close': [11.7, 12.0],
        'volume': [1000000, 1200000],
        'RSI': [65.5, 72.1],
        'MACD': [0.15, 0.23],
        'Signal_Line': [0.12, 0.18],
    }
    sample_df = pd.DataFrame(rows)

    # Build the tokenizer and encode both rows.
    tok = StockTokenizer()
    token_sequences = tok.encode_stock_data(sample_df, '000001')

    print(f"📊 词汇表大小: {tok.vocab_size}")
    print(f"📝 生成序列数量: {len(token_sequences)}")

    # Round-trip the first sequence back to readable tokens.
    seq0 = token_sequences[0]
    readable = tok.decode_tokens(seq0)

    print(f"🔢 第一个序列 token IDs: {seq0[:10]}...")
    print(f"📖 解码后的tokens: {readable[:10]}...")

    # Exercise the masked-LM sample generator.
    masked, mask_labels = tok.create_masked_sequence(seq0)
    print(f"🎭 遮蔽序列长度: {len(masked)}")
    print(f"🏷️  标签序列长度: {len(mask_labels)}")

    print("✅ Tokenizer测试完成！")


# Run the smoke test when executed as a script (no side effects on import).
if __name__ == "__main__":
    test_tokenizer()