
import sys
sys.path.append('/Users/xbs/Code/HunterQuant')
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, models
import torch.nn.functional as F
import os
from PIL import Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import mplfinance as mpf
from collections import deque
import tempfile
import time
from torchvision.models.efficientnet import EfficientNet_B3_Weights
from kline2img import plot_kline_to_image, get_predict_kline_data

# ====================
# Dataset class
# ====================

class KlineDataset(Dataset):
    """Image dataset of K-line (candlestick) chart images.

    Expects ``data_dir`` to contain one sub-directory per class
    (``double_bottom`` and ``other``), each holding .png/.jpg images.
    """

    def __init__(self, data_dir, transform=None, max_samples_per_class=1000):
        """
        Args:
            data_dir: Root directory of the dataset.
            transform: Optional torchvision transform applied to each image.
            max_samples_per_class: Cap on samples per class (guards against
                class imbalance).
        """
        self.data_dir = data_dir
        self.transform = transform
        self.classes = ['double_bottom', 'other']
        self.class_to_idx = {cls_name: i for i, cls_name in enumerate(self.classes)}

        # Collect image paths and integer labels for every class directory.
        self.image_paths = []
        self.labels = []

        for cls_name in self.classes:
            cls_dir = os.path.join(data_dir, cls_name)
            if not os.path.exists(cls_dir):
                continue

            # sorted() makes the truncation below deterministic across runs;
            # os.listdir order is filesystem-dependent.
            cls_images = [os.path.join(cls_dir, f) for f in sorted(os.listdir(cls_dir))
                          if f.endswith(('.png', '.jpg'))]

            # Cap the number of samples taken from this class.
            if len(cls_images) > max_samples_per_class:
                cls_images = cls_images[:max_samples_per_class]

            self.image_paths.extend(cls_images)
            self.labels.extend([self.class_to_idx[cls_name]] * len(cls_images))

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        img_path = self.image_paths[idx]
        label = self.labels[idx]

        # Load as RGB; .convert guards against grayscale/RGBA source files.
        image = Image.open(img_path).convert('RGB')

        if self.transform:
            image = self.transform(image)

        return image, label

# ====================
# Model architecture
# ====================

class KlinePatternModel(nn.Module):
    """EfficientNet-B3 based classifier for K-line chart patterns.

    The (optionally pretrained) backbone is frozen and used purely as a
    feature extractor; only the custom classification head is trainable.
    """

    def __init__(self, num_classes=4, pretrained=True):
        """
        Args:
            num_classes: Number of output classes.
            pretrained: Load ImageNet weights for the backbone when True.
        """
        super().__init__()

        # Backbone: EfficientNet-B3, loaded via the torchvision weights API.
        weights = models.EfficientNet_B3_Weights.IMAGENET1K_V1 if pretrained else None
        self.backbone = models.efficientnet_b3(weights=weights)

        # Freeze every backbone parameter so it acts as a fixed feature
        # extractor; only the head below receives gradients.
        for p in self.backbone.parameters():
            p.requires_grad = False

        # Strip the stock classifier, remembering its input width.
        feat_dim = self.backbone.classifier[1].in_features
        self.backbone.classifier = nn.Identity()

        # Trainable classification head.
        self.classifier = nn.Sequential(
            nn.Dropout(0.4),
            nn.Linear(feat_dim, 512),
            nn.BatchNorm1d(512),
            nn.SiLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        return self.classifier(self.backbone(x))


def train_model(data_dir: str, model_save_path: str, epochs: int = 30, batch_size: int = 32, lr: float = 1e-4) -> None:
    """
    Train the K-line pattern recognition model.

    Saves the state_dict of the best (highest validation accuracy) model to
    model_save_path, then releases data loaders, GPU cache, and matplotlib
    figures before returning.

    Args:
        data_dir: Dataset root; must contain 'train' and 'val' sub-dirs
            laid out as expected by KlineDataset.
        model_save_path: File path the best model's state_dict is saved to.
        epochs: Number of training epochs.
        batch_size: Mini-batch size.
        lr: Initial learning rate for AdamW.
    """
    # Pick the best available device: CUDA, then Apple MPS, then CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu')
    print(f"Using device: {device}")
    
    # Preprocessing: resize to the backbone's input size and normalize with
    # ImageNet statistics (matches the pretrained backbone's weights).
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], 
                             std=[0.229, 0.224, 0.225])
    ])
    
    # Build datasets and loaders.
    train_dataset = KlineDataset(os.path.join(data_dir, 'train'), transform=transform)
    val_dataset = KlineDataset(os.path.join(data_dir, 'val'), transform=transform)
    
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
    
    print(f"Training samples: {len(train_dataset)}, Validation samples: {len(val_dataset)}")
    
    # Initialize the model.
    # NOTE(review): num_classes=4 although KlineDataset defines only two
    # classes — confirm whether 4 is intentional (it must stay in sync with
    # the num_classes used by PatternScanner when loading the checkpoint).
    model = KlinePatternModel(num_classes=4)
    model.to(device)
    
    # Loss, optimizer, and LR scheduler. mode='max' because the scheduler is
    # stepped with validation accuracy, which we want to maximize.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3)
    
    # Training loop; tracks the best validation accuracy for checkpointing.
    best_val_acc = 0.0
    
    for epoch in range(epochs):
        # --- Training phase ---
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0
        
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            
            # Forward pass.
            outputs = model(images)
            loss = criterion(outputs, labels)
            
            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            # Accumulate loss / accuracy statistics.
            running_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
        
        train_loss = running_loss / len(train_loader)
        train_acc = 100. * correct / total
        
        # --- Validation phase ---
        val_loss, val_acc = evaluate_model(model, val_loader, device, criterion)
        

        # Report the current learning rate (read before the scheduler may
        # reduce it below).
        current_lr = optimizer.param_groups[0]['lr']
        print(f"Epoch {epoch+1} - Learning rate: {current_lr:.2e}")
        
        # Step the plateau scheduler on validation accuracy.
        scheduler.step(val_acc)

        # Epoch summary.
        print(f"Epoch [{epoch+1}/{epochs}] | "
              f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}% | "
              f"Val Loss: {val_loss:.4f}, Val Acc: {val_acc:.2f}%")
        
        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), model_save_path)
            print(f"Saved best model with val acc: {val_acc:.2f}%")
    
    # Post-training resource cleanup.
    print("Cleaning up resources...")
    
    # 1. Drop the data loaders (releases their worker processes).
    del train_loader
    del val_loader
    
    # 2. Free cached GPU memory.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    
    # 3. Close any open matplotlib figures.
    plt.close('all')
    
    # 4. Force a garbage-collection pass.
    import gc
    gc.collect()
    
    print("Cleanup completed. Exiting program.")

    print("Training completed!")

def evaluate_model(model, data_loader, device, criterion):
    """Run one evaluation pass and report mean loss and accuracy.

    Args:
        model: Network to evaluate (switched to eval mode).
        data_loader: Iterable yielding (images, labels) batches.
        device: Device the batches are moved to before the forward pass.
        criterion: Loss function taking (outputs, labels).

    Returns:
        Tuple of (mean per-batch loss, accuracy as a percentage).
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    with torch.no_grad():
        for batch_images, batch_labels in data_loader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)

            logits = model(batch_images)
            loss_sum += criterion(logits, batch_labels).item()

            preds = logits.argmax(dim=1)
            n_seen += batch_labels.size(0)
            n_correct += (preds == batch_labels).sum().item()

    return loss_sum / len(data_loader), 100. * n_correct / n_seen

# ====================
# Real-time scanner
# ====================

class PatternScanner:
    """Scans a stock's K-line history for chart patterns with a trained model."""

    def __init__(self, model_path, code, begin_date, end_date, device='cpu'):
        """
        Args:
            model_path: Path to the trained model state_dict file.
            code: Stock code whose K-line data is scanned.
            begin_date: Start date of the K-line window.
            end_date: End date of the K-line window.
            device: Compute device for inference.
        """
        self.device = device
        self.code = code
        self.begin_date = begin_date
        self.end_date = end_date

        # Load the trained weights and switch to inference mode.
        # NOTE(review): torch.load on an untrusted file can execute arbitrary
        # code; pass weights_only=True if the installed torch supports it.
        self.model = KlinePatternModel(num_classes=4)
        self.model.load_state_dict(torch.load(model_path, map_location=device))
        self.model.to(device)
        self.model.eval()

        # Same preprocessing pipeline used at training time.
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

    def detect(self, confidence_threshold=0.1):
        """
        Render the configured K-line window to an image and classify it.

        Args:
            confidence_threshold: Minimum softmax confidence required to
                report a detected pattern.

        Returns:
            (predicted class index, confidence) when confidence exceeds the
            threshold, otherwise (None, confidence).
        """
        # Unique temporary image path for this invocation.
        temp_img_path = os.path.join(tempfile.gettempdir(), f"temp_kline_{time.time()}.png")

        try:
            # Fetch OHLCV data and render it as a candlestick chart image.
            df_daily = get_predict_kline_data(self.code, self.begin_date, self.end_date)
            ohlc = df_daily
            ohlc['Date'] = pd.to_datetime(ohlc['Date'])
            ohlc = ohlc.set_index('Date')

            # NOTE(review): volume is taken from df_daily (original index)
            # while ohlc has been re-indexed by Date — confirm that
            # plot_kline_to_image aligns the two positionally.
            volume_df = df_daily['Volume']
            plot_kline_to_image(ohlc, volume_df, temp_img_path)

            # Load and preprocess the rendered chart.
            image = Image.open(temp_img_path).convert('RGB')
            image = self.transform(image).unsqueeze(0).to(self.device)

            # Predict.
            with torch.no_grad():
                output = self.model(image)
                probabilities = F.softmax(output, dim=1)
                print(probabilities)
                conf, pred = torch.max(probabilities, 1)
                conf = conf.item()
                pred = pred.item()
        finally:
            # Remove the temp image even if rendering/inference failed;
            # ignore only filesystem errors (e.g. file never created).
            try:
                os.remove(temp_img_path)
            except OSError:
                pass

        if conf > confidence_threshold:
            return pred, conf
        else:
            return None, conf


if __name__ == '__main__':
    # 1. Dataset directory layout root.
    data_dir = "kline_dataset"

    # 2. Train the model (uncomment to retrain).
    model_path = "kline_pattern_model.pth"
    #train_model(data_dir, model_path, epochs=30, batch_size=32)

    # 3. Run the scanner for detection on a concrete stock/date window.
    device = torch.device('cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu')
    code = "603615"
    begin_date = "2021-07-09"
    end_date = "2023-10-16"
    scanner = PatternScanner(model_path, code, begin_date, end_date, device=device)

    pattern, confidence = scanner.detect()
    if pattern is not None:
        patterns = ["double_bottom", "other"]
        # The model emits 4 classes but only 2 have names here; guard the
        # lookup so class indices 2/3 don't raise IndexError.
        if pattern < len(patterns):
            print(f"Detected pattern: {patterns[pattern]} with confidence: {confidence:.2f}")
        else:
            print(f"Detected unnamed class {pattern} with confidence: {confidence:.2f}")