import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pandas as pd
from collections import defaultdict
import networkx as nx
import time

class GraphAttentionLayer(nn.Module):
    """Single dense graph attention layer (GAT, Velickovic et al., 2018).

    Projects node features with a learned weight matrix, scores every
    (source, target) pair with an additive attention mechanism, masks
    non-edges via the adjacency matrix, and aggregates neighbour features
    with the softmax-normalised attention weights.
    """

    def __init__(self, in_features, out_features, dropout, alpha, concat=True):
        """
        Args:
            in_features: width of each input node feature vector.
            out_features: width of each output node feature vector.
            dropout: dropout probability applied to the attention weights.
            alpha: negative slope of the LeakyReLU on raw attention scores.
            concat: if True apply ELU to the output (hidden layer); if False
                return the raw aggregation (output layer).
        """
        super(GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        self.concat = concat

        # Xavier-initialised projection matrix and attention vector.
        self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)

        self.a = nn.Parameter(torch.zeros(size=(2*out_features, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)

        # LeakyReLU applied to the raw pairwise scores.
        self.leakyrelu = nn.LeakyReLU(self.alpha)

    def forward(self, input, adj):
        """Run one attention pass.

        Args:
            input: (N, in_features) node feature matrix.
            adj: (N, N) adjacency matrix; entries > 0 mark edges.

        Returns:
            (N, out_features) aggregated node features (ELU'd if concat).
        """
        h = torch.mm(input, self.W)  # (N, out_features)

        # a^T [h_i || h_j] decomposes into a_src^T h_i + a_dst^T h_j, so the
        # full (N, N) score matrix can be built by broadcasting two (N, 1)
        # projections instead of materialising the O(N^2 * 2F)
        # pairwise-concatenation tensor of the textbook formulation.
        e_src = torch.matmul(h, self.a[:self.out_features, :])  # (N, 1)
        e_dst = torch.matmul(h, self.a[self.out_features:, :])  # (N, 1)
        e = self.leakyrelu(e_src + e_dst.T)                     # (N, N)

        # Mask non-edges with a large negative so softmax drives them to ~0.
        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)

        # Attention-weighted aggregation of projected neighbour features.
        h_prime = torch.matmul(attention, h)

        # Hidden layers get a non-linearity; the output layer stays raw.
        if self.concat:
            return F.elu(h_prime)
        return h_prime

    def __repr__(self):
        return f'{self.__class__.__name__} ({self.in_features} -> {self.out_features})'

class GAT(nn.Module):
    """Multi-head graph attention network.

    Runs several parallel attention heads over the input graph,
    concatenates their outputs, and classifies each node through a final
    single-head attention layer followed by log-softmax.
    """

    def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
        """
        Args:
            nfeat: input feature width per node.
            nhid: hidden width per attention head.
            nclass: number of output classes per node.
            dropout: dropout probability on features and attention weights.
            alpha: LeakyReLU negative slope inside each attention head.
            nheads: number of parallel attention heads in the first layer.
        """
        super(GAT, self).__init__()
        self.dropout = dropout

        # Build the heads one by one, registering each under the name
        # 'attention_<i>' so its parameters are tracked by the module
        # (and state_dict keys stay stable for existing checkpoints).
        self.attentions = []
        for head_idx in range(nheads):
            head = GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True)
            self.attentions.append(head)
            self.add_module('attention_{}'.format(head_idx), head)

        # Final single-head layer mapping concatenated heads to class scores.
        self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)

    def forward(self, x, adj):
        """Return (N, nclass) per-node log-probabilities."""
        x = F.dropout(x, self.dropout, training=self.training)
        head_outputs = [head(x, adj) for head in self.attentions]
        x = torch.cat(head_outputs, dim=1)
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.elu(self.out_att(x, adj))
        return F.log_softmax(x, dim=1)

class GNNMarketAnalyzer:
    """GAT-based market analyzer for sector-linkage analysis.

    Builds a return-correlation graph over symbols from raw tick data,
    trains a small GAT on it (self-supervised when no labels are given),
    and derives sector groupings, cross-symbol linkage and leading /
    lagging stocks.
    """

    def __init__(self, config=None):
        """
        Args:
            config: optional dict of hyper-parameters; when omitted the
                defaults below are used.
        """
        self.config = config or {
            'hidden_dim': 8,
            'output_dim': 4,
            'dropout': 0.6,
            'alpha': 0.2,
            'nheads': 8,
            'learning_rate': 0.005,
            'epochs': 200
        }

        self.model = None          # lazily-built GAT
        self.optimizer = None
        self.graph_cache = {}      # caches the last analysis result
        self.symbol_to_index = {}  # symbol -> graph node index
        self.index_to_symbol = {}  # graph node index -> symbol
        self.last_update_time = 0
        self.update_interval = 30  # minimum seconds between graph rebuilds

    def _model_input_dim(self):
        """Feature width the current model expects, or None if no model."""
        if self.model is None:
            return None
        return self.model.attentions[0].in_features

    def build_market_graph(self, market_data, correlation_threshold=0.7):
        """Build the market correlation graph from tick data.

        Args:
            market_data: iterable of tick objects exposing ``symbol``,
                ``timestamp`` and ``price`` attributes.
            correlation_threshold: absolute return-correlation above which
                two symbols are linked by an edge.

        Returns:
            (adj, features): dense (N, N) unweighted adjacency tensor and
            (N, F) node feature tensor built from up to the last 30 return
            steps, zero-padded to at least 10 columns.
        """
        # Sorted for a deterministic node ordering across runs (a plain
        # list(set(...)) order depends on string-hash randomization).
        symbols = sorted(set(d.symbol for d in market_data))
        symbol_count = len(symbols)

        # Stable symbol <-> node-index mappings for this graph.
        self.symbol_to_index = {symbol: i for i, symbol in enumerate(symbols)}
        self.index_to_symbol = {i: symbol for symbol, i in self.symbol_to_index.items()}

        # Group (timestamp, price) observations per symbol.
        symbol_data = defaultdict(list)
        for d in market_data:
            symbol_data[d.symbol].append((d.timestamp, d.price))

        # Dense price matrix: one row per symbol, one column per time step.
        price_matrix = np.zeros((symbol_count, max(len(data) for data in symbol_data.values())))

        for symbol, data in symbol_data.items():
            idx = self.symbol_to_index[symbol]
            prices = [d[1] for d in sorted(data, key=lambda x: x[0])]
            price_matrix[idx, :len(prices)] = prices
            # Forward-fill rows with fewer observations so every row spans
            # the full time axis (avoids zero prices in the return calc).
            if len(prices) < price_matrix.shape[1]:
                price_matrix[idx, len(prices):] = prices[-1]

        # Per-step simple returns.
        returns_matrix = np.diff(price_matrix, axis=1) / price_matrix[:, :-1]

        # Pairwise return correlations. Zero-variance rows yield NaN, which
        # never exceeds the threshold, so such symbols simply get no edges.
        correlation_matrix = np.corrcoef(returns_matrix)

        # Threshold |correlation| into an unweighted adjacency matrix.
        adj_matrix = np.zeros((symbol_count, symbol_count))
        for i in range(symbol_count):
            for j in range(symbol_count):
                if i != j and abs(correlation_matrix[i, j]) > correlation_threshold:
                    adj_matrix[i, j] = 1.0

        adj = torch.FloatTensor(adj_matrix)
        # Use up to the last 30 return steps as per-node features.
        features = torch.FloatTensor(returns_matrix.T[-30:].T)

        # Zero-pad short histories so the feature width is at least 10.
        if features.shape[1] < 10:
            padding = torch.zeros((features.shape[0], 10 - features.shape[1]))
            features = torch.cat([features, padding], dim=1)

        self.last_update_time = time.time()

        return adj, features

    def init_model(self, input_dim):
        """Create a fresh GAT and Adam optimizer for ``input_dim``-wide features."""
        self.model = GAT(
            nfeat=input_dim,
            nhid=self.config['hidden_dim'],
            nclass=self.config['output_dim'],
            dropout=self.config['dropout'],
            nheads=self.config['nheads'],
            alpha=self.config['alpha']
        )

        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config['learning_rate'])

    def train(self, adj, features, labels=None):
        """Train the GNN on one graph.

        Args:
            adj: (N, N) adjacency tensor.
            features: (N, F) node feature tensor.
            labels: optional (N,) class labels; when omitted a simple
                self-supervised target (argmax feature index modulo the
                number of classes) is used.

        Returns:
            True on completion.
        """
        # Lazily build the model to match the feature width.
        if self.model is None:
            self.init_model(features.shape[1])

        if labels is None:
            # Self-supervised fallback: derive pseudo-labels from features.
            labels = torch.argmax(features, dim=1) % self.config['output_dim']

        self.model.train()

        for epoch in range(self.config['epochs']):
            self.optimizer.zero_grad()
            output = self.model(features, adj)
            loss = F.nll_loss(output, labels)
            loss.backward()
            self.optimizer.step()

        print("GNN model trained successfully")
        return True

    def analyze_sector_correlation(self, market_data, force_update=False):
        """Analyze sector linkage between symbols.

        Returns the cached result when the last graph build is younger
        than ``update_interval`` seconds, unless ``force_update`` is set.

        Returns:
            Dict with per-symbol analysis, group membership, group
            strength scores, update time (ms) and symbol count.
        """
        current_time = time.time()
        if not force_update and current_time - self.last_update_time < self.update_interval:
            if 'cached_graph_result' in self.graph_cache:
                return self.graph_cache['cached_graph_result']

        adj, features = self.build_market_graph(market_data)

        # (Re)train when there is no model, a refresh is forced, or the
        # feature width changed since the model was built — the old weights
        # cannot consume inputs of a different width.
        expected_dim = self._model_input_dim()
        if expected_dim is not None and expected_dim != features.shape[1]:
            self.init_model(features.shape[1])
            self.train(adj, features)
        elif self.model is None or force_update:
            self.train(adj, features)

        # Inference pass: assign each node (symbol) to a predicted group.
        self.model.eval()
        with torch.no_grad():
            output = self.model(features, adj)
            predictions = torch.argmax(output, dim=1)

        sector_analysis = {}
        for symbol, idx in self.symbol_to_index.items():
            sector_analysis[symbol] = {
                'predicted_group': predictions[idx].item(),
                'correlated_symbols': []
            }

        # Attach each symbol's strongly-correlated peers from the adjacency.
        adj_np = adj.numpy()
        for i, symbol_i in self.index_to_symbol.items():
            for j, symbol_j in self.index_to_symbol.items():
                if i != j and adj_np[i, j] > 0:
                    sector_analysis[symbol_i]['correlated_symbols'].append({
                        'symbol': symbol_j,
                        'correlation_strength': adj_np[i, j]
                    })

        # Group symbols by predicted cluster.
        groups = defaultdict(list)
        for symbol, info in sector_analysis.items():
            groups[info['predicted_group']].append(symbol)

        # Score each group by its mean first-to-last return and its size.
        sector_strengths = {}
        for group_id, symbols in groups.items():
            returns = []
            for symbol in symbols:
                # Sort by timestamp so first/last prices are time-ordered,
                # consistent with the rest of the class (the original relied
                # on market_data iteration order here).
                ticks = sorted(
                    ((d.timestamp, d.price) for d in market_data if d.symbol == symbol),
                    key=lambda t: t[0]
                )
                if len(ticks) >= 2:
                    returns.append((ticks[-1][1] - ticks[0][1]) / ticks[0][1])

            if returns:
                sector_strengths[group_id] = {
                    'avg_return': np.mean(returns),
                    'symbol_count': len(symbols),
                    'strength_score': abs(np.mean(returns)) * len(symbols)
                }

        result = {
            'sector_analysis': sector_analysis,
            'sector_groups': dict(groups),
            'sector_strengths': sector_strengths,
            'update_time': int(current_time * 1000),
            'total_symbols': len(self.symbol_to_index)
        }

        self.graph_cache['cached_graph_result'] = result

        return result

    def detect_leading_stocks(self, market_data, top_n=5):
        """Detect leading gainers and losers.

        Args:
            market_data: iterable of ticks (``symbol``/``timestamp``/``price``).
            top_n: number of entries in each of the gainer/loser lists.

        Returns:
            Dict with 'top_gainers' and 'top_losers' (lists of
            (symbol, metrics) pairs) and a millisecond 'timestamp'.
        """
        # Run (or reuse) the sector-linkage analysis for group assignments.
        analysis_result = self.analyze_sector_correlation(market_data)

        symbol_data = defaultdict(list)
        for d in market_data:
            symbol_data[d.symbol].append((d.timestamp, d.price))

        # Per-symbol return / volatility / Sharpe-like metrics.
        stock_metrics = {}
        for symbol, data in symbol_data.items():
            if len(data) < 2:
                continue  # cannot compute a return from a single tick

            sorted_data = sorted(data, key=lambda x: x[0])
            prices = [d[1] for d in sorted_data]
            returns = np.diff(prices) / prices[:-1]

            total_return = (prices[-1] - prices[0]) / prices[0]
            volatility = np.std(returns)

            group_id = analysis_result['sector_analysis'].get(symbol, {}).get('predicted_group', -1)

            stock_metrics[symbol] = {
                'return': total_return,
                'volatility': volatility,
                'group_id': group_id,
                'sharpe': total_return / volatility if volatility > 0 else 0
            }

        # NOTE: with fewer than 2*top_n symbols the two lists overlap.
        sorted_stocks = sorted(stock_metrics.items(), key=lambda x: x[1]['return'], reverse=True)

        return {
            'top_gainers': sorted_stocks[:top_n],
            'top_losers': sorted_stocks[-top_n:][::-1],
            'timestamp': int(time.time() * 1000)
        }

    def save_model(self, path='models/gnn_market_model.pth'):
        """Persist model, optimizer and symbol mappings to ``path``.

        Returns:
            True if a model existed and was saved, False otherwise.
        """
        if self.model is not None:
            torch.save({
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'symbol_to_index': self.symbol_to_index,
                'index_to_symbol': self.index_to_symbol,
                # Persist the feature width so load_model can rebuild an
                # architecture that matches the saved weights.
                'input_dim': self._model_input_dim()
            }, path)
            print(f"GNN model saved to {path}")
            return True
        return False

    def load_model(self, path='models/gnn_market_model.pth'):
        """Restore model, optimizer and symbol mappings from ``path``.

        Returns:
            True on success, False on any failure.
        """
        try:
            # NOTE(review): torch.load unpickles arbitrary objects; only
            # load trusted checkpoint files (or pass weights_only=True on
            # recent torch versions).
            checkpoint = torch.load(path)
            self.model = GAT(
                # Older checkpoints lack 'input_dim'; fall back to the
                # historical hard-coded width of 10.
                nfeat=checkpoint.get('input_dim', 10),
                nhid=self.config['hidden_dim'],
                nclass=self.config['output_dim'],
                dropout=self.config['dropout'],
                nheads=self.config['nheads'],
                alpha=self.config['alpha']
            )
            self.model.load_state_dict(checkpoint['model_state_dict'])

            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config['learning_rate'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

            self.symbol_to_index = checkpoint['symbol_to_index']
            self.index_to_symbol = checkpoint['index_to_symbol']

            print(f"GNN model loaded from {path}")
            return True
        except Exception as e:
            print(f"Failed to load GNN model: {str(e)}")
            return False