import numpy as np
import pandas as pd
import torch
from torch import nn
from sklearn.model_selection import train_test_split
import torch.nn.functional as F

# Synthetic data generation
def generate_data(num_samples=1000, seed=42):
    """Generate a synthetic football-match dataset.

    Args:
        num_samples: number of rows to generate.
        seed: RNG seed for reproducibility (default 42, matching the
            previous hard-coded behavior).

    Returns:
        pandas.DataFrame with columns:
            win_odds  -- home-win odds, uniform in [1.0, 3.0], 2 decimals
            draw_odds -- draw odds, uniform in [2.0, 4.0], 2 decimals
            lose_odds -- away-win odds, uniform in [1.0, 3.0], 2 decimals
            handicap  -- handicap line, uniform in [-1.0, 1.0], 2 decimals
            result    -- match outcome in {1, 2, 3} (home win / draw / away win)
                         drawn with probabilities 0.4 / 0.2 / 0.4
    """
    np.random.seed(seed)
    win_odds = np.round(np.random.uniform(1.0, 3.0, num_samples), 2)   # home-win odds
    draw_odds = np.round(np.random.uniform(2.0, 4.0, num_samples), 2)  # draw odds
    lose_odds = np.round(np.random.uniform(1.0, 3.0, num_samples), 2)  # away-win odds
    handicap = np.round(np.random.uniform(-1.0, 1.0, num_samples), 2)  # handicap line
    results = np.random.choice([1, 2, 3], size=num_samples, p=[0.4, 0.2, 0.4])

    data = pd.DataFrame({
        'win_odds': win_odds,
        'draw_odds': draw_odds,
        'lose_odds': lose_odds,
        'handicap': handicap,
        'result': results,
    })
    return data

# Data preprocessing
def preprocess_data(data):
    """Split a match DataFrame into a feature matrix and a label vector.

    Args:
        data: DataFrame containing the three odds columns, the handicap
            column, and the 'result' label column.

    Returns:
        (features, labels): features is an (n, 4) array of
        [win_odds, draw_odds, lose_odds, handicap]; labels is the
        1-D array of 'result' values.
    """
    feature_columns = ['win_odds', 'draw_odds', 'lose_odds', 'handicap']
    X = data[feature_columns].values
    y = data['result'].values
    return X, y


class SimpleNNModel(nn.Module):
    """Two-layer feed-forward classifier.

    A single hidden layer with ReLU, followed by a linear projection to
    the class logits. Returns raw logits -- no softmax is applied, since
    CrossEntropyLoss performs log-softmax internally.
    """

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(SimpleNNModel, self).__init__()
        self.fc1 = nn.Linear(input_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Map a (batch, input_dim) tensor to (batch, output_dim) logits."""
        hidden = F.relu(self.fc1(x))
        return self.fc2(hidden)

# Transformer model definition
class TransformerModel(nn.Module):
    """Transformer-encoder classifier over the odds/handicap features.

    Fixes over the previous version:
      * The old head projected to a single unit and applied softmax over
        that size-1 dimension, so every output was the constant 1.0.
      * Softmax output is incompatible with the CrossEntropyLoss used in
        train_model (which expects raw logits with num_classes columns);
        this model now returns (batch, num_classes) logits.
      * The full nn.Transformer was fed a clone of the source as the
        decoder target, which is meaningless for classification; an
        encoder-only stack is used instead.

    Args:
        input_dim: number of input features per sample.
        model_dim: transformer embedding dimension (d_model).
        num_heads: number of attention heads (must divide model_dim).
        num_layers: number of encoder layers.
        num_classes: number of output classes (default 3, matching
            train_model's expected output shape).
    """

    def __init__(self, input_dim, model_dim, num_heads, num_layers, num_classes=3):
        super(TransformerModel, self).__init__()
        self.embedding = nn.Linear(input_dim, model_dim)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=model_dim, nhead=num_heads, batch_first=True)
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.fc = nn.Linear(model_dim, num_classes)

    def forward(self, src):
        """Return (batch, num_classes) logits for (batch, input_dim) input."""
        # Treat each sample as a length-1 sequence for the encoder.
        if src.dim() == 2:
            src = src.unsqueeze(1)  # (batch, features) -> (batch, 1, features)
        x = self.embedding(src)
        x = self.transformer(x)
        x = x.mean(dim=1)  # pool over the sequence dimension
        # Raw logits; CrossEntropyLoss applies log-softmax internally.
        return self.fc(x)

# Model training
def train_model(model, features, labels, epochs=10000, lr=0.0024):
    """Train *model* with full-batch Adam + CrossEntropyLoss.

    Args:
        model: a torch.nn.Module mapping (batch, n_features) to
            (batch, 3) class logits.
        features: array-like of shape (n_samples, n_features).
        labels: array-like of 1-based class labels in {1, 2, 3}.
        epochs: number of full-batch training steps.
        lr: initial Adam learning rate (decayed x0.1 every 100 epochs
            by StepLR).

    Raises:
        ValueError: if the model's output does not have 3 columns.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)

    # Hoisted out of the loop: the data never changes between epochs, so
    # converting it once avoids thousands of redundant tensor copies.
    features_tensor = torch.FloatTensor(features)
    # Labels arrive 1-based ({1,2,3}); CrossEntropyLoss needs 0-based indices.
    labels_tensor = torch.LongTensor(labels - 1)

    for epoch in range(epochs):
        model.train()
        optimizer.zero_grad()

        outputs = model(features_tensor)

        # Sanity check: logits must be (batch_size, 3 classes).
        if outputs.shape[1] != 3:
            raise ValueError(f'Unexpected output shape: {outputs.shape}')

        loss = criterion(outputs, labels_tensor)
        loss.backward()
        optimizer.step()
        scheduler.step()

        if epoch % 10 == 0:
            print(f'Epoch {epoch}, Loss: {loss.item()}')

# Main program
if __name__ == "__main__":
    # Build a synthetic match dataset and derive features/labels from it.
    data = generate_data(num_samples=1000)
    features, labels = preprocess_data(data)

    # Hold out 20% of the samples for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.2, random_state=42)

    # Construct and fit the feed-forward classifier.
    model = SimpleNNModel(input_dim=4, hidden_dim=64, output_dim=3)
    train_model(model, X_train, y_train)

    # Inference: pick the argmax class and shift back to the 1..3 label space.
    model.eval()
    with torch.no_grad():
        predictions = model(torch.FloatTensor(X_test))
        predicted_classes = predictions.argmax(dim=1) + 1
        print(X_test)
        print(predicted_classes)