import numpy as np
from strategy.datas.data import TwoDataset
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from trade.model_base import FitBase


class PredModel2(nn.Module):
    """LSTM + MLP classifier over a flat feature vector.

    Input layout (per row of X): the first ``history * seqfeature`` values are
    a flattened time series (history steps x seqfeature features), and any
    remaining columns are static "other" features. The LSTM's last hidden
    output is concatenated with the static features and fed to a small MLP
    that produces 3-class logits.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.history = 40  # number of time steps in the flattened sequence
        self.seqfeature = 9  # features per time step (0 disables the LSTM path)
        self.hidden_size = 8
        self.other_size = 2  # count of trailing static features
        self.num_layers = 5
        if self.seqfeature > 0:
            self.seqmodel = nn.LSTM(
                self.seqfeature, self.hidden_size, self.num_layers, batch_first=True
            )
        self.drop = 0.2
        combine_input = self.hidden_size + self.other_size
        self.combinemodel = nn.Sequential(
            nn.Linear(combine_input, 32),
            nn.Sigmoid(),
            nn.Dropout(p=self.drop),
            nn.Linear(32, 3),
        )

    def forward(self, X: torch.Tensor):
        """Return (batch, 3) logits for a (batch, n_features) input.

        Expects ``X.shape[1] == history * seqfeature + other_size`` when the
        LSTM path is active (seqfeature > 0).
        """
        batch = X.shape[0]
        seqflatten_size = self.seqfeature * self.history
        seqout = None
        if self.seqfeature > 0:
            # Explicit zero initial hidden / cell states (matches the LSTM
            # default, but kept explicit for clarity and device placement).
            h_0 = torch.zeros(
                self.num_layers, batch, self.hidden_size, device=X.device
            )  # initial hidden state
            c_0 = torch.zeros(
                self.num_layers, batch, self.hidden_size, device=X.device
            )  # initial cell state
            seqdata = X[:, :seqflatten_size].view(
                [batch, self.history, self.seqfeature]
            )
            output, _ = self.seqmodel(seqdata, (h_0, c_0))
            # Last time step's output summarizes the sequence.
            seqout = output[:, -1, :]

        # BUGFIX: the zero-length-sequence case must be checked first;
        # otherwise, with seqfeature == 0 and a non-empty X, the old code
        # took the "<" branch and referenced an undefined `seqout`.
        if seqflatten_size == 0:
            combine_input = X
        elif seqflatten_size < X.shape[1]:
            # Append the trailing static features to the sequence summary.
            addfeature = X[:, seqflatten_size:]
            combine_input = torch.cat([seqout, addfeature], dim=1)
        else:
            combine_input = seqout

        return self.combinemodel(combine_input)


class TrainModel2(FitBase):
    """Training/inference wrapper around PredModel2.

    Trains with class-weighted cross entropy (weights supplied explicitly or
    derived from inverse class frequency) and predicts argmax class labels.
    """

    def __init__(self):
        self.model = PredModel2()

    def save_model(self, file: str):
        """Serialize model weights to `file`."""
        torch.save(self.model.state_dict(), file)

    def load_model(self, file: str):
        """Restore model weights previously written by `save_model`."""
        self.model.load_state_dict(torch.load(file))

    def fit(
        self,
        X,
        y,
        weights=None,
        epochs: int = 1,
        verbose: int = 500,
        batch_size: int = 32,
        device: str = "cuda",
        lr: float = 1e-3,
    ):
        """Train the model with per-sample class-weighted cross entropy.

        Args:
            X, y: features and integer class labels (accepted by TwoDataset).
            weights: optional {class_index: weight} mapping; when falsy,
                weights default to inverse class frequency.
            epochs: number of passes over the data.
            verbose: print the loss every `verbose` batches (0 disables).
            batch_size: minibatch size.
            device: training device; the model is moved back to CPU when done.
            lr: Adam learning rate.
        """
        Z = TwoDataset(X, y)
        ds = DataLoader(Z, batch_size=batch_size, shuffle=True)
        opti = torch.optim.Adam(self.model.parameters(), lr=lr)
        self.model = self.model.to(device)
        self.model.train()

        # Build per-class weights: explicit mapping wins; otherwise use
        # inverse frequency (len(y) / (count + 1)) so rare classes weigh more.
        weight_array = torch.ones(int(np.max(y)) + 1)
        if weights:
            for k, v in weights.items():
                weight_array[k] = v
        else:
            unique_values, counts = np.unique(y, return_counts=True)
            for k, v in zip(unique_values, counts):
                weight_array[k] = len(y) / (v + 1)
        weight_array = weight_array.to(device, dtype=torch.float32)
        # Normalize so the weighted loss scale is comparable across runs.
        weight_array = weight_array / weight_array.sum()

        for epoch in range(epochs):
            all_loss = []
            # NOTE: loop variables renamed (xb/yb) so they no longer shadow
            # the `y` argument used above to derive the class weights.
            for i, (xb, yb) in enumerate(ds):
                xb = xb.to(device, dtype=torch.float32)
                yb = yb.to(device, dtype=torch.long)
                pred_y = self.model(xb)
                opti.zero_grad()
                # Per-sample CE, then reweight by each sample's class weight.
                loss = F.cross_entropy(pred_y, yb, reduction="none")
                tmp_weight = weight_array[yb]
                loss = (loss * tmp_weight).mean()
                loss.backward()
                opti.step()
                # Guard verbose > 0: `i % 0` would raise ZeroDivisionError.
                if verbose > 0 and i % verbose == 0:
                    print(f"train {epoch} batch {i}, \t loss {loss:.4f}")
                all_loss.append(loss.item())
            print(f"Done epoch {epoch}, avg loss: {np.mean(all_loss)}")
        self.model = self.model.to("cpu")

    def predict(self, X, batch_size: int = 1000):
        """Return predicted class indices (argmax of logits) for X, on CPU."""
        self.model.eval()
        ds = DataLoader(X, batch_size=batch_size, shuffle=False)
        results = []
        # Inference only: no_grad avoids building autograd graphs, which the
        # original version did needlessly (wasting memory on large inputs).
        with torch.no_grad():
            for x in ds:
                x = x.to(dtype=torch.float32)
                pred_y = self.model(x)
                results.append(pred_y.numpy())
        results = np.concatenate(results, axis=0)
        return np.argmax(results, axis=1)
