# encoding: utf-8
import json
import os
import numpy as np

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, IterableDataset


class MyIterDataset(IterableDataset):
    """Streams (features, label) pairs from a JSON-lines file.

    Each line is expected to be a JSON array; its first 10 elements are
    yielded as a numpy feature vector and its final element as the label.
    """

    def __init__(self, file_path: str):
        self.file_path = file_path

    def __iter__(self):
        # Re-open the file on every iteration so the dataset can be
        # consumed afresh each epoch.
        with open(self.file_path, "r", encoding="utf-8") as fh:
            for raw in fh:
                record = json.loads(raw)
                features = np.array(record[:10])
                yield features, record[-1]


class MyDataset(Dataset):
    """In-memory, map-style dataset backed by a JSON-lines file.

    Every non-blank line is parsed as a JSON array: the first 10 values
    become the feature vector and the last value becomes the label.
    """

    def __init__(self, file_path: str):
        self.data = []   # numpy feature vectors, one per sample
        self.label = []  # scalar labels, aligned with self.data
        self.load_data(file_path)

    def load_data(self, file_path: str):
        # Blank lines are skipped; everything else must be valid JSON.
        with open(file_path, "r", encoding="utf-8") as fh:
            records = [json.loads(raw) for raw in fh if raw.strip()]
        for record in records:
            self.data.append(np.array(record[:10]))
            self.label.append(record[-1])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index], self.label[index]


class Model(torch.nn.Module):
    """Small MLP regressor: 10 -> 10 (SELU + dropout) -> 1."""

    def __init__(self):
        super().__init__()
        # NOTE: layer creation order fixes the order random weights are
        # drawn in, so it is kept identical to the original.
        self.linear1 = nn.Linear(10, 10)
        self.fc = nn.Linear(10, 1)
        self.dropout = nn.Dropout(0.4)
        self.activate = nn.SELU()

    def forward(self, x):
        hidden = self.dropout(self.activate(self.linear1(x)))
        return self.fc(hidden)



def do_train():
    """Train the regression ``Model`` on a JSON-lines dataset.

    Reads samples from ``./demo.txt`` via ``MyIterDataset``, optimizes with
    Adam + SmoothL1Loss, prints the mean batch loss each epoch, and saves a
    checkpoint under ``./weights`` after every epoch.
    """
    # Keep the device a torch.device on both branches (the original mixed
    # torch.device with the plain string 'cpu').
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("train device ==", device)

    model = Model()
    model.to(device)

    criterion = torch.nn.SmoothL1Loss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)

    # train_path = "./train.txt"
    train_path = "./demo.txt"
    epochs = 20
    # train_dataset = MyDataset(train_path)
    train_dataset = MyIterDataset(train_path)
    batch_size = 256
    # shuffle must stay False: DataLoader forbids shuffling an IterableDataset.
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=False)

    for epoch in range(epochs):
        model.train()  # once per epoch is enough; no need to set it per batch
        loss_sum = 0.0
        batch_count = 0
        for data, label in train_dataloader:
            data = data.to(device).float()
            label = label.to(device).float()

            out = model(data).squeeze(1)
            loss = criterion(out, label)

            optimizer.zero_grad()
            loss.backward()   # backpropagate
            optimizer.step()  # apply the gradient update

            # criterion returns the *mean* loss over the batch, so the epoch
            # average is loss_sum / number-of-batches. (The original divided
            # by a sample count seeded at 1 and hackily decremented, which
            # under-reported the loss by roughly a factor of batch_size.)
            loss_sum += loss.item()
            batch_count += 1

        loss_value = loss_sum / max(batch_count, 1)  # guard: empty dataset
        print(f"epoch:{epoch}, loss:{loss_value}")
        # makedirs with exist_ok avoids the exists()/mkdir() race.
        os.makedirs("./weights", exist_ok=True)
        torch.save(model.state_dict(), f"./weights/model_{epoch}_loss_{loss_value}.pth")


if __name__ == "__main__":
    # Script entry point: run the training loop directly.
    do_train()