import torch
import pandas
import torch.nn as nn
import numpy as np
import p
import m
import torch.nn.init as init
import torch.nn.functional as F
from torch import nn,optim
import random
import os
from datetime import datetime
import pickle
import matplotlib.pyplot as plt
import seed
import focal
import ms
# Seed all RNGs for reproducibility -- project helper; presumably seeds
# python/numpy/torch, but its implementation is not visible here (verify).
seed.seed_everything()

def initialize_weights(a):
    """Apply He (Kaiming) initialization to every Conv1d/Linear layer in *a*.

    Weights use ``kaiming_normal_`` (fan_out, ReLU); biases, when present,
    are zeroed.  ``Module.modules()`` already walks the module tree
    recursively, so the original manual recursion over ``named_children``
    (whose ``elif isinstance(module, nn.Module)`` branch was always true)
    is unnecessary.

    Args:
        a: any ``nn.Module``; traversed in place, nothing is returned.
    """
    for module in a.modules():
        if isinstance(module, (nn.Conv1d, nn.Linear)):
            # He initialization suits layers followed by ReLU activations.
            init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            # Conv1d/Linear may be built with bias=False; skip in that case.
            if module.bias is not None:
                init.constant_(module.bias, 0)

class Summode(nn.Module):
    """Ensemble of eight per-feature emotion heads plus a fusion layer.

    Four acoustic features are extracted from the raw input (peak frequency,
    onset/offset, peak frequencies, HNR -- see the ``p`` helpers); each
    feature feeds two heads (an ``EmotionRecognitionModel`` and a
    ``fowardNN``), giving eight 3-class outputs ``h0``..``h7``.  ``h8``
    fuses the eight argmax votes into a final 3-class score.
    """

    def __init__(self):
        super(Summode, self).__init__()
        # Heads h0..h7: even indices are EmotionRecognitionModel(1, 3),
        # odd indices are fowardNN(15, 3, 124).  setattr replaces the
        # original exec() string-building, which was fragile and unsafe.
        for k in range(4):
            setattr(self, f"h{2 * k}", m.EmotionRecognitionModel(1, 3))
            setattr(self, f"h{2 * k + 1}", m.fowardNN(15, 3, 124))
        # BUG FIX: the original `nn.Linear(8, )` omitted the required
        # out_features argument and raised TypeError at construction.
        # The fusion layer maps the 8 head votes to the 3 emotion classes
        # (matching the 3-class heads and the CrossEntropyLoss targets).
        self.h8 = nn.Linear(8, 3)
        #initialize_weights(self)

    def forward(self, x):
        # Feature extraction is non-differentiable preprocessing.
        with torch.no_grad():
            x11 = p.find_peak_frequency(x)
            x12 = p.detect_onset_offset(x)
            x13 = p.detect_peak_frequencies(x)
            x14 = p.hnrget(x)
        x21 = self.h0(x11)
        x22 = self.h1(x11)
        x23 = self.h2(x12)
        x24 = self.h3(x12)
        x25 = self.h4(x13)
        x26 = self.h5(x13)
        x27 = self.h6(x14)
        x28 = self.h7(x14)
        # NOTE(review): argmax is non-differentiable, so no gradient flows
        # from h8 back into the heads; h8 trains on the votes alone.
        x2 = torch.stack([torch.argmax(x21), torch.argmax(x22), torch.argmax(x23), torch.argmax(x24),
                          torch.argmax(x25), torch.argmax(x26), torch.argmax(x27), torch.argmax(x28)]
                         ).float()
        y = self.h8(x2)
        # Return every head output plus the fused score so the training
        # loop can optimize all nine losses independently.
        return (x21, x22, x23, x24, x25, x26, x27, x28, y)
# Build the ensemble and move it onto the GPU.
model = Summode()
model = model.cuda()
#m_loss = focal.Focal_Loss(torch.tensor([1.61,4.17,7.11]).cuda())
m_loss = nn.CrossEntropyLoss()
# Define the optimizers: one Adam instance per head, published as module
# globals named optimizer0 .. optimizer8 so the training loop can look
# them up by name.
for i in range(9):
    globals()[f"optimizer{i}"] = optim.Adam(getattr(model, f"h{i}").parameters(), lr=0.001)
# Methods for training and testing the model follow below.

if __name__ == "__main__":
    datastr = r"D:/emotiondataset/"
    b = pandas.read_csv(r"D:\old\Desktop\old\animal\barking-emotion-recognition\data\dataset_2.csv")
    loss_0 = 999999
    batchsize = 3
    testsize = 3
    # NOTE(review): pickle.load assumes these are trusted local artifacts;
    # never unpickle data from untrusted sources.
    with open(datastr + "trainlist", "rb") as f:
        trainlist = pickle.load(f)
    train_loss = []
    val_loss = []
    epoch = []
    model.train()
    try:
        # Effectively an infinite loop; training stops on Ctrl-C.
        for j in range(10000000000000):
            # Pick one of the 3 class buckets at random, then a sample
            # index from it (crude class balancing).
            index = random.choices(trainlist[random.randint(0, 2)])[0]
            sample_path = datastr + str(index)
            if not os.path.exists(sample_path):
                # BUG FIX: the original fell through with a stale `inputs`
                # (undefined on the first iteration -> NameError) when the
                # sample file was missing; skip to the next draw instead.
                continue
            # BUG FIX: context manager closes the file handle; the
            # original leaked one open handle per iteration.
            with open(sample_path, "rb") as sample_file:
                inputs = pickle.load(sample_file)
            # inputs is presumably [waveform_tensor, label_tensor]; the
            # label looks one-hot/probabilistic (argmax'd below) -- verify.
            inputs[0] = inputs[0].cuda()
            out = model(inputs[0])
            childloss = []
            childout = []
            # Train each of the 9 heads on its own loss with its own
            # optimizer (fetched by name from module globals).
            for i in range(9):
                loss = m_loss(out[i], inputs[1].cuda())
                childout.append(int(torch.argmax(out[i])))
                optimizer = globals()[f"optimizer{i}"]
                optimizer.zero_grad()
                # retain_graph keeps shared intermediates alive for the
                # remaining heads' backward passes.
                loss.backward(retain_graph=True)
                childloss.append(round(float(loss), 3))
                optimizer.step()
            print(childout, int(torch.argmax(inputs[1])))
            # (A large commented-out validation/checkpoint pass was removed
            # here; it never ran and had the loss arguments swapped.)
    except KeyboardInterrupt:
        pass
    # NOTE(review): train_loss / val_loss are never appended in the active
    # loop, so this plot is empty -- kept to match the original behavior.
    plt.figure()
    plt.title('loss during training')
    plt.plot(epoch, train_loss, label="train_loss")
    plt.plot(epoch, val_loss, label="valid_loss")
    plt.legend()
    plt.grid()
    plt.show()