import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

import struct
import numpy as np
import matplotlib.pyplot as plt

def loadResampDat(dat_path, enable_single=False):
    """Load a resampling control-point (.dat) file.

    Binary layout (native byte order): int32 width, int32 height,
    int32 N, then five consecutive arrays of N float64 values:
    x, y, z, column, row.

    Args:
        dat_path: path to the binary .dat file.
        enable_single: when True, down-cast the returned arrays to
            float32 (on disk they are always stored as float64).

    Returns:
        (vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row, img_size) where
        img_size is np.array([height, width], dtype=int).

    Raises:
        FileNotFoundError: if dat_path cannot be opened.
        ValueError: if the file is shorter than its header promises.
    """
    load_type = np.float32 if enable_single else np.float64

    try:
        with open(dat_path, "rb") as fid:
            # Read width, height and control-point count N (3 x int32).
            header = fid.read(12)
            if len(header) != 12:
                raise ValueError(f"truncated header in: {dat_path}")
            width, height, N = struct.unpack('3i', header)
            img_size = np.array([height, width], dtype=int)

            def read_block():
                # One block is N float64 values written back-to-back.
                raw = fid.read(8 * N)
                if len(raw) != 8 * N:
                    raise ValueError(f"truncated data block in: {dat_path}")
                # astype() copies, so the result is writable even though
                # frombuffer views are read-only.
                return np.frombuffer(raw, dtype=np.float64).astype(load_type)

            vgcp_x = read_block()
            vgcp_y = read_block()
            vgcp_z = read_block()
            vgcp_col = read_block()
            vgcp_row = read_block()

        return vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row, img_size

    except FileNotFoundError:
        raise FileNotFoundError(f"cannot open: {dat_path}")

class NormInfo:
    """Normalization offsets and scales for ground/image coordinates.

    Maps ground coordinates (long/lat/height) and image coordinates
    (samp/line) into roughly [-1, 1] via (v - offset) / scale. All
    stored values are truncated to 6 decimal places so they round-trip
    through the fixed-precision binary parameter file.
    """

    @staticmethod
    def _trunc6(v):
        """Truncate v to 6 decimal places (toward -inf, like np.floor)."""
        return np.floor(v * 1e6) / 1e6

    def __init__(self, vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row, img_size):
        """Derive offsets/scales from the control points and image size.

        vgcp_col / vgcp_row are accepted for interface symmetry with
        normalize() but are not used here: the image-coordinate terms
        come from img_size = [height, width] alone.
        """
        # Longitude: mid-point offset, half-range scale.
        x_min = self._trunc6(vgcp_x.min())
        x_max = self._trunc6(vgcp_x.max())
        self.m_longOffset = self._trunc6((x_max + x_min) / 2)
        self.m_longScale = self._trunc6((x_max - x_min) / 2)

        # Latitude: same scheme as longitude.
        y_min = self._trunc6(vgcp_y.min())
        y_max = self._trunc6(vgcp_y.max())
        self.m_latOffset = self._trunc6((y_max + y_min) / 2)
        self.m_latScale = self._trunc6((y_max - y_min) / 2)

        # Height: expand the range outward to whole multiples of 1000
        # before taking mid-point/half-range.
        z_min = np.floor(vgcp_z.min() / 1000) * 1000
        z_max = np.ceil(vgcp_z.max() / 1000) * 1000
        self.m_heightOffset = self._trunc6((z_max + z_min) / 2)
        self.m_heightScale = self._trunc6((z_max - z_min) / 2)

        # Image coordinates: centered at half the image size.
        self.m_sampOffset = self._trunc6(img_size[1] / 2)
        self.m_sampScale = self._trunc6(img_size[1] / 2)
        self.m_lineOffset = self._trunc6(img_size[0] / 2)
        self.m_lineScale = self._trunc6(img_size[0] / 2)

    def print(self):
        """Print all scales and offsets (6 decimal places)."""
        print("-----------")
        print("LongScale  : %.6f" % self.m_longScale)
        print("LatScale   : %.6f" % self.m_latScale)
        print("HeightScale: %.6f" % self.m_heightScale)
        print("SampScale  : %.6f" % self.m_sampScale)
        print("LineScale  : %.6f" % self.m_lineScale)
        print("-----------")
        print("LongOffset  : %.6f" % self.m_longOffset)
        print("LatOffset   : %.6f" % self.m_latOffset)
        print("HeightOffset: %.6f" % self.m_heightOffset)
        print("SampOffset  : %.6f" % self.m_sampOffset)
        print("LineOffset  : %.6f" % self.m_lineOffset)
        print("-----------")

    def normalize(self, vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row):
        """Return the five arrays mapped to normalized coordinates."""
        vgcp_col = (vgcp_col - self.m_sampOffset) / self.m_sampScale
        vgcp_row = (vgcp_row - self.m_lineOffset) / self.m_lineScale
        vgcp_x = (vgcp_x - self.m_longOffset) / self.m_longScale
        vgcp_y = (vgcp_y - self.m_latOffset) / self.m_latScale
        vgcp_z = (vgcp_z - self.m_heightOffset) / self.m_heightScale
        return vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row

    def params(self):
        """Return the 10 parameters in the on-disk order: 5 scales, 5 offsets."""
        return [self.m_longScale, self.m_latScale, self.m_heightScale, self.m_sampScale, self.m_lineScale,
                self.m_longOffset, self.m_latOffset, self.m_heightOffset, self.m_sampOffset, self.m_lineOffset]

class SimpleFCN(nn.Module):
    """A minimal fully-connected regressor: Linear -> ReLU -> Linear."""

    def __init__(self, input_channels, output_channels):
        super().__init__()
        hidden = 16
        # NOTE: the attribute must stay named m_fc — the serialized
        # state_dict keys (m_fc.0.weight, ...) depend on it.
        self.m_fc = nn.Sequential(
            nn.Linear(input_channels, hidden),
            nn.ReLU(),
            nn.Linear(hidden, output_channels),
        )

    def forward(self, x):
        """Map a (batch, input_channels) tensor to (batch, output_channels)."""
        return self.m_fc(x)

if __name__ == "__main__":
    # Dataset/scene index; named scene_id to avoid shadowing the builtin `id`.
    scene_id = 4
    train_dat_path = "./datas/%02d/resamp_%02d.dat" % (scene_id, scene_id)
    test_dat_path = "./datas/%02d/resamp_verify_%02d.dat" % (scene_id, scene_id)
    pth_path = "./datas/%02d/fcn_%02d.pth" % (scene_id, scene_id)
    param_path = "./datas/%02d/fcn_%02d.bin" % (scene_id, scene_id)

    # Load training and verification control points as float32.
    vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row, img_size = loadResampDat(train_dat_path, True)
    vgcp_x_1, vgcp_y_1, vgcp_z_1, vgcp_col_1, vgcp_row_1, _ = loadResampDat(test_dat_path, True)

    # Normalization is derived from the TRAINING set and applied to both
    # sets so they share one coordinate frame.
    norm_info = NormInfo(vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row, img_size)
    norm_info.print()
    vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row = norm_info.normalize(
        vgcp_x, vgcp_y, vgcp_z, vgcp_col, vgcp_row)
    vgcp_x_1, vgcp_y_1, vgcp_z_1, vgcp_col_1, vgcp_row_1 = norm_info.normalize(
        vgcp_x_1, vgcp_y_1, vgcp_z_1, vgcp_col_1, vgcp_row_1)

    # Datasets: inputs are (x, y, z), targets are (col, row).
    train_data = torch.tensor(np.column_stack((vgcp_x, vgcp_y, vgcp_z)), dtype=torch.float32)
    labels = torch.tensor(np.column_stack((vgcp_col, vgcp_row)), dtype=torch.float32)
    dataset = TensorDataset(train_data, labels)

    train_data_1 = torch.tensor(np.column_stack((vgcp_x_1, vgcp_y_1, vgcp_z_1)), dtype=torch.float32)
    labels_1 = torch.tensor(np.column_stack((vgcp_col_1, vgcp_row_1)), dtype=torch.float32)
    dataset_1 = TensorDataset(train_data_1, labels_1)

    # Model: 3 normalized ground coordinates -> 2 normalized image coordinates.
    model = SimpleFCN(3, 2)

    # Training hyper-parameters.
    batch_size = 64
    num_epochs = 100
    learn_rate = 1e-3
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=learn_rate)

    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        for batch_train, batch_labels in dataloader:
            # Forward pass.
            outputs = model(batch_train)
            loss = criterion(outputs, batch_labels)

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Accumulate batch loss for epoch reporting.
            running_loss += loss.item()

        epoch_loss = running_loss / len(dataloader)
        print(f'Epoch [{epoch+1:03d}/{num_epochs}], Loss: {epoch_loss:.8f}')

    torch.save(model.state_dict(), pth_path)

    # Validation. eval() was previously missing; harmless for this model
    # (no dropout/batch-norm) but the correct inference-mode idiom.
    model.eval()
    test_dataloader = DataLoader(dataset_1, batch_size=1, shuffle=False)
    err = []
    with torch.no_grad():  # no gradients needed for evaluation
        running_loss = 0.0
        for batch_data, batch_labels in test_dataloader:
            outputs = model(batch_data)
            loss = criterion(outputs, batch_labels)

            err.append(loss.item())
            running_loss += loss.item()

        epoch_loss = running_loss / len(test_dataloader)
        print(f'Test Loss: {epoch_loss:.8f}')

    # Plot the per-sample verification error.
    err = np.array(err)
    plt.plot(err)
    plt.show()

    # Dump the model as a raw binary file: 10 normalization parameters
    # followed by all network weights, every value as float32.
    state_dict = model.state_dict()
    param_blocks = []
    for name, param in state_dict.items():
        print(f"Layer: {name}, Shape: {param.shape}")
        param_blocks.append(param.numpy().reshape(-1, 1))
    # One concatenate at the end instead of growing an array per layer.
    param_list = np.concatenate(param_blocks, axis=0) if param_blocks else np.ndarray((0, 1))

    print(param_list.shape)

    with open(param_path, "wb") as f:
        for item in norm_info.params():
            f.write(struct.pack('f', item))
        for item in param_list.flatten():
            f.write(struct.pack('f', item))