import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from modeling import  ViTForImageClassification, ViTConfig

import os
from PIL import Image, ImageFont, ImageDraw
import numpy as np


import json
def loadJson(file='hanzi-jiegou.json'):
    """Load the hanzi-structure JSON and map each character to a structure id.

    Args:
        file: path to a JSON file shaped like ``{char: {"format": <name>}}``.

    Returns:
        dict mapping each character to its integer structure class (0-13).
    """
    hz = {}
    jg = {'上下': 0, '上中下': 1, '上半包围': 2, '上三包围': 3, '下半包围': 4,
          '左右': 5, '左中右': 6, '左上包围': 7, '左下包围': 8, '左三包围': 9,
          '右上包围': 10, '右三包围': 11, '全包围': 12, '单体': 13}
    with open(file, 'r', encoding='utf-8') as f:
        js = json.load(f)
    for k, v in js.items():
        fmt = v['format']
        # Consistency fix: CustomImageDataset.loadJson folds the rare
        # '下三包围' category into '上下'; previously this function raised
        # KeyError on that value.
        if fmt == '下三包围':
            fmt = '上下'
        hz[k] = jg[fmt]
    return hz

def CreateFontImg(text):
    """Render *text* as a 224x224 RGB image: black glyph on a white canvas.

    Font size, glyph position, and a fake-bold effect are randomized so
    repeated calls for the same character produce varied training images.

    Args:
        text: the character (or short string) to render.

    Returns:
        A 224x224 ``PIL.Image.Image`` in RGB mode.
    """
    # Font: hard-coded macOS system font — not portable. TODO confirm target platform.
    fontPath = os.path.join("/System/Library/Fonts/", "STHeiti Light.ttc")
    # int(): np.random.choice returns a numpy scalar; PIL expects a plain int.
    fontSize = int(np.random.choice([100, 105, 110, 115, 120, 125, 130]))
    font = ImageFont.truetype(fontPath, fontSize)
    pos_rand = np.random.randint(-15, 15)  # position jitter, shared by x and y

    text_img = Image.new('RGB', (224, 224), color='#FFFFFF')
    text_draw = ImageDraw.Draw(text_img)
    # Fake bold: draw the glyph 1 or 3 times, shifted 1px horizontally per pass.
    ct = int(np.random.choice([1, 3]))
    for i in range(ct):
        text_draw.text((35 + pos_rand + i, 35 + pos_rand), text, font=font, fill="#000000")

    # Rotation augmentation is disabled: the original drew from
    # np.random.choice([False, False]), which can never yield True.
    qx = False
    if qx:
        angle = np.random.choice([-j for j in range(15, 25)])  # rotation angle
        text_img = text_img.rotate(angle, expand=1, fillcolor='#FFFFFF')

    # Paste onto a fresh white canvas — keeps the output size fixed if rotation
    # with expand=1 is ever re-enabled.  (The original also built an unused
    # canvas/draw pair up front; that dead code is removed.)
    image = Image.new("RGB", (224, 224), '#FFFFFF')
    image.paste(text_img, (0, 0))
    return image

class CustomImageDataset(Dataset):
    """Synthetic dataset of rendered hanzi glyphs with two labels per sample.

    Every access synthesizes a fresh image: a character index (label1) is
    drawn uniformly at random, its glyph is rendered via ``CreateFontImg``,
    and its structural-category id (label2) is looked up.

    Args:
        num: nominal dataset length reported by ``__len__``.
        transform: optional callable applied to each rendered PIL image.
    """

    def __init__(self, num=100000,  transform=None):
        # char -> structure id, and sample index -> char
        self.hz_jg, self.idx_hz = self.loadJson('hanzi-jiegou.json')
        self.labels1 = len(self.idx_hz)  # number of distinct characters
        # NOTE(review): 15 here vs 14 structure ids in loadJson; attribute is
        # not read elsewhere in this class — confirm before relying on it.
        self.labels2 = 15
        self.transform = transform
        self.num = num

    def loadJson(self, file):
        """Parse the hanzi/structure JSON.

        Returns:
            (char -> structure id, sample index -> char) as two dicts.
        """
        structure_ids = {
            '上下':0, '上中下':1, '上半包围':2, '上三包围':3, '下半包围':4,
            '左右':5, '左中右':6, '左上包围':7, '左下包围':8, '左三包围':9,
            '右上包围':10, '右三包围':11, '全包围':12, '单体':13,
        }
        char_to_id = {}
        index_to_char = {}
        counts = {}
        with open(file, 'r', encoding='utf-8') as fh:
            for pos, (char, info) in enumerate(json.load(fh).items()):
                fmt = info['format']
                # Fold the rare '下三包围' category into '上下'.
                if fmt == '下三包围':
                    fmt = '上下'
                char_to_id[char] = structure_ids[fmt]
                index_to_char[pos] = char
                counts[fmt] = counts.get(fmt, 0) + 1
        print(len(char_to_id), counts)
        return char_to_id, index_to_char

    def __len__(self):
        return self.num

    def __getitem__(self, idx):
        # idx is ignored: each access draws a fresh random character.
        char_idx = np.random.randint(0, self.labels1)
        char = self.idx_hz[char_idx]
        struct_idx = self.hz_jg[char]
        img = CreateFontImg(char)
        if self.transform:
            img = self.transform(img)
        return img, char_idx, struct_idx

# Hyperparameters.
batch_size = 16
learning_rate = 0.0002  # AdamW initial learning rate
num_epochs = 20


# Training-time preprocessing / augmentation.
# BUG FIX: RandomHorizontalFlip / RandomVerticalFlip were removed — mirroring
# a hanzi glyph destroys its identity and its structural category (e.g. a
# left-enclosure shape becomes a right-enclosure shape), so the flipped image
# no longer matches either label.
transform = transforms.Compose([
    transforms.Resize((224, 224)),  # images are rendered at 224x224; kept as a safeguard
    transforms.RandomRotation(20),  # mild rotation is label-preserving
    transforms.ToTensor(),
])

# Datasets: samples are generated on the fly, so "size" is just epoch length.
train_dataset = CustomImageDataset(1000000, transform=transform)
# BUG FIX: the test set previously reused the random training augmentation,
# making evaluation nondeterministic; evaluation must be deterministic.
eval_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])
test_dataset = CustomImageDataset(10000, transform=eval_transform)

# Data loaders.
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# Initialize the ViT model configuration (ViT-Base sized).
config = ViTConfig()
config.num_hidden_layers = 12  # adjust to scale the model up or down
config.num_attention_heads = 12
config.intermediate_size = 3072
config.hidden_size = 768
# Head 1: character-identity classes.
# NOTE(review): hard-coded 7363 should equal len(train_dataset.idx_hz) — confirm.
config.num_labels1 = 7363  # 设置类别数
config.num_labels2 = 14  # head 2: structure classes (14 ids in the structure map)
config.problem_type = 'single_label_classification'
# Instantiate the two-headed classifier (project-local modeling.py).
model = ViTForImageClassification(config)

# Loss is computed inside the model's forward pass; only the optimizer is set up here.

optimizer = optim.AdamW(model.parameters(), lr=learning_rate)
# Learning-rate decay: multiply LR by 0.1 every 20 scheduler.step() calls.
# NOTE(review): the effective cadence depends on where step() is invoked in
# the training loop — step_size=20 looks intended per-epoch; confirm.
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)

# Train the model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
model.to(device)

# Checkpoints are written here; create the directory up front so torch.save
# cannot fail on a missing path.
os.makedirs('./models', exist_ok=True)

step = 0
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for images, labels1, labels2 in train_loader:
        step += 1
        images = images.to(device)
        labels1 = labels1.to(device)
        labels2 = labels2.to(device)

        # Forward pass: the model computes a joint loss over both label heads.
        outputs = model(images, labels1=labels1, labels2=labels2)
        loss = outputs.loss

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 10 == 0:
            print(f"step {step}, loss {loss.item()}")
        running_loss += loss.item()

    # BUG FIX: scheduler.step() was called once per *batch*; with
    # StepLR(step_size=20, gamma=0.1) that shrank the LR by 1000x within the
    # first 60 batches of a 62,500-batch epoch, effectively freezing training.
    # StepLR's step_size is conventionally counted in epochs, so step here.
    scheduler.step()

    epoch_loss = running_loss / len(train_loader)
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss:.4f}')

    # Evaluate: only the structure head (logits2) accuracy is measured;
    # labels1 is unused during evaluation and is not moved to the device.
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels1, labels2 in test_loader:
            images = images.to(device)
            labels2 = labels2.to(device)
            outputs = model(images)
            _, predicted2 = torch.max(outputs.logits2, 1)
            total += labels2.size(0)
            correct += (predicted2 == labels2).sum().item()

    accuracy = 100 * correct / total
    print(f'Accuracy of the model on the test images: {accuracy:.2f}%')

    # Save a checkpoint after every epoch.
    torch.save(model.state_dict(), f'./models/vit_ep_{epoch}.pth')
