import numpy as np
import qrcode
import json
from multiprocessing import Pool, freeze_support
import pandas as pd
import cv2


from glob import glob

from tqdm import tqdm


def generate_qrcode(data, b):
    """Render *data* as a QR code with box size *b* and return the array shape.

    Used by return_qrcode_shape() to probe how large the rendered image is
    for a given payload length / box size combination.
    """
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=b,
        border=1,
    )
    qr.add_data(data)
    qr.make(fit=True)
    # Only the dimensions matter here, not the pixel content.
    rendered = qr.make_image(fill_color="green", back_color="white")
    return np.array(rendered).shape


def generate_qrcode_image(data, b):
    """Render *data* as a black-on-white QR code and return it as a numpy array."""
    qr = qrcode.QRCode(
        version=1,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=b,
        border=1,
    )
    qr.add_data(data)
    qr.make(fit=True)
    image = qr.make_image(fill_color="black", back_color="white")
    return np.array(image)


def return_qrcode_shape():
    """Map each text length (1..488) to the smallest box_size whose QR render
    is at least 128 pixels tall.

    Probes with '中' repeated *length* times; lengths that never reach 128px
    with box_size < 10 are simply absent from the returned dict.
    """
    length_to_box = dict()
    for length in tqdm(range(1, 489)):
        for box in range(1, 10):
            # NOTE(review): assumes the rendered array is 3-D (H, W, C) — the
            # unpacking below would fail on a 2-D image; confirm with qrcode lib.
            h, w, c = generate_qrcode('中' * length, box)
            if h >= 128:
                print(length, box, h)
                length_to_box[length] = box
                break
    return length_to_box


def gen_image(j, one, qr_sl_dict):
    """Render every (label, prefix) pair in *one* as a 128x128 QR image and
    pickle the whole batch to E:/text_image/<j>_data.pkl.

    qr_sl_dict maps prefix length -> box_size (falls back to 1 if missing).
    """
    data = dict()
    for idx, pair in enumerate(one):
        # pair[1] is the text prefix; pick a box size sized for its length.
        img = generate_qrcode_image(pair[1], qr_sl_dict.get(len(pair[1]), 1))

        # Progress marker (pair index, batch index).
        print(idx, j)

        # Normalize every render to the 128x128 input size the model expects.
        data[idx] = cv2.resize(img.astype("uint8"), (128, 128))
    pd.to_pickle(data, 'E:/text_image/{}_{}.pkl'.format(j, 'data'))


def gen_text_to_image():
    """Build (next-char label, prefix) pairs from 32-character Tang poems and
    render each prefix to a QR-image pickle via a worker pool.

    Reads 唐诗.json (each entry's field 4 is the poem body); output pickles are
    written by gen_image() under E:/text_image/.
    """
    with open('唐诗.json', 'r', encoding='utf-8') as f:
        dataset = json.load(f)

    # Keep only poems whose body is exactly 32 characters.
    # (The original also pre-filtered 32 <= len <= 72, which len == 32 subsumes.)
    two = [entry[4].replace('\n', '') for entry in dataset]
    two = [poem for poem in two if len(poem) == 32]

    # For each poem, pair every prefix with the character that follows it:
    # [next_char, prefix] for prefix lengths 1 .. len-1.
    two_list = []
    for t in tqdm(two):
        pairs = []
        word = ''
        for ch in t[:-1]:
            word += ch
            pairs.append([t[len(word)], word])
        two_list.append(pairs)

    qr_sl = return_qrcode_shape()

    pool = Pool(processes=7)
    # Keep the async handles so worker exceptions surface via .get() instead of
    # being silently dropped (the original was fire-and-forget).
    results = [pool.apply_async(gen_image, args=(j, one, qr_sl))
               for j, one in enumerate(two_list)]
    pool.close()
    pool.join()
    for r in results:
        r.get()


import paddle


class VlmBlock(paddle.nn.Layer):
    """Convolutional block with three parallel 3x3 convs.

    The first two (output_dim channels each) are concatenated and added to the
    third (2*output_dim channels), then passed through ReLU; an optional 2x2
    max-pool halves the spatial size when down_flag is set.
    """

    def __init__(self, input_dim, output_dim, down_flag):
        super(VlmBlock, self).__init__()
        self.down_flag = down_flag
        # Attribute names are load-bearing: they define the state-dict keys.
        self.one_layer = paddle.nn.Conv2D(input_dim, output_dim, 3, padding=1, bias_attr=False)
        self.two_layer = paddle.nn.Conv2D(input_dim, output_dim, 3, padding=1, bias_attr=False)
        self.three_layer = paddle.nn.Conv2D(input_dim, 2 * output_dim, 3, padding=1, bias_attr=False)
        if down_flag:
            self.down_layer = paddle.nn.MaxPool2D(2, 2)
        self.relu = paddle.nn.ReLU()

    def forward(self, x):
        branch_a = self.one_layer(x)
        branch_b = self.two_layer(x)
        wide = self.three_layer(x)
        out = self.relu(paddle.concat([branch_a, branch_b], axis=1) + wide)
        if self.down_flag:
            out = self.down_layer(out)
        return out


class VLM(paddle.nn.Layer):
    """Stack of six VlmBlocks followed by two parallel linear heads.

    forward() returns a pair of logits tensors over class_num classes; both
    heads read the same flattened feature vector (1536 features, which fixes
    the expected input spatial size).
    """

    def __init__(self, class_num):
        super(VLM, self).__init__()
        # Attribute names are load-bearing: they define the state-dict keys.
        self.one_layer = VlmBlock(1, 3, True)
        self.two_layer = VlmBlock(6, 6, False)
        self.three_layer = VlmBlock(12, 12, True)
        self.four_layer = VlmBlock(24, 12, False)
        self.five_layer = VlmBlock(24, 12, True)
        self.six_layer = VlmBlock(24, 12, True)
        self.out_layer = paddle.nn.Linear(1536, class_num)
        self.out_layer1 = paddle.nn.Linear(1536, class_num)

    def forward(self, x):
        for block in (self.one_layer, self.two_layer, self.three_layer,
                      self.four_layer, self.five_layer, self.six_layer):
            x = block(x)
        flat = x.reshape([x.shape[0], -1])
        return self.out_layer(flat), self.out_layer1(flat)


class VlmLoss(paddle.nn.Layer):
    """Cross-entropy over logits *x* damped element-wise by (1 - softmax(xx)).

    *xx* comes from the model's second head; classes it scores highly get
    their logits scaled down before the loss is computed.
    """

    def __init__(self):
        super(VlmLoss, self).__init__()

    def forward(self, x, y, xx):
        damp = 1 - paddle.nn.functional.softmax(xx, axis=-1)
        return paddle.nn.functional.cross_entropy(x * damp, y)

def train():
    """Train the VLM on (next-char label, QR image of prefix) pairs.

    Expects the pickled image batches produced by gen_text_to_image() under
    text_image/ and pretrained weights at /home/aistudio/vlm.pdparams; saves
    updated weights to ./vlm.pdparams.
    """
    with open('唐诗.json', 'r', encoding='utf-8') as f:
        dataset = json.load(f)

    # Keep only poems whose body is exactly 32 characters (the redundant
    # 32 <= len <= 72 pre-filter from the original is subsumed by len == 32).
    two = [entry[4].replace('\n', '') for entry in dataset]
    two = [poem for poem in two if len(poem) == 32]

    # For each poem, the label at step k is the character following the k-char prefix.
    label_seqs = []
    for t in tqdm(two):
        labels = []
        word = ''
        for ch in t[:-1]:
            word += ch
            labels.append(t[len(word)])
        label_seqs.append(labels)

    # Vocabulary: every distinct character, sorted. Build the char->index map
    # once instead of calling voc.index() (O(V)) for every character.
    voc = sorted(set(np.hstack([list(set(list(p))) for p in two])))
    char_to_idx = {ch: idx for idx, ch in enumerate(voc)}
    label_seqs = {poem_id: [char_to_idx[ch] for ch in labels]
                  for poem_id, labels in enumerate(tqdm(label_seqs))}

    # Join each label sequence with its pre-rendered QR images (one pickle per poem).
    data_set = dict()
    data_index = 0
    for poem_id, labels in tqdm(label_seqs.items()):
        images = pd.read_pickle("text_image/{}_{}.pkl".format(poem_id, "data"))
        for step, label in enumerate(labels):
            data_set[data_index] = [label, images.get(step)]
            data_index += 1

    # Pre-shuffle the sample order for all epochs up front.
    epoch = 100
    batch_size = 800 * 5
    data_set_total = []
    data_set_index = list(data_set.keys())
    for _ in range(epoch):
        np.random.shuffle(data_set_index)
        data_set_total += data_set_index

    vlm = VLM(class_num=len(voc))
    vlm.load_dict(paddle.load("/home/aistudio/vlm.pdparams"))
    optimizer = paddle.optimizer.Adam(parameters=vlm.parameters(), learning_rate=0.0003)
    loss_func = VlmLoss()
    bar = tqdm(range(0, len(data_set_total), batch_size))
    for start in bar:
        end = start + batch_size
        batch_keys = data_set_total[start:end]
        batch_label = [data_set.get(k)[0] for k in batch_keys]
        batch_input = [data_set.get(k)[1] for k in batch_keys]
        # (x + 3) / 5 is the normalization the checkpoint was trained with;
        # predict() must apply the same transform.
        batch_input = (paddle.to_tensor(batch_input).astype('float32') + 3) / 5
        batch_label = paddle.to_tensor(np.array(batch_label).astype(int)).astype('int64')
        vlm_out = vlm(batch_input.reshape([-1, 1, 128, 128]))
        loss = loss_func(vlm_out[0], batch_label, vlm_out[1])
        bar.set_description("loss: {:.4f}".format(loss.item()))
        optimizer.clear_grad()
        loss.backward()
        optimizer.step()

    paddle.save(vlm.state_dict(), 'vlm.pdparams')

def predict():
    """Autoregressively generate a 32-character poem starting from "我".

    Each step renders the current text as a 128x128 QR image, runs the VLM,
    damps the logits with the second head (same as VlmLoss), and appends the
    argmax character.
    """
    with open('唐诗.json', 'r', encoding='utf-8') as f:
        dataset = json.load(f)

    # Rebuild the vocabulary exactly as train() does so indices line up.
    # (The original also rebuilt the per-poem label lists here, but never
    # used them — that dead code is removed.)
    two = [entry[4].replace('\n', '') for entry in dataset]
    two = [poem for poem in two if len(poem) == 32]
    voc = sorted(set(np.hstack([list(set(list(p))) for p in two])))

    vlm = VLM(class_num=len(voc))
    vlm.load_dict(paddle.load("/home/aistudio/vlm.pdparams"))
    vlm.eval()

    qr_sl = return_qrcode_shape()
    word = "我"
    for _ in range(31):
        img = generate_qrcode_image(word, qr_sl.get(len(word), 1))
        img = cv2.resize(img.astype("uint8"), (128, 128))

        # Single tensor conversion (the original converted twice), then the
        # same (x + 3) / 5 normalization used in train().
        x = paddle.to_tensor(img).astype('float32').reshape([1, 1, 128, 128])
        x = (x + 3) / 5
        logits, gate = vlm(x)
        scores = logits * (1 - paddle.nn.functional.softmax(gate, axis=-1))
        # Explicit int() so the list index is a plain Python integer,
        # not a paddle tensor.
        word += voc[int(paddle.argmax(scores, -1))]
        print(word)


if __name__ == '__main__':
    # Needed when multiprocessing code is bundled into a frozen Windows executable.
    freeze_support()
    # Pipeline stages — enable one at a time:
    # gen_text_to_image()
    # train()
    predict()

