
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import io
import PIL.Image, PIL.ImageDraw
import base64
import zipfile
import json
import requests
import numpy as np
import copy
import matplotlib.pylab as pl
import glob
from pathlib import Path
import tempfile
from IPython import display
from IPython.display import Image, HTML, clear_output
from tqdm import tqdm_notebook, tnrange

os.environ['FFMPEG_BINARY'] = 'ffmpeg'


import torch
import subprocess

import pydiffvg
import skimage
import skimage.io
import random
import ttools.modules
import argparse
import math
import torchvision
import torchvision.transforms as transforms
import requests
from io import BytesIO


import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import PIL
from time import time

import clip
import torch.nn.functional as F
from torchvision.datasets import CIFAR100

# Load an image from a URL or a local path.
def imread(url, max_size=None, mode=None):
    """Read an image as a float32 array with values scaled to [0, 1].

    url: http(s) URL or local file path.
    max_size: if given, resize to (max_size, max_size) — aspect ratio is NOT kept.
    mode: optional PIL mode to convert to (e.g. 'RGB').
    """
    if url.startswith(('http:', 'https:')):
        source = io.BytesIO(requests.get(url).content)
    else:
        source = url
    img = PIL.Image.open(source)
    if max_size is not None:
        img = img.resize((max_size, max_size))
    if mode is not None:
        img = img.convert(mode)
    return np.float32(img) / 255.0

# Persist the rendered image and hand back its destination path.
def checkin(img, out_path=None):
    """Save `img` (CHW float array) to `out_path` via save_img and return the path."""
    save_img(img, str(out_path))
    return out_path

def save_img(img, file_name):
    """Save a CHW float image (values in [0, 1]) to `file_name` as RGB.

    Values are scaled by 254 (not 255) — kept as in the original implementation.
    """
    hwc = np.clip(np.transpose(img, (1, 2, 0)), 0, 1)
    pimg = PIL.Image.fromarray(np.uint8(hwc * 254), mode="RGB")
    pimg.save(file_name)

# Convert a numpy image array into a PIL Image object.
def np2pil(a):
    """Convert a numpy image to a PIL Image; float inputs are assumed in [0, 1]."""
    if a.dtype in [np.float32, np.float64]:
        a = np.uint8(np.clip(a, 0, 1) * 255)
    return PIL.Image.fromarray(a)

# Save an image (2-D or 3-D array) to a file path or a writable file object.
def imwrite(f, a, fmt=None):
    """Write image array `a` to `f`.

    f: destination path (format inferred from the extension, 'jpg' -> 'jpeg')
       or a writable binary file object.
    a: image array; floats are treated as values in [0, 1] (see np2pil).
    fmt: explicit PIL format name; overridden by the extension when f is a path.
    """
    a = np.asarray(a)
    if isinstance(f, str):
        fmt = f.rsplit('.', 1)[-1].lower()
        if fmt == 'jpg':
            fmt = 'jpeg'
        # Bug fix: open via a context manager so the file handle is always
        # closed (the original opened the file and never closed it).
        with open(f, 'wb') as fp:
            np2pil(a).save(fp, fmt, quality=95)
    else:
        np2pil(a).save(f, fmt, quality=95)

# Encode an image array into compressed bytes.
def imencode(a, fmt='jpeg'):
    """Encode image array `a` to bytes; RGBA inputs are forced to PNG."""
    a = np.asarray(a)
    if a.ndim == 3 and a.shape[-1] == 4:
        fmt = 'png'
    buf = io.BytesIO()
    imwrite(buf, a, fmt)
    return buf.getvalue()

# Encode an image array as a base64 data-URI string.
def im2url(a, fmt='jpeg'):
    """Return `a` encoded as a 'data:image/...;base64,...' URI string."""
    payload = base64.b64encode(imencode(a, fmt)).decode('ascii')
    return 'data:image/' + fmt.upper() + ';base64,' + payload

# Display an image inline in the notebook.
def imshow(a, fmt='jpeg'):
    """Encode array `a` and render it via IPython.

    Bug fix: `display` here is the IPython.display *module* (imported with
    `from IPython import display`), so calling it directly raised
    "TypeError: 'module' object is not callable"; use display.display(...).
    """
    display.display(Image(data=imencode(a, fmt)))

# Arrange a batch of equally-sized tiles into one big 2-D grid image.
def tile2d(a, w=None):
    """Tile images a[n, th, tw, ...] into a (th*h, tw*w, ...) grid.

    w: number of tiles per row; defaults to ceil(sqrt(n)). Missing grid
    cells are zero-padded.
    """
    a = np.asarray(a)
    n = len(a)
    if w is None:
        w = int(np.ceil(np.sqrt(n)))
    th, tw = a.shape[1:3]
    pad = (w - n) % w
    a = np.pad(a, [(0, pad)] + [(0, 0)] * (a.ndim - 1), 'constant')
    h = len(a) // w
    grid = a.reshape([h, w] + list(a.shape[1:]))
    # Interleave grid rows with tile rows: (h, w, th, tw, ...) -> (h, th, w, tw, ...)
    grid = np.rollaxis(grid, 2, 1)
    return grid.reshape([th * h, tw * w] + list(grid.shape[4:]))

from torchvision import utils
# Convert a CHW float image to a PIL Image and display it inline.
def show_img(img):
    """Display a CHW image (values in [0, 1]), scaled by 254, as RGB."""
    hwc = np.clip(np.transpose(img, (1, 2, 0)), 0, 1)
    pimg = PIL.Image.fromarray(np.uint8(hwc * 254), mode="RGB")
    imshow(pimg)

# Upscale an image by integer nearest-neighbour repetition.
def zoom(img, scale=4):
    """Repeat each pixel `scale` times along both spatial axes."""
    for axis in (0, 1):
        img = np.repeat(img, scale, axis)
    return img

# Resize a PIL image so its LONG edge equals trg_size (aspect ratio kept).
def pil_resize_long_edge_to(pil, trg_size):
    """Scale `pil` so max(width, height) == trg_size, using bicubic resampling.

    NOTE(review): an identical function is re-defined later in this file;
    at import time the later definition wins.
    """
    ratio = trg_size / max(pil.width, pil.height)
    new_size = (int(pil.width * ratio), int(pil.height * ratio))
    return pil.resize(new_size, PIL.Image.BICUBIC)

# Feature extractor built on the feature layers of a pretrained VGG-16
# (activations captured up to layer index 29).
class Vgg16_Extractor(nn.Module):
    def __init__(self, space):
        """space: colour convention of inputs; any value other than 'vgg'
        means inputs are in [-1, 1] and get ImageNet-normalized in forward()."""
        super().__init__()
        # NOTE(review): `pretrained=True` is deprecated in newer torchvision
        # (replaced by `weights=...`); kept as-is for compatibility.
        self.vgg_layers = models.vgg16(pretrained=True).features

        # Freeze the backbone — this extractor is only used for loss features.
        for param in self.parameters():
            param.requires_grad = False
        # Indices of the VGG feature sub-modules whose outputs are captured.
        self.capture_layers = [1,3,6,8,11,13,15,22,29]
        self.space = space

    # Run the VGG trunk, collecting the raw input plus each captured activation.
    def forward_base(self, x):
        feat = [x]
        for i in range(len(self.vgg_layers)):
            x = self.vgg_layers[i](x)
            if i in self.capture_layers: feat.append(x)
        return feat

    # If the input space is not already 'vgg', map [-1, 1] inputs to [0, 1]
    # and apply ImageNet mean/std normalization before feature extraction.
    def forward(self, x):
        if self.space != 'vgg':
            x = (x + 1.) / 2.
            x = x - (torch.Tensor([0.485, 0.456, 0.406]).to(x.device).view(1, -1, 1, 1))
            x = x / (torch.Tensor([0.229, 0.224, 0.225]).to(x.device).view(1, -1, 1, 1))
        feat = self.forward_base(x)
        return feat

    # Sample up to `samps` random pixel locations from input X and gather, for
    # each captured layer, the feature values at those locations (halving the
    # coordinates whenever the layer resolution drops), concatenating all
    # layers into one "hypercolumn" feature tensor along the channel dim.
    def forward_samples_hypercolumn(self, X, samps=100):
        feat = self.forward(X)

        # All (row, col) coordinate pairs of the input's spatial grid.
        xx,xy = np.meshgrid(np.arange(X.shape[2]), np.arange(X.shape[3]))
        xx = np.expand_dims(xx.flatten(),1)
        xy = np.expand_dims(xy.flatten(),1)
        xc = np.concatenate([xx,xy],1)

        samples = min(samps,xc.shape[0])

        # Shuffle the coordinate list and keep the first `samples` entries.
        np.random.shuffle(xc)
        xx = xc[:samples,0]
        yy = xc[:samples,1]

        feat_samples = []
        for i in range(len(feat)):

            layer_feat = feat[i]

            # hack to detect lower resolution
            if i>0 and feat[i].size(2) < feat[i-1].size(2):
                xx = xx/2.0
                yy = yy/2.0

            # Clamp to the layer's bounds and index with integer coordinates.
            xx = np.clip(xx, 0, layer_feat.shape[2]-1).astype(np.int32)
            yy = np.clip(yy, 0, layer_feat.shape[3]-1).astype(np.int32)

            features = layer_feat[:,:, xx[range(samples)], yy[range(samples)]]
            feat_samples.append(features.clone().detach())

        feat = torch.cat(feat_samples,1)
        return feat


# Open a local image file with PIL, forcing RGB.
def pil_loader(path):
    """Load the image at `path` and return it as an RGB PIL Image."""
    with open(path, 'rb') as fh:
        return PIL.Image.open(fh).convert('RGB')

# Fetch an image over HTTP(S) and open it with PIL.
def pil_loader_internet(url):
    """Download `url` and return its content as an RGB PIL Image."""
    payload = requests.get(url).content
    return PIL.Image.open(BytesIO(payload)).convert('RGB')

# Resample an NCHW tensor to a target spatial size via F.interpolate.
def tensor_resample(tensor, dst_size, mode='bilinear'):
    """Resize `tensor` (N, C, H, W) to `dst_size` with the given interpolation mode."""
    return F.interpolate(tensor, size=dst_size, mode=mode, align_corners=False)

# Resize a PIL image so its SHORT edge equals trg_size (aspect ratio kept).
def pil_resize_short_edge_to(pil, trg_size):
    """Scale `pil` so min(width, height) == trg_size, using bicubic resampling."""
    ratio = trg_size / min(pil.width, pil.height)
    new_size = (int(pil.width * ratio), int(pil.height * ratio))
    return pil.resize(new_size, PIL.Image.BICUBIC)

# Resize a PIL image so its LONG edge equals trg_size (aspect ratio kept).
def pil_resize_long_edge_to(pil, trg_size):
    """Scale `pil` so that max(width, height) == trg_size, bicubic resampling."""
    scale = trg_size / max(pil.width, pil.height)
    return pil.resize((int(pil.width * scale), int(pil.height * scale)),
                      PIL.Image.BICUBIC)

# numpy array -> uint8 PIL Image.
def np_to_pil(npy):
    """Convert a numpy array (any numeric dtype) to a uint8 PIL Image."""
    return PIL.Image.fromarray(npy.astype(np.uint8))

# PIL Image -> numpy array.
def pil_to_np(pil):
    """Convert a PIL Image (or anything np.array accepts) to a numpy array."""
    return np.array(pil)

# torch tensor (CHW or NCHW, possibly on GPU) -> numpy array (HWC or NHWC).
def tensor_to_np(tensor, cut_dim_to_3=True):
    """Move a tensor to CPU numpy, converting channel-first to channel-last.

    4-D input: returns HWC of the first batch item when cut_dim_to_3 is True,
    otherwise the full NHWC array. 3-D input: returns HWC.
    """
    if tensor.dim() == 4:
        if not cut_dim_to_3:
            return tensor.data.cpu().numpy().transpose((0, 2, 3, 1))
        tensor = tensor[0]
    return tensor.data.cpu().numpy().transpose((1, 2, 0))

# numpy HWC image -> torch NCHW tensor, normalized according to `space`.
# space == 'vgg' uses ImageNet mean/std; anything else maps [0, 255] to [-1, 1].
def np_to_tensor(npy, space):
    """Convert an HWC image array to a (1, C, H, W) float tensor.

    space: 'vgg' -> ImageNet normalization (np_to_tensor_correct);
           otherwise values are mapped from [0, 255] to [-1, 1].
    """
    if space == 'vgg':
        return np_to_tensor_correct(npy)
    # Bug fix: np.float was removed in NumPy 1.24 — use the builtin float
    # (the alias meant float64, which astype(float) also produces).
    scaled = torch.Tensor(npy.astype(float) / 127.5) - 1.0
    return scaled.permute((2, 0, 1)).unsqueeze(0)

# numpy image -> tensor with ImageNet (VGG) mean/std normalization.
def np_to_tensor_correct(npy):
    """ToTensor + ImageNet normalization, returning shape (1, C, H, W)."""
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    return normalize(np_to_pil(npy)).unsqueeze(0)


# One Laplacian-pyramid band: the input minus its down/up-sampled self.
def laplacian(x):
    """Return x - upsample(downsample(x)): the high-frequency residual of x."""
    h, w = x.shape[2], x.shape[3]
    low = tensor_resample(x, [h // 2, w // 2])
    return x - tensor_resample(low, [h, w])

# Build a Laplacian pyramid: `levels` high-frequency bands plus the
# remaining low-pass image at the end.
def make_laplace_pyramid(x, levels):
    """Return [residual_0, ..., residual_{levels-1}, low_pass] for tensor x."""
    pyramid = []
    current = x
    for _ in range(levels):
        pyramid.append(laplacian(current))
        half = (max(current.shape[2] // 2, 1), max(current.shape[3] // 2, 1))
        current = tensor_resample(current, half)
    pyramid.append(current)
    return pyramid

# Collapse a Laplacian pyramid back into the full-resolution image:
# upsample from the coarsest level, adding each residual band on the way up.
def fold_laplace_pyramid(pyramid):
    """Reconstruct the original tensor from its Laplacian pyramid."""
    current = pyramid[-1]
    for band in reversed(pyramid[:-1]):
        current = band + tensor_resample(current, (band.shape[2], band.shape[3]))
    return current

# Randomly sample pixel coordinates from the content feature map.
def sample_indices(feat_content, feat_style):
    """Return (xx, xy): flattened row/col coordinate arrays sampled on a
    randomly-offset stride grid over feat_content's spatial dimensions.

    feat_style is kept in the signature for interface compatibility but is
    not used by the sampling itself. The stride is chosen so that roughly
    128**2 locations survive.
    (Cleanup: removed the unused locals `indices` and `feat_dims`.)
    """
    const = 128**2  # target number of surviving locations
    big_size = feat_content.shape[2] * feat_content.shape[3]  # num feaxels

    stride_x = int(max(math.floor(math.sqrt(big_size//const)), 1))
    offset_x = np.random.randint(stride_x)
    stride_y = int(max(math.ceil(math.sqrt(big_size//const)), 1))
    offset_y = np.random.randint(stride_y)
    xx, xy = np.meshgrid(np.arange(feat_content.shape[2])[offset_x::stride_x],
                         np.arange(feat_content.shape[3])[offset_y::stride_y])
    return xx.flatten(), xy.flatten()

# Bilinearly sample features at fractional coordinates (xx, xy) from every
# layer of feat_result and feat_content, concatenate the per-layer samples
# along the channel dimension, and append the (x, y) coordinates as two
# extra channels. Returns (x_st, c_st), each (1, C_total + 2, n_samples, 1).
def spatial_feature_extract(feat_result, feat_content, xx, xy):
    """Sample both feature pyramids at the same spatial locations.

    feat_result / feat_content: lists of NCHW feature tensors (finest first).
    xx, xy: 1-D numpy arrays of row/col coordinates at the finest resolution;
    they are halved whenever a layer's resolution drops.
    """
    l2, l3 = [], []
    device = feat_result[0].device

    # for each extracted layer
    for i in range(len(feat_result)):
        fr = feat_result[i]
        fc = feat_content[i]

        # hack to detect reduced scale
        if i>0 and feat_result[i-1].size(2) > feat_result[i].size(2):
            xx = xx/2.0
            xy = xy/2.0

        # go back to ints and get residual
        xxm = np.floor(xx).astype(np.float32)
        xxr = xx - xxm

        xym = np.floor(xy).astype(np.float32)
        xyr = xy - xym

        # Bilinear weights for the four neighbouring texels.
        w00 = torch.from_numpy((1.-xxr)*(1.-xyr)).float().view(1, 1, -1, 1).to(device)
        w01 = torch.from_numpy((1.-xxr)*xyr).float().view(1, 1, -1, 1).to(device)
        w10 = torch.from_numpy(xxr*(1.-xyr)).float().view(1, 1, -1, 1).to(device)
        w11 = torch.from_numpy(xxr*xyr).float().view(1, 1, -1, 1).to(device)

        xxm = np.clip(xxm.astype(np.int32),0,fr.size(2)-1)
        xym = np.clip(xym.astype(np.int32),0,fr.size(3)-1)

        # Flattened indices of the four neighbours (clamped at the border).
        s00 = xxm*fr.size(3)+xym
        s01 = xxm*fr.size(3)+np.clip(xym+1,0,fr.size(3)-1)
        s10 = np.clip(xxm+1,0,fr.size(2)-1)*fr.size(3)+(xym)
        s11 = np.clip(xxm+1,0,fr.size(2)-1)*fr.size(3)+np.clip(xym+1,0,fr.size(3)-1)

        # Gather and blend. Fancy indexing returns new tensors, so the
        # in-place mul_/add_ do not modify the original feature maps.
        fr = fr.view(1,fr.size(1),fr.size(2)*fr.size(3),1)
        fr = fr[:,:,s00,:].mul_(w00).add_(fr[:,:,s01,:].mul_(w01)).add_(fr[:,:,s10,:].mul_(w10)).add_(fr[:,:,s11,:].mul_(w11))

        fc = fc.view(1,fc.size(1),fc.size(2)*fc.size(3),1)
        fc = fc[:,:,s00,:].mul_(w00).add_(fc[:,:,s01,:].mul_(w01)).add_(fc[:,:,s10,:].mul_(w10)).add_(fc[:,:,s11,:].mul_(w11))

        l2.append(fr)
        l3.append(fc)

    x_st = torch.cat([li.contiguous() for li in l2],1)
    c_st = torch.cat([li.contiguous() for li in l3],1)

    # Append the (by now possibly halved) sample coordinates as two channels.
    xx = torch.from_numpy(xx).view(1,1,x_st.size(2),1).float().to(device)
    yy = torch.from_numpy(xy).view(1,1,x_st.size(2),1).float().to(device)

    x_st = torch.cat([x_st,xx,yy],1)
    c_st = torch.cat([c_st,xx,yy],1)
    return x_st, c_st

# Pairwise cosine-distance matrix between the row vectors of x and y.
def pairwise_distances_cos(x, y):
    """Return D[i, j] = 1 - cos(x_i, y_j) for row vectors of x and y."""
    x_norm = torch.sqrt((x ** 2).sum(1)).view(-1, 1)
    y_norm = torch.sqrt((y ** 2).sum(1)).view(1, -1)
    sim = torch.mm(x, torch.transpose(y, 0, 1)) / x_norm / y_norm
    return 1. - sim

# Pairwise squared-L2 distance matrix, clamped and scaled by the feature
# dimension (the clamp floor also guards against zero distances).
def pairwise_distances_sq_l2(x, y):
    """Return clamp(||x_i - y_j||^2, 1e-5, 1e5) / x.size(1) for rows of x, y."""
    x_sq = (x ** 2).sum(1).view(-1, 1)
    y_sq = (y ** 2).sum(1).view(1, -1)
    d = x_sq + y_sq - 2.0 * torch.mm(x, torch.transpose(y, 0, 1))
    return torch.clamp(d, 1e-5, 1e5) / x.size(1)

# Pairwise distance matrix: cosine when cos_d, euclidean (sqrt of the
# scaled squared-L2) otherwise.
def distmat(x, y, cos_d=True):
    """Return the pairwise distance matrix between rows of x and y."""
    if cos_d:
        return pairwise_distances_cos(x, y)
    return torch.sqrt(pairwise_distances_sq_l2(x, y))

# Self-similarity ("content") loss: mean absolute difference between the
# self-distance matrices of the result and content feature sets.
def content_loss(feat_result, feat_content):
    """Compare the pairwise self-distance structure of both feature sets.

    The trailing 2 channels (appended x/y coordinates) are dropped before
    computing distances.
    """
    d = feat_result.size(1)

    X = feat_result.transpose(0, 1).contiguous().view(d, -1).transpose(0, 1)
    Y = feat_content.transpose(0, 1).contiguous().view(d, -1).transpose(0, 1)

    # Strip the appended (x, y) coordinate channels.
    X = X[:, :-2]
    Y = Y[:, :-2]

    Mx = distmat(X, X)
    My = distmat(Y, Y)
    return torch.abs(Mx - My).mean()

# Apply a fixed 3x3 RGB -> YUV-like colour transform to a (3, N) tensor.
def rgb_to_yuv(rgb):
    """Return C @ rgb for the fixed colour matrix C (input shape (3, N))."""
    C = torch.Tensor([[0.577350,0.577350,0.577350],[-0.577350,0.788675,-0.211325],[-0.577350,-0.211325,0.788675]]).to(rgb.device)
    return torch.mm(C, rgb)

# Relaxed earth mover's distance ("REMD") style loss between feature sets.
def style_loss(X, Y, cos_d=True):
    """Compute the relaxed EMD between the rows of X and Y.

    For 3-channel (RGB) inputs, features are first mapped to YUV and an
    additional euclidean distance term is added to the cost matrix.

    Bug fix: the cos_d parameter was previously ignored (distmat was always
    called with cos_d=True); it is now passed through. All call sites in
    this file use the default, so their behavior is unchanged.
    """
    d = X.shape[1]

    if d == 3:
        X = rgb_to_yuv(X.transpose(0,1).contiguous().view(d,-1)).transpose(0,1)
        Y = rgb_to_yuv(Y.transpose(0,1).contiguous().view(d,-1)).transpose(0,1)
    else:
        X = X.transpose(0,1).contiguous().view(d,-1).transpose(0,1)
        Y = Y.transpose(0,1).contiguous().view(d,-1).transpose(0,1)

    CX_M = distmat(X, Y, cos_d=cos_d)

    if d == 3: CX_M = CX_M + distmat(X, Y, cos_d=False)

    # Relaxed EMD: best match per row and per column, take the worse mean.
    m1, m1_inds = CX_M.min(1)
    m2, m2_inds = CX_M.min(0)

    remd = torch.max(m1.mean(), m2.mean())

    return remd

# First/second moment matching loss between two feature sets.
def moment_loss(X, Y, moments=[1,2]):
    """Mean absolute difference of means (moment 1) and covariance matrices
    (moment 2) between the rows of X and Y (after squeeze + transpose)."""
    loss = 0.
    Xf = X.squeeze().t()
    Yf = Y.squeeze().t()

    mu_x = torch.mean(Xf, 0, keepdim=True)
    mu_y = torch.mean(Yf, 0, keepdim=True)
    mu_d = torch.abs(mu_x - mu_y).mean()

    if 1 in moments:
        loss = loss + mu_d

    if 2 in moments:
        Xc = Xf - mu_x
        Yc = Yf - mu_y
        cov_x = torch.mm(Xc.t(), Xc) / (Xf.shape[0] - 1)
        cov_y = torch.mm(Yc.t(), Yc) / (Yf.shape[0] - 1)
        loss = loss + torch.abs(cov_x - cov_y).mean()

    return loss

# Total optimization loss: REMD style + moment + palette-matching terms,
# normalized by the content and style weights.
def calculate_loss(feat_result, feat_content, feat_style, indices, content_weight, moment_weight=1.0):
    """Combine the style (REMD), moment, and palette losses into one scalar.

    feat_result / feat_content: feature pyramids from the extractor.
    feat_style: precomputed style hypercolumn features.
    indices: (xx, xy) arrays of candidate sample coordinates.
    content_weight: weight used in the final normalization (and to damp
    the palette term).
    """
    # Sample at most 1024 spatial locations.
    num_locations = 1024
    spatial_result, spatial_content = spatial_feature_extract(
        feat_result, feat_content,
        indices[0][:num_locations], indices[1][:num_locations])
    # NOTE(review): loss_content is computed but never folded into the
    # returned total — confirm this is intentional.
    loss_content = content_loss(spatial_result, spatial_content)

    d = feat_style.shape[1]
    spatial_style = feat_style.view(1, d, -1, 1)
    feat_max = 3+2*64+128*2+256*3+512*2  # (sum of all extracted channels)

    loss_remd = style_loss(spatial_result[:, :feat_max, :, :],
                           spatial_style[:, :feat_max, :, :])

    # The trailing 2 coordinate channels are dropped so shapes line up.
    loss_moment = moment_loss(spatial_result[:, :-2, :, :], spatial_style, moments=[1,2])
    # Palette matching on the raw RGB channels, damped by content_weight.
    content_weight_frac = 1./max(content_weight, 1.)
    loss_moment += content_weight_frac * style_loss(spatial_result[:, :3, :, :],
                                                    spatial_style[:, :3, :, :])

    loss_style = loss_remd + moment_weight * loss_moment

    style_weight = 1.0 + moment_weight
    return (loss_style) / (content_weight + style_weight)

# Build the image-augmentation pipeline used for the CLIP loss:
# random perspective + random resized crop, optionally followed by CLIP's
# channel normalization.
def get_image_augmentation(use_normalized_clip):
    """Return a torchvision Compose of the augmentation transforms."""
    steps = [
        transforms.RandomPerspective(fill=1, p=1, distortion_scale=0.5),
        transforms.RandomResizedCrop(224, scale=(0.7, 0.9)),
    ]
    if use_normalized_clip:
        steps.append(transforms.Normalize(
            (0.48145466, 0.4578275, 0.40821073),
            (0.26862954, 0.26130258, 0.27577711)))
    return transforms.Compose(steps)

# Randomly initialize open cubic-Bezier paths and their stroke groups.
def initialize_curves(num_paths, canvas_width, canvas_height):
    """Create `num_paths` random open paths (1-3 cubic segments each) in
    normalized coordinates scaled to the canvas, plus one stroke-only
    ShapeGroup per path with a random RGBA stroke colour."""
    shapes = []
    shape_groups = []
    for _ in range(num_paths):
        num_segments = random.randint(1, 3)
        num_control_points = torch.zeros(num_segments, dtype=torch.int32) + 2
        radius = 0.1
        cur = (random.random(), random.random())
        points = [cur]
        for _ in range(num_segments):
            # Three control points per cubic segment, each a small random
            # step from the previous point (x drawn before y).
            for _ in range(3):
                cur = (cur[0] + radius * (random.random() - 0.5),
                       cur[1] + radius * (random.random() - 0.5))
                points.append(cur)
        points = torch.tensor(points)
        points[:, 0] *= canvas_width
        points[:, 1] *= canvas_height
        path = pydiffvg.Path(num_control_points=num_control_points,
                             points=points,
                             stroke_width=torch.tensor(1.0),
                             is_closed=False)
        shapes.append(path)
        colour = torch.tensor([random.random(), random.random(),
                               random.random(), random.random()])
        shape_groups.append(pydiffvg.ShapeGroup(
            shape_ids=torch.tensor([len(shapes) - 1]),
            fill_color=None,
            stroke_color=colour))
    return shapes, shape_groups

# Rasterize the current shapes with pydiffvg and composite onto white.
# Optionally saves the frame; returns the image as an NCHW tensor.
def render_drawing(shapes, shape_groups,\
                   canvas_width, canvas_height, n_iter, save=False):
    """Render the scene and return an (1, 3, H, W) RGB tensor in [0, 1].

    n_iter seeds the renderer and names the optional saved PNG.
    """
    scene_args = pydiffvg.RenderFunction.serialize_scene(\
        canvas_width, canvas_height, shapes, shape_groups)
    render = pydiffvg.RenderFunction.apply
    img = render(canvas_width, canvas_height, 2, 2, n_iter, None, *scene_args)
    # Alpha-composite the RGBA render over a white background.
    alpha = img[:, :, 3:4]
    white = torch.ones(img.shape[0], img.shape[1], 3, device=pydiffvg.get_device())
    img = alpha * img[:, :, :3] + white * (1 - alpha)
    if save:
        pydiffvg.imwrite(img.cpu(), '/content/res/iter_{}.png'.format(int(n_iter)), gamma=1.0)
    # HWC -> NCHW with a leading batch dimension.
    return img[:, :, :3].unsqueeze(0).permute(0, 3, 1, 2)


def style_clip_draw(prompt, style_path,
                    num_paths=256, num_iter=1000, max_width=50,
                    num_augs=4, style_weight=1.,
                    neg_prompt=None, neg_prompt_2=None,
                    use_normalized_clip=False,
                    debug=False):
    '''
    Perform StyleCLIPDraw using a given text prompt and style image.

    Generator: yields the output image path every 20 iterations and once
    more at the end. Requires the module-level globals `device`, `model`
    and `extractor` to be initialized first (see Predictor.setup).
    args:
        prompt (str) : Text prompt to draw
        style_path(str) : Style image path or url
    kwargs:
        num_paths (int) : Number of brush strokes
        num_iter(int) : Number of optimization iterations
        max_width(float) : Maximum width of a brush stroke in pixels
        num_augs(int) : Number of image augmentations
        style_weight=(float) : What to multiply the style loss by
        neg_prompt(str) : Negative prompt. None if you don't want it
        neg_prompt_2(str) : Negative prompt. None if you don't want it
        use_normalized_clip(bool)
        debug(bool) : Print intermediate canvases and losses for debugging
    yields
        Path of the latest saved canvas image
    '''
    # Temporary location for the rendered output image.
    out_path = Path(tempfile.mkdtemp()) / "out.png"
    # Encode the text prompt with CLIP (plus optional negative prompts).
    text_input = clip.tokenize(prompt).to(device)

    if neg_prompt is not None: text_input_neg1 = clip.tokenize(neg_prompt).to(device)
    if neg_prompt_2 is not None: text_input_neg2 = clip.tokenize(neg_prompt_2).to(device)

    # Compute the text features once, outside the optimization loop.
    with torch.no_grad():
        text_features = model.encode_text(text_input)
        if neg_prompt is not None: text_features_neg1 = model.encode_text(text_input_neg1)
        if neg_prompt_2 is not None: text_features_neg2 = model.encode_text(text_input_neg2)

    canvas_width, canvas_height = 224, 224

    # Image augmentation transforms used for the CLIP loss.
    augment_trans = get_image_augmentation(use_normalized_clip)

    # Randomly generate the initial curves.
    shapes, shape_groups = initialize_curves(num_paths, canvas_width, canvas_height)
    # Make the path points, stroke widths and colours trainable parameters.
    points_vars = []
    stroke_width_vars = []
    color_vars = []
    for path in shapes:
        path.points.requires_grad = True
        points_vars.append(path.points)
        path.stroke_width.requires_grad = True
        stroke_width_vars.append(path.stroke_width)
    for group in shape_groups:
        group.stroke_color.requires_grad = True
        color_vars.append(group.stroke_color)
    # One Adam optimizer per parameter family, with scaled learning rates.
    lr = 1
    points_optim = torch.optim.Adam(points_vars, lr=1.0*lr)
    width_optim = torch.optim.Adam(stroke_width_vars, lr=0.1*lr)
    color_optim = torch.optim.Adam(color_vars, lr=0.01*lr)

    # Load the style image and convert it to a [0, 1] tensor.
    style_pil = PIL.Image.open(str(style_path)).convert("RGB")
    style_pil = pil_resize_long_edge_to(style_pil, canvas_width)
    style_np = pil_to_np(style_pil)
    style = (np_to_tensor(style_np, "normal").to(device)+1)/2


    # Extract style features: 5 rounds of 1000 hypercolumn samples each,
    # concatenated along the sample dimension.
    feat_style = None
    for i in range(5):
        with torch.no_grad():
        # r is region of interest (mask)
            feat_e = extractor.forward_samples_hypercolumn(style, samps=1000)
            feat_style = feat_e if feat_style is None else torch.cat((feat_style, feat_e), dim=2)

    # Main optimization loop.
    for t in range(num_iter):

        # Step-decay the point learning rate to stabilize late training.
        if t == int(num_iter * 0.5):
            for g in points_optim.param_groups:
                g['lr'] = 0.4
        if t == int(num_iter * 0.75):
            for g in points_optim.param_groups:
                g['lr'] = 0.1

        points_optim.zero_grad()
        width_optim.zero_grad()
        color_optim.zero_grad()

        # Render the current shapes to an image.
        # NOTE(review): save=True writes to the hard-coded /content/res/
        # directory inside render_drawing (Colab layout) — confirm it exists.
        img = render_drawing(shapes, shape_groups, canvas_width, canvas_height, t, save=(t % 5 == 0))

        # CLIP loss over several augmented crops; skipped for the last 10%
        # of iterations.
        loss = 0
        img_augs = []
        if t < .9*num_iter:
            for n in range(num_augs):
                img_augs.append(augment_trans(img))
            im_batch = torch.cat(img_augs)
            image_features = model.encode_image(im_batch)
            for n in range(num_augs):
                # Negative cosine similarity to the prompt pulls toward it;
                # (scaled) similarity to the negative prompts pushes away.
                loss -= torch.cosine_similarity(text_features, image_features[n:n+1], dim=1)
                if neg_prompt is not None: loss += torch.cosine_similarity(text_features_neg1, image_features[n:n+1], dim=1) * 0.3
                if neg_prompt_2 is not None: loss += torch.cosine_similarity(text_features_neg2, image_features[n:n+1], dim=1) * 0.3


        # Style term.
        feat_content = extractor(img)
        # Sample random spatial indices from the content feature map.
        xx, xy = sample_indices(feat_content[0], feat_style)
        np.random.shuffle(xx)
        np.random.shuffle(xy)

        # NOTE(review): feat_content is passed as both "result" and
        # "content" with content_weight=0, so only the style terms
        # contribute here — presumably intentional; confirm.
        styleloss = calculate_loss(feat_content, feat_content, feat_style, [xx, xy], 0)
        loss += styleloss * style_weight

        # Backpropagate and step each optimizer.
        loss.backward()
        points_optim.step()
        width_optim.step()
        color_optim.step()

        # Clamp stroke widths and colours to their valid ranges.
        for path in shapes:
            path.stroke_width.data.clamp_(1.0, max_width)
        for group in shape_groups:
            group.stroke_color.data.clamp_(0.0, 1.0)

        # Every 20 steps, save a 4x-upscaled render and yield its path.
        if t % 20 == 0:
            with torch.no_grad():
                shapes_resized = copy.deepcopy(shapes)
                for i in range(len(shapes)):
                    shapes_resized[i].stroke_width = shapes[i].stroke_width * 4
                    for j in range(len(shapes[i].points)):
                        shapes_resized[i].points[j] = shapes[i].points[j] * 4
                img = render_drawing(shapes_resized, shape_groups, canvas_width*4, canvas_height*4, t)
                yield checkin(img.detach().cpu().numpy()[0], out_path)
                print('Iteration:', t, '\tRender loss:', loss.item())

    # Final 4x-upscaled render, saved to out_path.
    with torch.no_grad():
        shapes_resized = copy.deepcopy(shapes)
        for i in range(len(shapes)):
            shapes_resized[i].stroke_width = shapes[i].stroke_width * 4
            for j in range(len(shapes[i].points)):
                shapes_resized[i].points[j] = shapes[i].points[j] * 4
        img = render_drawing(shapes_resized, shape_groups, canvas_width*4, canvas_height*4, t).detach().cpu().numpy()[0]
        save_img(img, str(out_path))
        yield out_path

import cog
import argparse
from pathlib import Path
import cogapp
from cog import BasePredictor
# Module-level globals populated by Predictor.setup() and read by
# style_clip_draw and the feature/loss helpers above.
device, model, preprocess, extractor = None, None, None, None

# Cog predictor that turns a text prompt plus a style image into a drawing,
# combining CLIP with the differentiable vector-graphics renderer pydiffvg.
class Predictor(BasePredictor):
    # One-time model setup; populates the module-level globals used by
    # style_clip_draw and the loss functions.
    def setup(self):
        global device, model, preprocess, extractor
        # Prefer CUDA when available.
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # Configure pydiffvg: silence timing output, use the GPU if present.
        pydiffvg.set_print_timing(False)
        pydiffvg.set_use_gpu(torch.cuda.is_available())
        pydiffvg.set_device(device)

        # Load the CLIP ViT-B/32 model and the VGG-16 feature extractor.
        model, preprocess = clip.load('ViT-B/32', device, jit=False)

        extractor = Vgg16_Extractor(space="normal").to(device)


    # Generator: yields the path of the rendered image as optimization runs.
    def predict(self, prompt, style_image, num_paths, num_iterations,
                style_strength=50):
        """Run a single prediction on the model"""
        # NOTE(review): `assert` is stripped under `python -O`; raising
        # ValueError would be more robust for input validation.
        assert isinstance(num_paths, int) and num_paths > 0, 'num_paths should be an positive integer'
        assert isinstance(num_iterations, int) and num_iterations > 0, 'num_iterations should be an positive integer'
        # NOTE(review): the message says "less than 100" but the check
        # actually allows style_strength == 100 (inclusive).
        assert isinstance(style_strength, int) and style_strength >= 0 and style_strength <= 100, \
                'style_strength should be a positive integer less than 100'
        assert style_image is not None, 'style_image must be specified'
        assert prompt is not None and len(prompt) > 0, 'prompt must be specified'

        # Map style_strength (0-100) to a style-loss weight in [0, 4].
        style_weight = 4 * (style_strength/100)

        # Stream intermediate and final image paths from style_clip_draw.
        for path in style_clip_draw(prompt, str(style_image), num_paths=num_paths,\
                          num_iter=num_iterations, style_weight=style_weight, num_augs=10):
            yield path

        # NOTE(review): a `return` with a value inside a generator only sets
        # StopIteration.value; iterating callers never see it.
        return path

# Build the predictor, load the models, and gather interactive inputs.
predictor = Predictor()
predictor.setup()
prompt=input("Text description of the desired drawing：")
style_image =input("Style Image Path:")
num_paths = int(input("Number of drawing strokes:"))
num_iterations = int(input("Number of optimization iterations:"))
style_strength = int(input("How strong the style should be. (100 (max) is a lot. 0 (min) is no style):"))


# Stream intermediate and final result paths as the optimization progresses.
for path in predictor.predict(prompt=prompt, style_image=style_image, num_paths=num_paths, num_iterations=num_iterations, style_strength=style_strength):
    # Print the output path of the latest rendered image.
    print(path)
    # Open the image with Windows Explorer.
    # NOTE(review): "explorer" is Windows-only; on other platforms this
    # subprocess call fails (a cross-platform opener would be needed).
    subprocess.run(["explorer", path])

#A man is walking the dog.
#./images/starry.jpg


