import torch 
from torch.utils import data # 获取迭代数据
from torch.autograd import Variable # 获取变量
import torchvision
import matplotlib.pyplot as plt
import glob,os
import numpy as np
from tqdm import tqdm
import torch.nn as nn
from torchvision import transforms as transforms
from torch.utils.data import DataLoader,Dataset
import torch.optim as optim
import xml.dom.minidom as minidom
import xml.etree.ElementTree as ET
from PIL import Image, ImageStat
from torch.optim.lr_scheduler import StepLR
import time,yaml
import torchvision
import sys
import logging
import json


# Only ResNet50 is actually defined in this module; exporting ResNet101 /
# ResNet152 as well would make ``from <module> import *`` raise AttributeError.
__all__ = ['ResNet50']

def Conv1(in_planes, places, stride=2):
    """Build the ResNet stem: 7x7 conv + BN + ReLU followed by a 3x3 max-pool.

    Args:
        in_planes: number of input channels.
        places: number of output channels of the stem convolution.
        stride: stride of the 7x7 convolution (the max-pool always uses stride 2).

    Returns:
        an ``nn.Sequential`` reducing spatial resolution by 4x overall.
    """
    stem = [
        nn.Conv2d(in_planes, places, kernel_size=7, stride=stride, padding=3, bias=False),
        nn.BatchNorm2d(places),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
    ]
    return nn.Sequential(*stem)

class Bottleneck(nn.Module):
    """ResNet bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand.

    The main path shrinks to ``places`` channels, convolves at ``stride``,
    then expands to ``places * expansion`` channels. When ``downsampling`` is
    True a 1x1 projection shortcut matches the residual's shape; otherwise
    the input is added unchanged.
    """

    def __init__(self, in_places, places, stride=1, downsampling=False, expansion=4):
        super(Bottleneck, self).__init__()
        self.expansion = expansion
        self.downsampling = downsampling
        out_channels = places * self.expansion

        # Main path; module creation order matters for reproducible init.
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_places, places, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(places, places, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(places),
            nn.ReLU(inplace=True),
            nn.Conv2d(places, out_channels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )

        if self.downsampling:
            # Projection shortcut: match channel count and stride of the main path.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_places, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Return relu(main_path(x) + shortcut(x))."""
        shortcut = self.downsample(x) if self.downsampling else x
        out = self.bottleneck(x)
        out = out + shortcut
        return self.relu(out)

class ResNet(nn.Module):
    """ResNet backbone built from Bottleneck blocks, with sigmoid outputs.

    The final sigmoid makes each class score an independent probability, so
    the model is suited to multi-label training with ``nn.BCELoss``.

    Args:
        blocks: list of four ints — the number of Bottleneck blocks per stage
            (e.g. [3, 4, 6, 3] for ResNet-50).
        num_classes: size of the output layer.
        expansion: channel expansion factor of the Bottleneck blocks.
            NOTE: the hard-coded stage input widths (256/512/1024) assume
            expansion == 4.
    """

    def __init__(self, blocks, num_classes=2, expansion=4):
        super(ResNet, self).__init__()
        self.expansion = expansion

        self.conv1 = Conv1(in_planes=3, places=64)

        self.layer1 = self.make_layer(in_places=64, places=64, block=blocks[0], stride=1)
        self.layer2 = self.make_layer(in_places=256, places=128, block=blocks[1], stride=2)
        self.layer3 = self.make_layer(in_places=512, places=256, block=blocks[2], stride=2)
        self.layer4 = self.make_layer(in_places=1024, places=512, block=blocks[3], stride=2)

        # Generalization: AdaptiveAvgPool2d((1, 1)) is identical to the former
        # AvgPool2d(7) for 224x224 inputs but also supports other input sizes.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Bug fix: honor num_classes instead of the former hard-coded 2.
        # 512 * expansion == 2048 for the default expansion of 4.
        self.fc = nn.Linear(512 * expansion, num_classes)

        # Standard He init for convs, unit-gain init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def make_layer(self, in_places, places, block, stride):
        """Build one ResNet stage: a strided/projecting block plus identity blocks."""
        layers = [Bottleneck(in_places, places, stride, downsampling=True)]
        for _ in range(1, block):
            layers.append(Bottleneck(places * self.expansion, places))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return per-class sigmoid probabilities of shape (batch, num_classes)."""
        x = self.conv1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return torch.sigmoid(x)

def ResNet50():
    """Build a ResNet-50 (Bottleneck stage depths 3-4-6-3)."""
    return ResNet(blocks=[3, 4, 6, 3])



def read_xml(data_path, json_file, save_path, class_name_list, class_dist_num, logger):
    """Scan VOC-style XML annotations and write a multi-label JSON index.

    Reads every ``data_path/xml/*.xml`` file, builds a one-hot label vector
    over ``class_name_list`` from the ``<object><name>`` entries, and writes
    ``{"img/<name>.jpg": "0 1 ..."}`` to ``json_file``. Per-class counts are
    logged via ``logger``.

    Args:
        data_path: dataset root containing an ``xml/`` subdirectory.
        json_file: output path for the JSON label index.
        save_path: legacy parameter kept for interface compatibility (unused).
        class_name_list: ordered list of class names; list position defines
            the one-hot index.
        class_dist_num: length of the one-hot vector (callers pass
            ``len(class_name_list)``).
        logger: logger used for the per-class count summary.
    """
    json_dict = {}
    class_number_array = np.zeros(class_dist_num, dtype=int)
    for xml_path in glob.glob(data_path + "/xml/*.xml"):
        xml_name = os.path.basename(xml_path)
        # splitext is safer than split('.')[0] for names containing dots.
        img_name = os.path.splitext(xml_name)[0] + '.jpg'
        one_hot_code = np.zeros(class_dist_num, dtype=int)
        tree = ET.parse(os.path.join(data_path + "/xml/", xml_name))
        root = tree.getroot()
        for obj in root.findall('object'):
            for name in obj.findall('name'):
                # Bug fix: the original printed unknown class names and then
                # crashed on list.index(); skip unknown names instead.
                if name.text not in class_name_list:
                    print(xml_name)
                    continue
                idx = class_name_list.index(name.text)
                class_number_array[idx] += 1
                one_hot_code[idx] = 1
        json_dict[os.path.join("img/", img_name)] = ' '.join(str(int(v)) for v in one_hot_code)

    if len(class_name_list) > class_dist_num:
        print("error, classes in dataset are :", class_name_list)
    else:
        for i in range(class_dist_num):
            logger.info('{}: {}'.format(class_name_list[i], class_number_array[i]))
        with open(json_file, 'w') as json_fp:
            json.dump(json_dict, json_fp)
        
def get_label(label_path):
    """Load the image-path -> label-string mapping written by read_xml.

    Args:
        label_path: path to a JSON file mapping relative image paths to
            space-separated one-hot label strings.

    Returns:
        the dict parsed from the JSON file.
    """
    with open(label_path, 'r', encoding='utf-8') as f:
        return json.load(f)

class defcls_dataset(Dataset):
    """Multi-label image dataset driven by a {relative_path: "0 1 ..."} dict.

    Args:
        root: dataset root directory; image paths in ``label`` are relative to it.
        label: dict mapping relative image paths to space-separated one-hot
            label strings (as produced by read_xml / get_label).
        signal: legacy field separator, kept for interface compatibility
            (unused with dict labels).
        transform: optional PIL-level augmentation applied before ``to_tensor``.
        to_tensor: transform converting the (augmented) PIL image to a tensor;
            must be provided before ``__getitem__`` is used.
        is_train: kept for interface compatibility (unused).
    """

    def __init__(self, root, label, signal=' ', transform=None, to_tensor=None, is_train=True):
        self._root = root
        self._label = label
        self._transform = transform
        self._signal = signal
        self._to_tensor = to_tensor
        self._list_images(self._root, self._label, self._signal)

    def _list_images(self, root, label, signal):
        # Collect (absolute_path, float_label_vector) pairs; entries whose
        # image file is missing on disk are printed and skipped.
        self.synsets = [root]
        self.items = []
        for img_file in label:
            full_path = os.path.join(root, img_file)
            if os.path.isfile(full_path):
                target = np.array(label[img_file].split(' '), dtype=float)
                self.items.append((full_path, target))
            else:
                print(full_path)
        print('the total image is ', len(label))

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        path, target = self.items[index]
        # Close the image file handle deterministically instead of relying on GC.
        with Image.open(path) as im:
            img = im.convert('RGB')
        # Bug fix: the original used ``self.items[index][1:]`` — a tuple slice
        # that wrapped the label vector in a spurious leading dimension of
        # size 1, which the training loop then had to squeeze away. Return the
        # 1-D vector directly; a downstream squeeze(dim=1) remains a no-op for
        # more than one class.
        label = np.array(target)
        if self._transform is not None:
            img = self._transform(img)
        return self._to_tensor(img), label



def get_logger(output_folder):
    """Return the shared training logger, logging INFO+ to stderr and a file.

    Args:
        output_folder: directory in which ``log.txt`` is created.

    Returns:
        the ``logging.Logger`` named 'cpdistance train'.
    """
    logger = logging.getLogger('cpdistance train')
    logger.setLevel(logging.INFO)

    # Bug fix: the original attached fresh handlers on every call, so calling
    # get_logger more than once duplicated every log line. Reuse the logger
    # as-is when it is already configured.
    if logger.handlers:
        return logger

    log_file_name = os.path.join(output_folder, "log.txt")
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(message)s")

    sh = logging.StreamHandler(sys.stderr)
    sh.setLevel(logging.INFO)
    sh.setFormatter(formatter)

    fh = logging.FileHandler(log_file_name)
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)

    logger.addHandler(sh)
    logger.addHandler(fh)

    return logger

def check_dir(dir_path):
    """Create ``dir_path`` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` to avoid the check-then-create race of the
    original exists()/makedirs() pair.
    """
    os.makedirs(dir_path, exist_ok=True)
        





def main(cfg):
    """End-to-end training entry point.

    Loads the YAML config at ``cfg``, indexes VOC-style XML annotations into a
    JSON label file, trains a ResNet50 with BCELoss on multi-label targets,
    and writes checkpoints, a trimmed config copy and a label map into the
    output folder.

    Args:
        cfg: path to the YAML configuration file.
    """
    # Load the YAML config.
    data_dicts = yaml.full_load(open(cfg, encoding='utf-8'))
    if not data_dicts['output_folder']:
        output_folder = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'output')
    else:
        output_folder =  data_dicts['output_folder']
    check_dir(output_folder)

    # Open the log.
    logger = get_logger(output_folder)
    logger.info('<=======================start train=======================>')

    # Preset normalization constants from the config.
    DEFAULT_CROP_PCT = data_dicts['train_config']['DEFAULT_CROP_PCT']
    IMAGENET_DEFAULT_MEAN = data_dicts['train_config']['IMAGENET_DEFAULT_MEAN']
    IMAGENET_DEFAULT_STD = data_dicts['train_config']['IMAGENET_DEFAULT_STD']
    IMAGENET_INCEPTION_MEAN = data_dicts['train_config']['IMAGENET_INCEPTION_MEAN']
    IMAGENET_INCEPTION_STD = data_dicts['train_config']['IMAGENET_INCEPTION_STD']
    # NOTE(review): eval() on config strings executes arbitrary code; only use
    # with trusted config files (ast.literal_eval would be safer).
    IMAGENET_DPN_MEAN = tuple((eval(data_dicts['train_config']['IMAGENET_DPN_MEAN'])))
    IMAGENET_DPN_STD = tuple(eval(data_dicts['train_config']['IMAGENET_DPN_STD']))
    # Runtime configuration values.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(data_dicts['cuda_device']) # select the GPU(s) to use
    class_name_list = data_dicts['class_name_list'] # ordered class labels
    if not data_dicts['data_folder']:
        data_folder = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'coco')
    else:
        data_folder = data_dicts['data_folder']
    json_file = os.path.join(data_folder, "train_data.json")
    resize_size = data_dicts['train_config']['resize_size'] # training resize target

    lr = data_dicts['optimizer_value']['learning_rate']
    betas = data_dicts['optimizer_value']['betas']
    eps = data_dicts['optimizer_value']['eps']
    weight_decay = data_dicts['optimizer_value']['weight_decay']
    amsgrad = data_dicts['optimizer_value']['amsgrad']
    step_size = data_dicts['optimizer_value']['scheduler_step_size']
    gamma = data_dicts['optimizer_value']['gamma']
    set_epoch = data_dicts['optimizer_value']['epoch']
    save_point = data_dicts['optimizer_value']['save_point']


    batch_size = data_dicts['data_loader']['batch_size']
    shuffle = data_dicts['data_loader']['shuffle']


    # Initialization.
    read_xml(data_folder, json_file, data_folder, class_name_list, len(class_name_list), logger) # index XML annotations into train_data.json
    train_dic = get_label(json_file) # load the {image path: one-hot string} mapping

    train_transformer = transforms.Compose([
        transforms.Resize(resize_size, Image.BILINEAR),
        transforms.RandomHorizontalFlip(),
    ])

    to_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
    ])


    dataset = defcls_dataset(data_folder, train_dic, signal=' ', transform=train_transformer, to_tensor=to_tensor,
                                    is_train = True) # build the dataset in PyTorch format
    model =  ResNet50().cuda().float() # instantiate the model on GPU
    optimizer = optim.Adam(model.parameters(), lr, betas, eps, weight_decay, amsgrad) # build the optimizer
    scheduler = StepLR(optimizer, step_size, gamma)
    loss_fn = nn.BCELoss() # loss function; model outputs sigmoid probabilities
    train_DataLoader = DataLoader(dataset, batch_size, shuffle)
    lr_list = []
    logger.info("default learning rate: {}".format(optimizer.defaults['lr']))

    # Training loop.
    for epoch in range(set_epoch):
        logger.info('<=======================NO.{}=======================>'.format(epoch + 1))
        tic = time.time()
        count = 0
        total_loss = 0
        for data in tqdm(train_DataLoader):
            inputs, labels = data
            # NOTE(review): Variable is deprecated in modern PyTorch — plain
            # tensors carry autograd state; kept as-is here.
            inputs = Variable(inputs.cuda()).float()
            labels = Variable(labels.cuda()).float().squeeze(dim=1)
                # move the batch to GPU; squeeze drops the extra label dim
                # that the dataset's tuple-slice label introduces
            pred = model(inputs)
            
            loss = loss_fn(pred,labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total_loss += loss.cpu().detach().numpy()
            count += 1
        logger.info("epoch: {} loss: {}".format(epoch + 1, total_loss/count))
        lr_list.append(optimizer.state_dict()['param_groups'][0]['lr'])
        scheduler.step()
        logger.info("NO.{} epoch learning rate: {}".format(epoch + 1, optimizer.param_groups[0]['lr']))
        # Periodic checkpoint every save_point epochs, plus a final snapshot.
        if (epoch +1) % save_point == 0:
            path_name=os.path.join(output_folder ,str(epoch+1) + '.pth')
            torch.save(model.state_dict(),path_name)
        if epoch == set_epoch -1:
            path_name=os.path.join(output_folder  , 'final.pth')
            torch.save(model.state_dict(),path_name)
        toc = time.time()
        # Estimate remaining wall-clock time from this epoch's duration.
        sec_remain = (toc - tic) * (set_epoch - epoch - 1)
        hh = int(sec_remain//3600)
        mm = int((sec_remain-hh*3600)//60)
        ss = int(sec_remain-hh*3600-mm*60)
        logger.info('time {}:{}:{} remains'.format(hh, mm, ss))
        

    logger.info("COMPLETE learning! Finial learning rate: {}".format(lr_list[-1]))

    # Persist a trimmed copy of the config next to the checkpoints.
    with open(os.path.join(output_folder, 'config.yaml'), 'w') as c:
        new_dic = {}
        for k in ['info', 'more_info', 'optimizer_value', 'data_loader', 'train_config']:
            new_dic[k] = data_dicts[k]
        yaml.dump(new_dic, c, default_flow_style = False, allow_unicode = True, indent = 4)
    
    # NOTE(review): this label map is hard-coded rather than derived from
    # class_name_list — confirm it matches the config's class order.
    with open(os.path.join(output_folder, 'label.json'), 'w') as l:
        label_dic = {'1':'bubingpian', '2':'bingpian'}
        json.dump(label_dic, l)


if __name__ == '__main__':
    # Fail with a clear usage message instead of an IndexError when the
    # config-file argument is missing.
    if len(sys.argv) < 2:
        sys.exit('usage: python {} <config.yaml>'.format(sys.argv[0]))
    main(sys.argv[1])
