import torch
import json
import cv2
import os
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
from collections import Counter
import torchvision.models as models
from model import *
from process_data import concat_data,normalize_data
from sklearn.metrics import f1_score
# import skimage.io as io
# from skimage.transform import resize
from model import *
## Preprocessing switches consumed by concat_data / pred_fun below.
Concat = 0
Normalize = True

## JSON file the predictions are written to.
save_path = './submit_result.json'
## Root directory holding one sub-directory of frames per sequence id.
data_path = '../final_train'

num_class = 4
## (width, height) every frame is resized to before concatenation.
image_size = (300, 170)
## Checkpoint produced by the training run (state_dict only).
pre_trained_dir = '../model_save/final/1003/resnext50_concat_0_freeze_data2_300/best_.pth'

model = model_resnext50_32x4d(Num_Class=num_class)
if torch.cuda.is_available():
    model = model.cuda()
    model.load_state_dict(torch.load(pre_trained_dir))
else:
    # No GPU: remap the checkpoint tensors onto the CPU.
    model.load_state_dict(torch.load(pre_trained_dir, map_location='cpu'))

## Ground-truth annotation list used for evaluation; the context manager
## closes the file handle (the original left it open).
with open('../label/final_val_2.json') as _f:
    json_file = json.load(_f)




def pred_fun(model, image, normalize, size=None):
    """Run a single forward pass and return the predicted class index.

    Args:
        model: classification network; switched to eval mode here.
        image: H x W x C numpy image (BGR, as produced by cv2.imread).
        normalize: if True, preprocess with the project's normalize_data
            helper (presumably returns a CHW tensor — confirm against
            process_data); otherwise just transpose to CHW and cast to float.
        size: optional (width, height) passed to cv2.resize before
            preprocessing; None skips resizing (e.g. when concat_data has
            already produced the final size).

    Returns:
        int: index of the highest-scoring class.
    """
    with torch.no_grad():
        model.eval()
        if size is not None:
            image = cv2.resize(image, size)
        if normalize:
            # Add the leading batch dimension expected by the model.
            image = normalize_data(image)[np.newaxis, ...]
        else:
            image = torch.from_numpy(np.transpose(image, (2, 0, 1))).float()[np.newaxis, ...]
        if torch.cuda.is_available():
            image = image.cuda()
        logits = model(image)
        # log_softmax is monotonic, so taking argmax over the raw logits
        # yields the same class; no softmax pass is needed for prediction.
        # (The per-frame debug print was removed.)
        return torch.argmax(logits[0]).item()

# for index,single_anno in enumerate(json_file['annotations']):
#     pred_list = [0,0,0]
#     image_dir = data_path+'/'+single_anno['id']+'/'+single_anno['key_frame']
#     image = cv2.imread(image_dir)
#     pred = pred_fun(model1,image,image_size1)
#     ## 关键帧的权重高0.5.
#     pred_list[pred] += 0.5
#     ## 针对全部数据都进行预测，然后投票。关键帧1.5票。
#     for name in single_anno['frames']:
#         image_dir = data_path+'/'+single_anno['id']+'/'+name['frame_name']
#         image = cv2.resize(cv2.imread(image_dir), image_size)
#         pred = pred_fun(model, image)
#         pred_list[pred] += 1
#     json_file['annotations'][index]['status'] = pred_list.index(max(pred_list))
#     print(json_file['annotations'][index]['status'])
#
# with open(save_path,'w') as f:
#     json.dump(json_file,f)



# ## 分两个阶段进行,同一个序列下的数据进行投票，其中key frame权重多0.5
# for index,single_anno in enumerate(json_file['annotations']):
#     pred_list = [0,0,0]
#     image_dir = data_path+'/'+single_anno['id']+'/'+single_anno['key_frame']
#     image = cv2.imread(image_dir)
#     pred = pred_fun(model1,image,image_size1)
#     ## 关键帧的权重高0.5.
#     pred_list[pred] += 0.5
#     ## 针对全部数据都进行预测，然后投票。关键帧1.5票。
#     for name in single_anno['frames']:
#         image_dir = data_path+'/'+single_anno['id']+'/'+name['frame_name']
#         image = cv2.imread(image_dir)
#         pred = pred_fun(model1, image,image_size1)
#         if pred == 1:
#             pred = pred_fun(model2, image,image_size2)+1
#         pred_list[pred] += 1
#     json_file['annotations'][index]['status'] = pred_list.index(max(pred_list))
#     print(json_file['annotations'][index]['status'])
#
# with open(save_path,'w') as f:
#     json.dump(json_file,f)


# Ground-truth labels and model predictions, collected in parallel so the
# F1 scores below can be computed over the whole validation set.
label_list = []
pred_list = []
## Predict once per sequence using the frames of that sequence combined by
## concat_data (instead of voting over per-frame predictions).
for index,single_anno in enumerate(tqdm(json_file)):
    # Normalise Windows-style separators so the '/'-based split below works.
    image_dir = (data_path+'/'+single_anno['id']+'/'+single_anno['key_frame']).replace('\\','/')
    label_list.append(single_anno['status'])
    path = os.path.dirname(image_dir)
    # Key-frame index derived from its filename, e.g. "3.jpg" -> 3.
    key_frame = int(image_dir.split('/')[-1].replace('.jpg', ''))
    data_list = []
    # NOTE(review): os.listdir returns entries in arbitrary order; if
    # concat_data relies on temporal frame order this should be sorted —
    # confirm against concat_data's implementation.
    for name in os.listdir(path):
        image_dir = os.path.join(path, name)
        image = cv2.resize(cv2.imread(image_dir), image_size)
        # image = resize(io.imread(image_dir),image_size[::-1])
        data_list.append(image)
    image = concat_data(data_list, key_frame, down_sample=1,concat=Concat)
    ## concat_data already produces the final image size, so pred_fun is
    ## called with size=None (no further resize).

    ## Single-stage prediction (no two-stage / voting scheme).
    pred = pred_fun(model, image, normalize=Normalize,size=None)
    pred_list.append(pred)
    # Overwrite the ground-truth status in-place so the dumped JSON holds
    # the model's predictions in the submission format.
    json_file[index]['status'] = pred
    # print(json_file['annotations'][index]['status'])
print(pred_list)
print(label_list)
print(Counter(pred_list))
print(Counter(label_list))
# Per-class F1 (average=None returns one score per class, index = class id).
f1 = f1_score(label_list,pred_list,average=None)
print(f1)
# Competition metric: weighted sum of the four per-class F1 scores.
print(0.1*f1[0]+0.2*f1[1] + 0.3*f1[2] + 0.4* f1[3])

with open(save_path,'w') as f:
    json.dump(json_file,f)
