import json
import re
from datetime import datetime
import glob

import clip
import cv2
import numpy as np
import torch
from PIL import Image
from clip import tokenize

import baseconf

# Pick the inference device: prefer CUDA when available, else fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model, transforms = clip.load("ViT-B/32", device=device)

# Demo data: one test image plus three candidate labels for zero-shot matching.
img_path = baseconf.BASE_DISK + ":/datasets_path/tianchivit/test.png"
fristvideo_path = baseconf.BASE_DISK + ":/datasets_path/tianchivit/fristvideo"
labels = ['kobe', 'james', 'Jordan']
img = Image.open(img_path)

# Preprocess into a single-image batch and tokenize the candidate labels.
image = transforms(img).unsqueeze(0).to(device)
text = tokenize(labels).to(device)

# Forward pass without gradients: image-to-text logits, softmaxed into a
# probability distribution over the candidate labels.
with torch.no_grad():
    logits_per_image, logits_per_text = model(image, text)
    probs = logits_per_image.softmax(dim=-1).cpu().numpy()

print("Label probs:", probs)

# Report each label's probability as a percentage (probs has shape (1, n)).
for label, prob in zip(labels, probs[0]):
    print('该图片为 %s 的概率是：%.02f%%' % (label, prob * 100.))



# Chinese candidate-label vocabulary, one list of options per submission field.
# Not referenced by the code in this file — presumably kept as the Chinese
# counterpart of en_match_words below.
cn_match_words = {
    "工况描述": ["高速/城市快速路", "城区", "郊区", "隧道", "停车场", "加油站/充电站", "未知"],  # scenario
    "天气": ["晴天", "雨天", "多云", "雾天", "下雪", "未知"],  # weather
    "时间": ["白天", "夜晚", "拂晓/日暮", "未知"],  # period / time of day
    "道路结构": ["十字路口", "丁字路口", "上下匝道", "车道汇入", "进出停车场", "环岛", "正常车道", "未知"],  # road structure
    "一般障碍物": ["雉桶", "水马", "碎石/石块", "井盖", "减速带", "没有"],  # general obstacle; NOTE(review): "雉桶" looks like a typo for "锥桶" (traffic cone) — confirm against the task spec before changing
    "道路异常情况": ["油污/水渍", "积水", "龟裂", "起伏不平", "没有", "未知"],  # abnormal road condition
    "自车行为": ["直行", "左转", "右转", "停止", "掉头", "加速", "减速", "变道", "其它"],  # ego-car behavior
    "最近的交通参与者": ["行人", "小型汽车", "卡车", "交警", "没有", "未知", "其它"],  # closest participant type
    "最近的交通参与者行为": ["直行", "左转", "右转", "停止", "掉头", "加速", "减速", "变道", "其它"],  # closest participant behavior
}

# English candidate labels fed to CLIP for zero-shot matching, keyed by the
# submission field name. NOTE(review): "scerario" and "pedestrain" are
# misspelled but kept verbatim — they presumably match the competition's
# required result schema; confirm before renaming.
en_match_words = {
    "scerario": ["suburbs", "city street", "expressway", "tunnel", "parking-lot", "gas or charging stations", "unknown"],
    "weather": ["clear", "cloudy", "raining", "foggy", "snowy", "unknown"],
    "period": ["daytime", "dawn or dusk", "night", "unknown"],
    "road_structure": ["normal", "crossroads", "T-junction", "ramp", "lane merging", "parking lot entrance", "round about", "unknown"],
    # Fixed: "nothing" was listed twice, which duplicated a candidate and
    # would skew the softmax over this label set.
    "general_obstacle": ["nothing", "speed bumper", "traffic cone", "water horse", "stone", "manhole cover", "unknown"],
    "abnormal_condition": ["uneven", "oil or water stain", "standing water", "cracked", "nothing", "unknown"],
    "ego_car_behavior": ["slow down", "go straight", "turn right", "turn left", "stop", "U-turn", "speed up", "lane change", "others"],
    "closest_participants_type": ["passenger car", "bus", "truck", "pedestrain", "policeman", "nothing", "others", "unknown"],
    "closest_participants_behavior": ["slow down", "go straight", "turn right", "turn left", "stop", "U-turn", "speed up", "lane change", "others"],
}


# Sanity-check decoding by previewing the first frame of one sample video.
cap = cv2.VideoCapture(fristvideo_path + '/41.avi')
ok, frame = cap.read()
cap.release()  # free the capture handle (previously leaked)
if not ok:
    raise RuntimeError("failed to read the first frame of 41.avi")
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
# PIL's resize returns a new image — the original discarded the result,
# making the call a no-op.
image = image.resize((600, 300))

# Skeleton of the submission file; test_results is filled by the loop below.
submit_json = {
    "author": "alive_more_day",
    "time": datetime.now().strftime('%Y%m%d'),  # e.g. "20240131"
    "model": "model_name",
    "test_results": []
}

paths = glob.glob(fristvideo_path + '/*')
paths.sort()  # deterministic processing/submission order

# Split on either separator so the clip id is the bare file name on both
# Windows ("\") and POSIX ("/") paths. Compiled once, hoisted out of the loop.
_path_sep_re = re.compile("|".join(map(re.escape, ['/', '\\'])))

for video_path in paths:
    print(video_path)
    clip_id = _path_sep_re.split(video_path)[-1]

    # Grab only the first frame, then release the handle immediately
    # (the capture object was previously leaked on every iteration).
    cap = cv2.VideoCapture(video_path)
    ok, frame = cap.read()
    cap.release()
    if not ok:
        # Skip unreadable clips instead of crashing in cvtColor on None.
        print('skip unreadable video:', video_path)
        continue

    image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    image = transforms(image).unsqueeze(0).to(device)

    # Default answers for every field; only "weather" and "road_structure"
    # are overwritten by the CLIP predictions below.
    single_video_result = {
        "clip_id": clip_id,
        "scerario": "cityroad",
        "weather": "unknown",
        "period": "night",
        "road_structure": "ramp",
        "general_obstacle": "nothing",
        "abnormal_condition": "nothing",
        "ego_car_behavior": "turning right",
        "closest_participants_type": "passenger car",
        "closest_participants_behavior": "braking"
    }

    for keyword in en_match_words:
        if keyword not in ["weather", "road_structure"]:
            continue

        candidates = en_match_words[keyword]
        with torch.no_grad():
            logits_per_image, _ = model(image, tokenize(candidates).to(device))
            probs = logits_per_image.softmax(dim=-1).cpu().numpy()

        # argmax picks the top label directly (the original performed a full
        # descending argsort just to read its first element).
        single_video_result[keyword] = candidates[int(probs[0].argmax())]

    submit_json["test_results"].append(single_video_result)

with open(file="./autofrist_result.json", encoding="utf-8", mode="w") as f:
    f.write(json.dumps(submit_json))
