import json
import re
from datetime import datetime
import glob


import cv2
import numpy as np
import torch
from PIL import Image

from transformers import CLIPProcessor, CLIPModel,pipeline
import baseconf

# Select GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"


# Prepare data: local CLIP checkpoint, a standalone test image, and the
# folder of first-frame videos (all rooted at the configured base disk).
model_path = baseconf.BASE_DISK + ":/model_path/clip-vit-large"
img_path = baseconf.BASE_DISK + ":/datasets_path/tianchivit/test.png"
fristvideo_path = baseconf.BASE_DISK + ":/datasets_path/tianchivit/fristvideo"

model = CLIPModel.from_pretrained(model_path)
processor = CLIPProcessor.from_pretrained(model_path)
# BUG FIX: was model.to("cuda:0"), which crashes on CPU-only machines even
# though `device` is computed above; use the detected device instead.
model.to(device)
img = Image.open(img_path)
# Labels for the initial sanity-check inference below.
labels = ['kobe', 'james', 'Jordan']




# Quick smoke test: score the test image against the three celebrity labels
# using CLIP's image-text similarity, then print per-label probabilities.
with torch.no_grad():
    encoded = processor(text=labels, images=img, return_tensors="pt", padding=True)
    outputs = model(
        input_ids=encoded["input_ids"].to(model.device),
        attention_mask=encoded["attention_mask"].to(model.device),
        pixel_values=encoded["pixel_values"].to(model.device),
    )
    # Softmax over the image-text logits yields label probabilities.
    probs = outputs.logits_per_image.softmax(dim=-1).cpu().numpy()


print("Label probs:", probs)  # e.g. [[0.9927 0.0042 0.0030]] — highest wins

# Print one probability line per label (user-facing text is intentionally
# Chinese; do not translate the format string).
for label, prob in zip(labels, probs.squeeze()):
    print('该图片为 %s 的概率是：%.02f%%' % (label, prob * 100.))



# Chinese-language answer vocabulary, one list of candidate answers per scene
# category. NOTE(review): not referenced anywhere else in this chunk —
# presumably kept for documentation or a Chinese-output variant; confirm
# before deleting.
cn_match_words = {
    "工况描述": ["高速/城市快速路", "城区", "郊区", "隧道", "停车场", "加油站/充电站", "未知"],
    "天气": ["晴天", "雨天", "多云", "雾天", "下雪", "未知"],
    "时间": ["白天", "夜晚", "拂晓/日暮", "未知"],
    "道路结构": ["十字路口", "丁字路口", "上下匝道", "车道汇入", "进出停车场", "环岛", "正常车道", "未知"],
    "一般障碍物": ["雉桶", "水马", "碎石/石块", "井盖", "减速带", "没有"],
    "道路异常情况": ["油污/水渍", "积水", "龟裂", "起伏不平", "没有", "未知"],
    "自车行为": ["直行", "左转", "右转", "停止", "掉头", "加速", "减速", "变道", "其它"],
    "最近的交通参与者": ["行人", "小型汽车", "卡车", "交警", "没有", "未知", "其它"],
    "最近的交通参与者行为": ["直行", "左转", "右转", "停止", "掉头", "加速", "减速", "变道", "其它"],
}

# English answer vocabulary, one list per category. The classification loop
# below scores the prompt sentences in en_match_prompteverywords and uses the
# winning prompt's INDEX to pick the submission word from the SAME position
# here — the two dicts must therefore stay index-aligned list-for-list.
en_match_words = {
# NOTE(review): "suburbs" appears twice (indices 0 and 7).
"scerario" : ["suburbs","city street","expressway","tunnel","parking-lot","gas or charging stations","unknown","suburbs"],
"weather" : ["clear","cloudy","raining","foggy","snowy","unknown","snowed"],
"period" : ["daytime","dawn or dusk","night","unknown"],
"road_structure" : ["normal","crossroads","T-junction","ramp","lane merging","parking lot entrance","round about","unknown"],
# NOTE(review): "nothing" appears twice (indices 0 and 6).
"general_obstacle" : ["nothing","speed bumper","traffic cone","water horse","stone","manhole cover","nothing","unknown"],
"abnormal_condition" : ["uneven","oil or water stain","standing water","cracked","nothing","unknown","snow cover"],
"ego_car_behavior" : ["slow down","go straight","turn right","turn left","stop","U-turn","speed up","lane change","others","braking"],
"closest_participants_type" : ["passenger car","bus","truck","pedestrain","policeman","nothing","others","unknown"],
"closest_participants_behavior" : ["slow down","go straight","turn right","turn left","stop","U-turn","speed up","lane change","others","braking","nothing"],
}


# Prompt-engineered sentences scored by CLIP for each category. The winning
# prompt's INDEX selects the submission word from en_match_words, so each
# list must stay index-aligned with the corresponding list there.
# NOTE(review): "weather" index 2 reads "It is snowing now" but maps to
# "raining" in en_match_words, and "scerario" index 7 ("valley") maps to
# "suburbs" — confirm these mappings are intentional prompt tuning rather
# than copy-paste errors.
en_match_prompteverywords = {
"scerario" : ["The car was driving in the suburbs",
              "The car is driving in the city",
              "The car is on the highway",
              "The car was driving through the tunnel",
              "Cars in the parking lot",
              "The car is at a gas station or charging station",
              "unknown",
              "The car was driving through the valley"],
"weather" : ["The weather is sunny",
             "It is a cloudy day",
             "It is snowing now",
             "It is a foggy day",
             "There's snow outside",
             "unknown",
             "It has snowed"],
"period" : ["It is daylight",
            "It's dawn or dusk"
            ,"It is night"
            ,"unknown"],
"road_structure" : ["Driving on normal roads",
                    "Driving on intersection roads",
                    "Driving on T-junction",
                    "Cars enter or leave the ramp",
                    "The car is entering the lane into the road",
                    "Cars enter and exit the parking lot",
                    "The car is driving on the roundabout road",
                    "unknown"],
"general_obstacle" : ["Nothing on the road" ,
                      "There are speed bumps  in the middle of the road",
                      "There are traffic cone  in the middle of the road",
                      "There are  water horse  in the middle of the road",
                      "There are rocks or pieces  in the middle of the road",
                      "There are a manhole cover  in the middle of the road",
                      "Nothing in the middle of the road",
                      "unknown"],
"abnormal_condition" : ["The road is uneven and bumpy",
                        "There are oil or water stains on the road",
                        "There is standing water in the road",
                        "There are some cracks in the road",
                        "Nothing on the road",
                        "unknown",
                        "There is snow on the road"],
"ego_car_behavior" : ["The video car finally slow down",
                      "the video car is going straight",
                      "the video car is turning right",
                      "the video car is turning left",
                      "The video car finally stopped",
                      "The video car finally turned around",
                      "This video car is accelerating",
                      "This video car is changing lanes",
                      "others",
                      "braking"],
"closest_participants_type" : ["In front is the back or front of the car",
                               "In front is the back or front of the  bus",
                               "In front is the back or front of the  truck",
                               "The nearest person in front of the video is one or more ordinary pedestrians",
                               "The closest thing in front of the video is one or more police officers",
                               "There's nothing in front of the video but the road",
                               "others",
                               "unknown"],
"closest_participants_behavior" : ["A pedestrian or vehicle in front of you is slowing down",
                                   "Pedestrians or vehicles in front of you keep driving or walking",
                                   "The pedestrian or vehicle in front is turning right",
                                   "The pedestrian or vehicle in front is turning left",
                                   "Complete stop of pedestrians or vehicles ahead",
                                   "A pedestrian or vehicle in front is making a U-turn",
                                   "The pedestrian or vehicle in front is accelerating forward",
                                   "A pedestrian or vehicle in front of you is changing lanes",
                                   "others",
                                    "The pedestrian or vehicle in front of him is braking or the taillight is red but he is not stopping",
                                    "nothing"]
}

# Skeleton of the submission file; one entry per video is appended to
# "test_results" by the loop below.
submit_json = {
    "author": "alive_more_day",
    # Submission date stamped as YYYYMMDD. (Was an f-string wrapped around
    # datetime.strftime — redundant, since strftime already returns a str.)
    "time": datetime.now().strftime('%Y%m%d'),
    "model": "model_name",
    "test_results": []
}

# Collect every first-frame video and process them in a deterministic order.
paths = glob.glob(fristvideo_path + '/*')
paths.sort()

# Hoisted loop invariant: split on either path separator to isolate the file
# name (works for both '/' and '\\' paths).
_path_sep_pattern = re.compile("|".join(map(re.escape, ['/', '\\'])))

for video_path in paths:
    print(video_path)
    # The clip id is the last path component (the video's file name).
    # BUG FIX: dropped the stray chained assignment `clip_id = result = ...`.
    clip_id = _path_sep_pattern.split(video_path)[-1]

    # Fallback answers; kept as-is when the first frame cannot be decoded,
    # otherwise overwritten per category by the CLIP classification below.
    single_video_result = {
        "clip_id": clip_id,
        "scerario": "cityroad",
        "weather": "unknown",
        "period": "night",
        "road_structure": "ramp",
        "general_obstacle": "nothing",
        "abnormal_condition": "nothing",
        "ego_car_behavior": "turning right",
        "closest_participants_type": "passenger car",
        "closest_participants_behavior": "braking"
    }

    cap = cv2.VideoCapture(video_path)
    openflag, img = cap.read()
    cap.release()  # release the capture handle as soon as the frame is read

    if openflag and img is not None:
        # OpenCV decodes BGR; PIL/CLIP expect RGB.
        image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

        # All keys of en_match_words are classified; the old membership filter
        # listed every key and was therefore a no-op.
        for keyword in en_match_words:
            primarytexts = en_match_words[keyword]              # submission vocabulary
            latesttexts = en_match_prompteverywords[keyword]    # prompt-engineered texts

            with torch.no_grad():
                inputs = processor(text=latesttexts, images=image, return_tensors="pt", padding=True)
                outputs = model(input_ids=inputs["input_ids"].to(model.device),
                                attention_mask=inputs["attention_mask"].to(model.device),
                                pixel_values=inputs["pixel_values"].to(model.device))
                probs = outputs.logits_per_image.softmax(dim=-1).cpu().numpy()

            # Best-scoring prompt index maps to the submission word at the same
            # position (argmax replaces the old argsort()[::-1][0]).
            single_video_result[keyword] = primarytexts[int(probs[0].argmax())]
    else:
        # ROBUSTNESS FIX: an unreadable video previously crashed in cvtColor
        # (img is None); keep the fallback answers so the clip still appears
        # in the submission.
        print("failed to read first frame, using defaults:", video_path)

    submit_json["test_results"].append(single_video_result)

# Write the submission; ensure_ascii=False keeps any non-ASCII clip ids
# readable in the utf-8-encoded output file.
with open(file="./autofrist_result.json", encoding="utf-8", mode="w") as f:
    json.dump(submit_json, f, ensure_ascii=False)
