import cv2
import time
import os
import json
from LipFeaturaExtract.landmarks import FaceLandmarks,normalization


# Dataset paths (input dataset root and output directory for extracted features)
lipdata_path = "/home/lisen/tool/dataset/唇语数据集句子"
save_dir = "/home/lisen/tool/PyProjects/唇语识别/dataset_sentence"
pattern="test"

# Parameters
extract_face=False      # whether to also extract the face region
scaling_factor=1        # image scaling factor passed to FaceLandmarks
threshold=0.0           # filtering threshold (landmark count / frame count must reach this)
save_josn_epchs=3      # save a JSON file every this many sentences (NOTE: name typo kept — referenced below)

# Create the output directories.
# BUGFIX: os.mkdir raises if a parent directory is missing, and the
# exists-then-mkdir pattern races with a concurrent run; makedirs with
# exist_ok=True handles both safely.
os.makedirs(save_dir, exist_ok=True)
extract_sentences = []
os.makedirs(os.path.join(save_dir, pattern), exist_ok=True)

# Instantiate the face-landmark extractor used for the whole run.
faceLandmarks = FaceLandmarks(scaling_factor=scaling_factor, extract_face=extract_face)

# Extract lip features for every video under the chosen split and save them,
# one JSON file per `save_josn_epchs` sentences.
files_list = os.listdir(os.path.join(lipdata_path, pattern))
files_list_len = len(files_list)

sentences_josn_dict = {}   # sentence label -> list of per-frame normalized lip features
sentences_count = 0        # total sentences processed so far
file_count = 1             # numeric suffix of the next JSON output file
for file_index, file in enumerate(files_list):
    print("========Extract file:{} ({}/{})==========".format(
        file, file_index+1, files_list_len))
    videos_txt_list = os.listdir(os.path.join(lipdata_path, pattern, file))
    # Strip the 4-char extension and deduplicate: each sample is a .mp4/.txt pair.
    videos_txt_list = set([i[:-4] for i in videos_txt_list])
    videos_txt_list_len = len(videos_txt_list)
    for video_index, label_name in enumerate(videos_txt_list):
        video_file_path = os.path.join(lipdata_path, pattern, file, label_name+".mp4")       # video stream path
        sentence_label_path = os.path.join(lipdata_path, pattern, file, label_name+".txt")   # sentence label path
        with open(sentence_label_path, "r", encoding="utf-8") as f:
            sentence_label = f.readlines()[0]
            # Drop the fixed 7-character prefix and the trailing newline.
            sentence_label = sentence_label[7:].strip("\n")

        # Extract features frame by frame.
        cap = cv2.VideoCapture(video_file_path)
        video_lip_data = []
        # BUGFIX: time.clock() was removed in Python 3.8; perf_counter() is
        # the documented replacement for interval timing.
        start_time = time.perf_counter()
        ret = True
        video_frame_number = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        while ret:
            ret, frame = cap.read()  # read one frame
            if ret:
                lip_features = faceLandmarks.extractLandmarks(frame)

                # Keep the frame only when exactly one face was detected and
                # the landmark-count ratio passes the threshold filter.
                if len(lip_features) == 1 and len(lip_features[0])/video_frame_number >= threshold:
                    w = frame.shape[1]   # normalize by the full image size
                    h = frame.shape[0]
                    lip_features = normalization(lip_features, w, h)[0]
                    video_lip_data.append(lip_features)

        sentences_josn_dict[sentence_label] = video_lip_data
        end_time = time.perf_counter()
        print("schedule:{}/{}  time:{:.6f}".format(video_index+1, videos_txt_list_len, end_time-start_time))
        cap.release()
        # Dump a JSON file every `save_josn_epchs` sentences, then reset the buffer.
        if sentences_count % save_josn_epchs == save_josn_epchs-1:
            print(sentences_count)
            with open(os.path.join(save_dir, pattern, "feature_"+str(file_count)+".json"), "w", encoding="utf-8") as f:
                json.dump(sentences_josn_dict,
                          fp=f, ensure_ascii=False)
            sentences_josn_dict = {}
            file_count += 1
        sentences_count += 1

    print("============Extract one file!=================")

# BUGFIX: flush the sentences accumulated after the last full save interval —
# the original script silently dropped them at the end of the run.
if sentences_josn_dict:
    with open(os.path.join(save_dir, pattern, "feature_"+str(file_count)+".json"), "w", encoding="utf-8") as f:
        json.dump(sentences_josn_dict, fp=f, ensure_ascii=False)
print("提取完成")