import json
import time

from ScrapyObject.spiders.utils.aa import spiderInfo


# 读取JSON文件
def read_json_file(file_path):
    """Load and return the JSON payload stored at *file_path* (read as UTF-8)."""
    with open(file_path, 'r', encoding='utf-8') as fp:
        return json.load(fp)


def read_json_file_two(file_path):
    """Load and return the JSON payload stored at *file_path*.

    Opens the file with an explicit UTF-8 encoding: JSON is defined as
    UTF-8 (RFC 8259), and relying on the platform default encoding
    (e.g. a legacy code page on Windows) corrupts or rejects non-ASCII
    content — the sibling ``read_json_file`` already does this correctly.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)


class VideoBean:
    """Value holder for one scraped video record.

    Equality and hashing are defined on the ``(pUrl, vUrl)`` pair so
    instances can be deduplicated via a ``set``.
    """

    # Class-level defaults; instances normally overwrite these.
    id = 0
    name = ''
    url = ''
    e = ''
    i = ''
    tags = ''
    pUrl = ''   # picture/poster URL
    vUrl = ''   # video URL

    def __eq__(self, other):
        # Guard against foreign types: the original accessed other.pUrl
        # unconditionally and raised AttributeError when compared with
        # anything that is not a VideoBean.
        if not isinstance(other, VideoBean):
            return NotImplemented
        return self.pUrl == other.pUrl and self.vUrl == other.vUrl

    def __hash__(self):
        # Hash the pair as a tuple instead of concatenating the strings:
        # 'ab' + 'c' and 'a' + 'bc' would otherwise collide.
        return hash((self.pUrl, self.vUrl))


class SpiderInfo:
    """Configuration record describing a single spider run."""

    # Spider name (also used as the data-file prefix by callers).
    name = ''
    # Target site URL.
    url = ''
    # Whether the scraped data needs to be merged.
    isMerge = False
    # Whether the videoUrl values need their validity verified.
    isPlay = False

    def __init__(self, name, url, isMerge, isPlay):
        """Store the run configuration on the instance."""
        # Spider name.
        self.name = name
        # Target site URL.
        self.url = url
        # Merge scraped data?
        self.isMerge = isMerge
        # Verify videoUrl validity?
        self.isPlay = isPlay


# Step 1: load the raw scrape, drop records with malformed video URLs,
# and rewrite the cleaned list. (The original comment also mentioned
# deduplication, but this script only filters — NOTE(review): dedup
# presumably happens in a later step; confirm.)
if __name__ == '__main__':
    print('开始')
    timestamp = int(time.time())
    # 'records' is a parsed JSON list, not a string (the original name
    # 'jsonStr' was misleading).
    records = read_json_file('../../../' + spiderInfo.name + '1.json')
    print("原始数据一共: " + str(len(records)))
    videoList = []
    for record in records:
        vurl = record['vUrl']
        # Any of these characters marks a broken/truncated URL capture;
        # print rejects for inspection, keep everything else.
        if any(ch in vurl for ch in ('>', ')', '"')):
            print(vurl)
        else:
            videoList.append(record)
    records.clear()
    # Stream straight to the output file; json.dump avoids building the
    # entire serialized string in memory first.
    with open('../../../' + spiderInfo.name + '2.json', 'w', encoding='utf-8') as file:
        json.dump(videoList, file, ensure_ascii=False)
    endTime = int(time.time())
    print(str(endTime - timestamp))
    print('结束')
