# import requests
# from unsplash_spider_image.user_agent_kuaidaili_proxy import get_random_ua
# xhr_url = 'https://assets.mixkit.co/videos/preview/mixkit-hands-full-of-coins-13259-large.mp4'
# proxy = {'http': 'http://t10635913038651:09ht71vf@tps152.kdlapi.com:15818/', 'https': 'http://t10635913038651:09ht71vf@tps152.kdlapi.com:15818/'}
#
#
# def xhr_get_method():
#     # 获取列表页图片数据
#     new_user_agent = get_random_ua()
#     # print('new_user_agent', new_user_agent)
#     headers = {
#         'Connection': 'close',
#         'Cookie': 'CookieConsent={stamp:%27-1%27%2Cnecessary:true%2Cpreferences:true%2Cstatistics:true%2Cmarketing:true%2Cmethod:%27implied%27%2Cver:1%2Cutc:1701682560290%2Cregion:%27KR%27}; _gcl_au=1.1.22244747.1701682563; _gid=GA1.2.1569933861.1701682563; _fbp=fb.1.1701682564796.948669068; _clck=1551ykf%7C2%7Cfha%7C1%7C1433; algolia-user-token=2ce69c086843928b5c2e0c773854d54f; __cf_bm=pGKIWkiYIZJvZbKzboCMe.tNTYwYbNXTqVzHBkze95w-1701761957-0-AbgVKfcUePEQCivPGIld59oiV9cWTanskty0K36/47ZovaWJ46JKQAwGhNCeKKVsL1da0W9iqhns4/QenD/68hs=; _clsk=qy7geb%7C1701761957835%7C6%7C1%7Cw.clarity.ms%2Fcollect; _ga=GA1.1.1620032917.1701682563; _gat_gtag_UA_11834194_84=1; _ga_HD6V8WBY2G=GS1.1.1701761986.4.1.1701762071.0.0.0; _ga_VXF53CMVLJ=GS1.1.1701761986.4.1.1701762071.0.0.0',
#         'User-Agent': new_user_agent
#     }
#
#     response = requests.get(xhr_url, headers=headers, timeout=45)
#     video = response.content
#     print('response', video)
#     # 保存为视频
#     with open('test.mp4', 'wb+') as f:
#         f.write(video)
#
#
# xhr_get_method()



import random
import re
import time
import pymysql
import redis
import requests
import unicodedata
from utils.msg_queue import connect_message_queue
from setting.redis_config import redis_test_url
from setting.mysql_test import mysql45_config
from unsplash_spider_image.user_agent_kuaidaili_proxy import get_random_ua
from utils.md5 import MD5Utils
import datetime
from lxml import etree

#redsi_test_url = "redis://192.168.1.22:6379/1"  # NOTE: the Redis IP changes dynamically, so the value is imported from setting.redis_config instead of being hard-coded here

# Base listing URL; the lowercased search query is appended to form the crawl entry point.
URL = 'https://mixkit.co/free-stock-video/'
# Kuaidaili tunnel proxy. NOTE(review): credentials are embedded in the URL — move to config/env.
# Currently unused by request_get_method (requests.get is called without proxies=proxy).
proxy = {'http': 'http://t10635913038651:09ht71vf@tps152.kdlapi.com:15818/', 'https': 'http://t10635913038651:09ht71vf@tps152.kdlapi.com:15818/'}
# Password for the (currently commented-out) local Redis connection in __init__.
REDIS_PASSWORD = None


class MixkitVideoSpiderProduce(object):
    """Producer spider for mixkit.co free stock videos.

    Crawls the listing page for a search query, follows each video detail
    page, and extracts (download_url, description, labels, generated file
    name) for each video.

    NOTE(review): the hard-coded Cookie below is session-specific and will
    expire; it should come from configuration — confirm with the owner.
    """

    def __init__(self, query):
        # Redis URL for the (currently disabled) message-queue producer.
        self.redis_url = redis_test_url
        # Normalize to lowercase so the query matches mixkit's URL scheme.
        self.query = query.lower()
        self.init_url = URL + self.query
        print('init_url', self.init_url)
        # self.mysql_conn = pymysql.connect(**mysql45_config)
        # self.mysql_cursor = self.mysql_conn.cursor()
        # self.redis_conn = redis.Redis(host='localhost', port=6379, db=5, password=REDIS_PASSWORD)

    # def redis_new_queue(self, json_msg):
    #     q = connect_message_queue(self.query, url=self.redis_url, maxsize=10000, lazy_limit=True)
    #     q.put(json_msg)
    #     print('插入redis队列成功')

    def request_get_method(self, url):
        """Fetch *url* with a rotating User-Agent.

        Returns:
            tuple[str, str]: (decoded HTML body, final response URL).
        """
        new_user_agent = get_random_ua()
        headers = {
            'Connection': 'close',
            'Cookie': 'CookieConsent={stamp:%27-1%27%2Cnecessary:true%2Cpreferences:true%2Cstatistics:true%2Cmarketing:true%2Cmethod:%27implied%27%2Cver:1%2Cutc:1701682560290%2Cregion:%27KR%27}; _gcl_au=1.1.22244747.1701682563; _gid=GA1.2.1569933861.1701682563; _fbp=fb.1.1701682564796.948669068; _clck=1551ykf%7C2%7Cfha%7C1%7C1433; algolia-user-token=2ce69c086843928b5c2e0c773854d54f; _ga=GA1.1.1620032917.1701682563; _ga_HD6V8WBY2G=GS1.1.1701745701.2.1.1701745902.0.0.0; _ga_VXF53CMVLJ=GS1.1.1701745701.2.1.1701745902.0.0.0; __cf_bm=2IGiMuMP49lpT83FCmJt9Bmu39qX8JAnDzAi4jxuE8Y-1701747940-0-Aa+TBrFApP23G8ePGcBGfxkluUSqiM0JdQGelSOYS//kENKH7WrgOwghfKEXZcCSPMKz/fRuHazrhMOOVy2tpR8=; _clsk=fewetv%7C1701747941477%7C6%7C1%7Cw.clarity.ms%2Fcollect',
            'User-Agent': new_user_agent
        }

        response = requests.get(url, headers=headers, timeout=45)
        response.encoding = 'utf-8'
        html = response.content.decode('utf-8')
        print('response', response.status_code)
        request_url = response.url
        return html, request_url

    def parse_list_html(self, tree):
        """Extract absolute detail-page URLs from the listing page *tree*.

        Returns:
            list[str] | None: absolute URLs, or None when the xpath matched
            nothing. (xpath() returns a list, never None, so emptiness — not
            ``is None`` — is the correct failure test.)
        """
        #video_urls = tree.xpath('/html/body/app-root/app-shell/main/search-page/div/videos-grid/coverr-grid[2]/div/div/coverr-video/a/@href')
        video_urls = tree.xpath('/html/body/div[4]/div/div[2]/div[1]/div/div/div[1]/div[3]/a/@href')
        if not video_urls:
            return None
        full_urls = ['https://mixkit.co/' + video_url for video_url in video_urls]
        print('full_urls', len(full_urls))
        return full_urls

    def parse_detail_html(self, full_url):
        """Fetch one detail page and extract its video metadata.

        Returns:
            tuple[str, str, str, str] | None:
            (download_url, description, comma-joined labels, generated video
            file name), or None when the page or the video source element is
            missing.
        """
        next_html, detail_url = self.request_get_method(full_url)
        if next_html is None:
            return None
        detail_html = etree.HTML(next_html)
        download_urls = detail_html.xpath('/html/body/div[3]/div[2]/div[1]/video/@src')
        # xpath() returns a list (never None). Bail out on an empty match so
        # video_name below is always defined — the original used a module
        # `global video_name` and raised NameError on this path.
        if not download_urls:
            return None
        # File extension of the (single relevant) video source.
        video_ext = download_urls[0].split('.')[-1]

        # Timestamp with microseconds keeps generated file names unique.
        now_time = datetime.datetime.now()
        now_time_str = now_time.strftime("%Y%m%d_%H%M%S.%f")
        video_name = self.query + '_' + now_time_str + '.' + video_ext

        descriptions = detail_html.xpath('/html/body/div[2]/span/text()')
        # Strip surrounding whitespace and embedded newlines from each description.
        descriptions = [description.replace('\n', '').strip() for description in descriptions]
        labels = detail_html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/a//text()')
        # TODO: add a label-table dedup check before producing.

        # Flatten list results to plain strings; fall back to '' when absent.
        download_url_str = download_urls[0]
        description_str = descriptions[0] if descriptions else ''
        labels_str = ', '.join(labels) if labels else ''
        print('download_url_str', download_url_str, 'description_str', description_str, 'labels_str', labels_str)
        return download_url_str, description_str, labels_str, video_name

    def run(self):
        """Crawl the listing page for the query and parse every detail page."""
        html, request_url = self.request_get_method(self.init_url)
        print('html', html)
        tree = etree.HTML(html)
        full_urls = self.parse_list_html(tree)
        if full_urls is None:
            return None
        for i, full_url in enumerate(full_urls):
            result = self.parse_detail_html(full_url)
            # parse_detail_html returns None on failure; the original
            # unpacked unconditionally and crashed with TypeError.
            if result is None:
                continue
            download_url_str, description_str, labels_str, video_name = result


# Guard the crawl behind __main__ so importing this module does not
# immediately fire network requests (the original ran on import).
if __name__ == '__main__':
    query = 'web'
    mixkit_video_spider = MixkitVideoSpiderProduce(query)
    mixkit_video_spider.run()
