import random
import time
import pymysql
import redis
import requests
from utils.msg_queue import connect_message_queue
from setting.redis_config import redis_test_url
from setting.mysql_test import mysql45_config
from unsplash_spider_image.user_agent_kuaidaili_proxy import get_random_ua
from utils.md5 import MD5Utils
import datetime
from lxml import etree

# redis_test_url = "redis://192.168.1.22:6379/1"  -- the IP changes dynamically, so it lives in settings

# Search landing page; the lower-cased query keyword is appended to this base.
URL = 'https://mixkit.co/free-stock-video/'
# Kuaidaili tunnel proxy. NOTE(review): credentials are hard-coded here and the
# dict is never passed to requests.get in this file — confirm whether the proxy
# is still needed; if so, move the credentials into settings.
proxy = {'http': 'http://t10635913038651:09ht71vf@tps152.kdlapi.com:15818/', 'https': 'http://t10635913038651:09ht71vf@tps152.kdlapi.com:15818/'}
# Local Redis (db=5) is used without authentication.
REDIS_PASSWORD = None


class MixkitVideoSpiderProduce(object):
    """Producer spider for mixkit.co free stock videos.

    For one search keyword it walks every listing page, visits each video
    detail page, stores the metadata in MySQL (``video_info`` plus the tag
    tables) and pushes a download task onto a Redis message queue that a
    separate consumer drains (``download_status`` starts at 0).
    """

    def __init__(self, query):
        """Open the MySQL/Redis connections and build the first listing URL.

        :param query: search keyword; lower-cased because mixkit search
            paths are lower case.
        """
        self.video_source = 'mixkit'
        self.redis_url = redis_test_url
        self.query = query.lower()
        self.init_url = URL + self.query
        self.mysql_conn = pymysql.connect(**mysql45_config)
        self.mysql_cursor = self.mysql_conn.cursor()
        self.redis_conn = redis.Redis(host='localhost', port=6379, db=5, password=REDIS_PASSWORD)

    def redis_new_queue(self, json_msg):
        """Push one download task onto the per-query Redis message queue."""
        q = connect_message_queue(self.query, url=self.redis_url, maxsize=10000, lazy_limit=True)
        q.put(json_msg)
        print('插入redis队列成功')

    def distinct_url_redis(self, json_msg):
        """Enqueue the task only when its md5_url has not been seen before.

        Membership is tracked in the Redis set ``mixkit_url_set``.
        """
        md5_url = json_msg['md5_url']
        if self.redis_conn.sismember('mixkit_url_set', md5_url):
            print('md5_url已存在跳过')
            return
        self.redis_conn.sadd('mixkit_url_set', md5_url)
        self.redis_new_queue(json_msg)

    def distinct_retry_url_redis(self, url):
        """Return True when *url* is already in the retry set, else record it."""
        if self.redis_conn.sismember('retry_url_set', url):
            print('retry_url已存在跳过')
            return True
        self.redis_conn.sadd('retry_url_set', url)
        return False

    def get_detail_data(self, description, video_url, tag_title, video_source, video_name):
        """Build the MySQL row dict and the Redis download task for one video.

        :return: ``(data, download_data, md5_url)``
        """
        md5_url = MD5Utils.encrypt(video_url)
        data = {
            'video_url': video_url,
            'video_name': video_name,
            'md5_url': md5_url,
            'tag_title': tag_title,
            'source': video_source,
            'description': description,
            'download_status': 0,  # 0 = not downloaded yet
        }
        # Fix: reuse md5_url instead of hashing the same URL a second time.
        download_data = {
            'video_url': video_url,
            'video_name': video_name,
            'md5_url': md5_url,
        }
        return data, download_data, md5_url

    def save_full_to_datebase(self, json_msg):
        """Insert one record into ``video_info`` unless md5_url already exists."""
        md5_url = json_msg['md5_url']
        # Fix: parameterized query — the original interpolated md5_url into
        # the SQL string, which is injection-prone and breaks on quotes.
        select_sql = "SELECT * FROM video_info WHERE md5_url=%s"
        self.mysql_cursor.execute(select_sql, (md5_url,))
        if self.mysql_cursor.fetchone():
            # Duplicate record: skip the insert.
            print("%s视频已存在" % json_msg['video_url'])
            return

        insert_sql = "INSERT INTO video_info (source, search_key, video_name, video_url, md5_url, tag_title, description, download_status, create_on) " \
                     "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, NOW())"
        self.mysql_cursor.execute(insert_sql, (
            json_msg['source'], self.query, json_msg['video_name'],
            json_msg['video_url'], md5_url, json_msg['tag_title'],
            json_msg['description'], json_msg['download_status']))
        self.mysql_conn.commit()

    def get_video_id(self, md5_url):
        """Return the ``(id,)`` row for *md5_url*, or None when absent."""
        # Fix: parameterized instead of string-interpolated SQL.
        select_video_id_sql = "SELECT id FROM video_info WHERE md5_url=%s"
        self.mysql_cursor.execute(select_video_id_sql, (md5_url,))
        return self.mysql_cursor.fetchone()

    def request_get_method(self, url):
        """GET *url* with a random User-Agent; return ``(html, final_url)``.

        :raises requests.RequestException: on network errors / timeout (45s).
        """
        headers = {
            'Connection': 'close',
            'Cookie': 'MUID=28E5E93B510C61F41338F8C8550C67FD; ANON=A=A67F9D02196C5396788B8ACEFFFFFFFF&E=1d0f&W=3; NAP=V=1.9&E=1cb5&C=MAOzZWba1rkOSPGSoeyT-k0QG-zdKUl8iPnt1t94t4eexeyA1CS86w&W=3',
            'User-Agent': get_random_ua(),
        }
        response = requests.get(url, headers=headers, timeout=45)
        # Decode the raw bytes directly; setting response.encoding as well
        # (as the original did) was redundant.
        html = response.content.decode('utf-8')
        print('response', response.status_code)
        return html, response.url

    def parse_list_html(self, tree):
        """Extract detail-page URLs and pagination URLs from a listing tree.

        :return: ``(full_urls, page_urls)``; page_urls is de-duplicated
            while preserving order.
        """
        # xpath() always returns a list (possibly empty), never None, so the
        # original `is None` guards were dead code.
        video_hrefs = tree.xpath('/html/body/div[4]/div/div[2]/div[1]/div/div/div[1]/div[3]/a/@href')
        full_urls = ['https://mixkit.co' + href for href in video_hrefs]

        page_urls = []
        seen = set()
        for href in tree.xpath('/html/body/div[4]/div/div[2]/div[2]/a/@href'):
            page_url = 'https://mixkit.co' + href
            if page_url in seen:
                continue
            seen.add(page_url)
            page_urls.append(page_url)
        return full_urls, page_urls

    def parse_data(self, next_html):
        """Parse one detail page.

        :return: ``(download_urls, video_name, descriptions, labels, tag_ids)``
            or five ``None`` values when no video source was found.
        """
        detail_html = etree.HTML(next_html)
        download_urls = detail_html.xpath('/html/body/div[3]/div[2]/div[1]/video/@src')
        if not download_urls:
            return None, None, None, None, None

        # Timestamped file name, unique down to the microsecond. Fix: the
        # original used a `global video_name` hack and looped just to pick
        # an extension; only download_urls[0] is consumed downstream, so
        # name the file after that URL's extension.
        now_time_str = datetime.datetime.now().strftime("%Y%m%d_%H%M%S.%f")
        extension = download_urls[0].split('.')[-1]
        video_name = self.query + '_' + now_time_str + '.' + extension

        descriptions = detail_html.xpath('/html/body/div[2]/span/text()')
        # Strip embedded newlines and surrounding whitespace in one pass.
        descriptions = [description.replace('\n', '').strip() for description in descriptions]

        labels = detail_html.xpath('/html/body/div[3]/div[2]/div[2]/div/div[1]/a//text()')
        # Persist each label and collect its primary-key id.
        tag_ids = self.save_tag_to_mysql(labels)
        return download_urls, video_name, descriptions, labels, tag_ids

    def parse_detail_html(self, full_url):
        """Fetch and parse one detail page.

        :return: ``(download_url_str, description_str, labels_str,
            video_name, tag_ids)`` or None when the request failed (the URL
            is then pushed to the retry queue once).
        """
        print('full_url', full_url)
        try:
            next_html, detail_url = self.request_get_method(full_url)
        except Exception:
            # Fix: retry with the URL we actually tried to fetch. The
            # original read the unbound local `detail_url` and the unrelated
            # global `request_url` here, risking NameError / queueing the
            # wrong URL.
            if not self.distinct_retry_url_redis(full_url):
                q = connect_message_queue('page_url_retry_queue', url=self.redis_url, maxsize=10000, lazy_limit=True)
                q.put(full_url)
            return None
        if next_html is None:
            return None

        download_urls, video_name, descriptions, labels, tag_ids = self.parse_data(next_html)
        # Collapse the parsed lists into plain strings for storage.
        download_url_str = download_urls[0] if download_urls else ''
        description_str = descriptions[0] if descriptions else ''
        labels_str = ', '.join(labels) if labels else ''
        print('download_url_str', download_url_str, 'description_str', description_str, 'labels_str', labels_str)
        return download_url_str, description_str, labels_str, video_name, tag_ids

    def save_tag_to_mysql(self, labels):
        """Ensure every label exists in ``video_tag_info``; return their ids.

        Fix: the original had ``return tag_ids`` inside the loop, so only
        the first label was processed on the insert path and None was
        returned when every tag already existed.
        """
        select_tag_id_sql = 'select id from video_tag_info where tag = %s'
        insert_tag_sql = 'insert into video_tag_info (tag, create_on) values (%s, NOW())'
        tag_ids = []
        for label in labels:
            self.mysql_cursor.execute(select_tag_id_sql, (label,))
            row = self.mysql_cursor.fetchone()
            if row:
                tag_ids.append(row[0])
                continue

            self.mysql_cursor.execute(insert_tag_sql, (label,))
            self.mysql_conn.commit()

            # Re-select to learn the freshly assigned id.
            self.mysql_cursor.execute(select_tag_id_sql, (label,))
            row = self.mysql_cursor.fetchone()
            if row:
                tag_ids.append(row[0])
        return tag_ids

    def save_video_tag_relationship(self, video_id, tag_id):
        """Link a video to a tag, skipping links that already exist."""
        distinct_video_tag_sql = 'select id from video_tag_relationship where video_id = %s and video_tag_id = %s'
        self.mysql_cursor.execute(distinct_video_tag_sql, (video_id, tag_id))
        existing = self.mysql_cursor.fetchone()
        if existing:
            # Relationship already recorded; return it as the original did.
            return existing

        insert_video_tag_sql = 'insert into video_tag_relationship (video_id, video_tag_id, create_on) values (%s, %s, NOW())'
        self.mysql_cursor.execute(insert_video_tag_sql, (video_id, tag_id))
        self.mysql_conn.commit()

    def get_page_video_url(self, page_video_url):
        """GET a listing page and return its parsed lxml tree (None on failure)."""
        print('page_video_url', page_video_url)
        try:
            html, request_url = self.request_get_method(page_video_url)
        except Exception:
            # Fix: retry with the URL we attempted — the original used a
            # global `request_url` that may not exist yet on the first call.
            retry_result = self.distinct_retry_url_redis(page_video_url)
            print('get_page_video_url', retry_result)
            if not retry_result:
                q = connect_message_queue('page_url_retry_queue', url=self.redis_url, maxsize=10000, lazy_limit=True)
                q.put(page_video_url)
            return None
        tree = etree.HTML(html)
        print('tree', tree)
        return tree

    def run(self):
        """Crawl all listing pages for the query, then process every detail page."""
        tree = self.get_page_video_url(self.init_url)
        if tree is None:
            return None
        full_urls, page_video_urls = self.parse_list_html(tree)
        total_urls = list(full_urls)
        # Walk the pagination links from the first listing page. (Like the
        # original, this iterates a snapshot of the first page's links.)
        for page_video_url in page_video_urls[1:]:
            page_tree = self.get_page_video_url(page_video_url)
            if page_tree is None:
                continue
            urls, _ = self.parse_list_html(page_tree)
            total_urls += urls
        print('total_urls', len(total_urls))

        # Parse every collected detail-page URL.
        for full_url in total_urls:
            result = self.parse_detail_html(full_url)
            # Fix: the original unpacked 5 values here and crashed with a
            # TypeError whenever parse_detail_html returned bare None.
            if result is None:
                continue
            download_url_str, description_str, labels_str, video_name, tag_ids = result
            if not download_url_str:
                continue
            detail_data, download_data, md5_url = self.get_detail_data(
                description_str, download_url_str, labels_str, self.video_source, video_name)
            # Persist metadata, then hand the download task to Redis.
            self.save_full_to_datebase(detail_data)
            video_id = self.get_video_id(md5_url)
            self.distinct_url_redis(download_data)

            # Record the video/tag relationships.
            if not tag_ids or not video_id:
                continue
            for tag_id in tag_ids:
                self.save_video_tag_relationship(video_id[0], tag_id)
            # Fix: re-draw the pause every iteration instead of reusing one
            # module-level value computed at import time.
            time.sleep(random.randint(1, 3))

    def __del__(self):
        # __init__ can fail before the connections exist, and __del__ may
        # run during interpreter shutdown, so close defensively.
        try:
            self.mysql_cursor.close()
            self.mysql_conn.close()
        except Exception:
            pass


# Pause (seconds) between detail-page iterations; kept as a module-level name
# because older callers may read it. run() now re-draws its own pause.
random_time = random.randint(1, 3)
# Search keyword for this crawl run.
query = 'life'
# Candidate keywords for future runs:
# businessman, Business, factory, manufacture, Production, Consumption, consume,
# sports, cars, real estate, health, games, history, military, home, parenting,
# trade, commercial trade, commerce, teamwork, success, Entrepreneur,
# architecture, marketing, fintech, web, entertainment, finance,
# natural scenery, humanities, festivals, blockchain, cooperation,
# decentralized, data-analysis, dashboard, corporate, Technology, website,
# report, money, accounting, stock, bank, services, handshake, trust, tech,
# team, invest, work, job, code, office, budget, AI, Intelligence, cities,
# company

if __name__ == '__main__':
    # Guard so importing this module no longer kicks off a crawl as a side
    # effect (the original ran the spider at import time).
    mixkit = MixkitVideoSpiderProduce(query)
    mixkit.run()










