import random
import re
import time
import pymysql
import redis
import requests
import unicodedata
from utils.msg_queue import connect_message_queue
from setting.redis_config import redis_test_url
from setting.mysql_test import mysql45_config
from unsplash_spider_image.user_agent_kuaidaili_proxy import get_random_ua
from utils.md5 import MD5Utils
from tag_mapping import tags_mapping
import datetime

# redsi_test_url = "redis://192.168.1.22:6379/1"  -- IP changes dynamically,
# so the queue Redis URL comes from setting.redis_config instead.

# Unsplash internal ("napi") search endpoint; query params are appended.
xhr_url = 'https://unsplash.com/napi/search/photos?'
# Kuaidaili tunnel proxy used for every outbound request.
# NOTE(review): proxy credentials are hard-coded here — move to config/env.
proxy = {'http': 'http://t10635913038651:09ht71vf@tps152.kdlapi.com:15818/', 'https': 'http://t10635913038651:09ht71vf@tps152.kdlapi.com:15818/'}
REDIS_PASSWORD = None  # passed to redis.Redis below; None means no AUTH is sent


class UnsplashImageSpiderProduce(object):
    """Producer half of the Unsplash image spider.

    For a single search query this class:
      * pulls result pages from Unsplash's internal search API (``/napi``),
      * stores image metadata in MySQL (``image_info`` / ``tag_info`` /
        ``image_tag_relationship``),
      * de-duplicates image URLs through Redis sets, and
      * pushes download jobs onto a Redis message queue consumed elsewhere.
    """

    def __init__(self, query):
        # URL of the Redis instance backing the message queue.
        self.redis_url = redis_test_url
        # Normalize the search keyword to lower case so queue names and DB
        # rows stay consistent regardless of caller capitalization.
        self.query = query.lower()
        self.mysql_conn = pymysql.connect(**mysql45_config)
        self.mysql_cursor = self.mysql_conn.cursor()
        # db=2 holds the URL de-duplication sets (separate from the queue DB).
        self.redis_conn = redis.Redis(host='localhost', port=6379, db=2, password=REDIS_PASSWORD)

    def redis_new_queue(self, json_msg):
        """Push one download job onto the per-query Redis message queue."""
        queue = connect_message_queue(self.query, url=self.redis_url, maxsize=10000, lazy_limit=True)
        queue.put(json_msg)
        print('插入redis队列成功')

    def distinct_url_redis(self, json_msg):
        """Enqueue *json_msg* unless its md5_url was seen before.

        Uses SADD's return value (1 = newly added) so check-and-insert is a
        single atomic Redis call instead of the racy SISMEMBER-then-SADD.
        """
        md5_url = json_msg['md5_url']
        if self.redis_conn.sadd('unsplash_url_set', md5_url):
            self.redis_new_queue(json_msg)
        else:
            print('md5_url已存在跳过')

    def distinct_retry_url_redis(self, url):
        """Return True if *url* was already scheduled for retry; record it otherwise."""
        if self.redis_conn.sadd('retry_url_set', url):
            return False
        print('retry_url已存在跳过')
        return True

    def get_detail_data(self, slug, height, width, oss_image_url, tag_title, image_source, oss_image_name):
        """Build the metadata record and the download job for one image.

        Returns ``(data, download_data, md5_url)`` where *data* is the full
        ``image_info`` row payload and *download_data* is the smaller message
        put on the download queue.
        """
        # Hash once and reuse (the original computed the MD5 twice).
        md5_url = MD5Utils.encrypt(oss_image_url)
        data = {
            'oss_image_url': oss_image_url,
            'oss_image_name': oss_image_name,
            'md5_url': md5_url,
            'height': height,
            'width': width,
            'size': str(width) + 'x' + str(height),
            'tag_title': tag_title,
            'source': image_source,
            'slug': slug,
            'download_status': 0,  # 0 = not downloaded yet
        }
        print('data', data)

        download_data = {
            'oss_image_url': oss_image_url,
            'oss_image_name': oss_image_name,
            'md5_url': md5_url,
        }
        print('download', download_data)
        return data, download_data, md5_url

    def save_full_to_datebase(self, json_msg):
        """Insert one image_info row unless a row with the same md5_url exists.

        (Method name keeps the historical 'datebase' typo so callers keep working.)
        """
        md5_url = json_msg['md5_url']
        # Parameterized query — the original interpolated md5_url straight
        # into the SQL string (injection-prone and quote-fragile).
        select_sql = "SELECT * FROM image_info WHERE md5_url=%s"
        print('select_sql', select_sql)
        self.mysql_cursor.execute(select_sql, (md5_url,))
        if self.mysql_cursor.fetchone():
            # Duplicate found; skip the insert.
            print("Image %s already exists, skip insert" % json_msg['oss_image_name'])
            return

        insert_sql = (
            "INSERT INTO image_info (search_key, source, oss_image_name, oss_image_url, md5_url, "
            "height, width, size, tag_title, download_status, slug, create_on) "
            "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW())"
        )
        self.mysql_cursor.execute(insert_sql, (
            self.query, json_msg['source'], json_msg['oss_image_name'],
            json_msg['oss_image_url'], md5_url, json_msg['height'],
            json_msg['width'], json_msg['size'], json_msg['tag_title'],
            json_msg['download_status'], json_msg['slug']))
        self.mysql_conn.commit()

    def get_image_id(self, md5_url):
        """Return the image_info primary key for *md5_url*, or None if absent."""
        self.mysql_cursor.execute("SELECT id FROM image_info WHERE md5_url=%s", (md5_url,))
        row = self.mysql_cursor.fetchone()
        # Unwrap the one-column row so callers get a scalar id, not a tuple.
        image_id = row[0] if row else None
        print('image_id', image_id)
        return image_id

    def _build_headers(self):
        """Shared headers for the list/retry endpoints: fixed browser cookie + rotating UA."""
        new_user_agent = get_random_ua()
        print('new_user_agent', new_user_agent)
        return {
            'Connection': 'close',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
            'Cookie': 'require_cookie_consent=false; xp-homepage-modules=control; _ga=GA1.1.636001785.1698830047; _ga_21SLH4J369=GS1.1.1698830047.1.1.1698830589.0.0.0; _sp_ses.0295=*; uuid=57199300-7898-11ee-a294-91ba99a38132; azk-ss=true; lux_uid=169883062067561063; _sp_id.0295=6d99e695-57bc-41b5-85c0-b002300f664c.1698830619.1.1698830629..ef7284bf-c813-4b9d-9762-325d78e7de37..3000feb2-447f-42cf-a71b-b96a677c2e9b.1698830618652.4',
            'User-Agent': new_user_agent
        }

    def xhr_get_method(self, page, page_size):
        """Fetch one page of search results; return (parsed json, final URL)."""
        headers = self._build_headers()
        params = {
            'page': page,
            'query': self.query,
            'per_page': page_size}
        response = requests.get(xhr_url, params=params, headers=headers, proxies=proxy, timeout=45)
        response.encoding = 'utf-8'
        xhr_json = response.json()
        print('response', response.status_code)
        request_url = response.url
        print('request_url', request_url)
        return xhr_json, request_url

    def retry_url(self, retry_url):
        """Re-fetch a previously failed list URL; return (parsed json, final URL)."""
        headers = self._build_headers()
        # timeout added: the original call could hang forever on a dead proxy.
        response = requests.get(retry_url, headers=headers, proxies=proxy, timeout=45)
        response.encoding = 'utf-8'
        retry_json = response.json()
        request_url = response.url
        print('response', response.status_code)
        return retry_json, request_url

    def get_label_title(self, slug):
        """Fetch the detail-page JSON for *slug* (source of the image's tag list)."""
        next_url = 'https://unsplash.com/napi/photos/' + slug
        referer_url = 'https://unsplash.com/photos/' + slug
        print(next_url)
        new_user_agent = get_random_ua()
        print('new_user_agent', new_user_agent)

        # Browser-captured header set; the detail endpoint uses its own cookie.
        next_headers = {
            'authority': 'unsplash.com',
            'method': 'GET',
            "path": "/napi/photos/" + slug + "/related",
            'scheme': 'https',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'en-US',
            'Cookie': 'require_cookie_consent=false; xp-homepage-modules=control; _ga=GA1.1.636001785.1698830047; uuid=57199300-7898-11ee-a294-91ba99a38132; azk-ss=true; _sp_ses.0295=*; lux_uid=169892712058797154; _ga_21SLH4J369=GS1.1.1698926835.4.1.1698927543.0.0.0; _sp_id.0295=6d99e695-57bc-41b5-85c0-b002300f664c.1698830619.6.1698927564.1698912605.63e4a5c3-1926-4dad-82e0-7f9c38e4862f.c32909ff-0fa1-4852-ae1b-cfefd853b14a.ff816729-e9f9-4182-a568-d4db4518c17d.1698927119379.97',
            'Referer': referer_url,
            'Sec-Ch-Ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': new_user_agent
        }
        data = {
            'slug': slug
            }
        next_response = requests.get(next_url, params=data, headers=next_headers, proxies=proxy, timeout=15)
        next_response.encoding = 'utf-8'
        json_msg = next_response.json()
        print('next_response', next_response.status_code)
        return json_msg

    def sanitize_tag(self, tag):
        """Normalize a raw tag string.

        Drops control/non-printable characters, replaces non-ASCII characters
        with spaces, strips square brackets, and turns spaces into
        underscores. Returns None when *tag* is None.
        """
        if tag is None:
            return None
        # Remove non-printable characters (Unicode category "C*").
        tag = ''.join(c for c in tag if unicodedata.category(c)[0] != 'C')
        # Replace anything outside printable ASCII with a space.
        tag = ''.join(c if ord(c) < 128 else ' ' for c in tag)
        # Strip outer whitespace, drop brackets, spaces -> underscores.
        return tag.strip().replace('[', '').replace(']', '').replace(' ', '_')

    def parse_html(self, p, i):
        """Extract (slug, height, width, full_url, tag_title) from one search
        result dict *i*; *p* is only used for progress logging.

        The original ``x if x or len(x) != 0 else None`` guards raised
        ``TypeError: len(None)`` whenever a field was missing; plain
        truthiness checks give the intended "empty -> None" behaviour safely.
        """
        slug = i.get('slug') or None
        height = i.get('height') or None
        width = i.get('width') or None
        oss_image_url = (i.get('urls') or {}).get('full') or None
        tags_previews = i.get('tags_preview') or None
        if not tags_previews:
            return slug, height, width, oss_image_url, None

        tag_title = []
        for j in tags_previews:
            tag = self.sanitize_tag(j.get('title') or None)
            if not tag:
                # Skip empty/missing tags: the original appended None here,
                # which crashed the ','.join below.
                continue
            if not tags_mapping.get(tag):
                # Cache newly created tag ids to avoid repeated DB lookups.
                tags_mapping[tag] = self.save_tag_to_mysql(tag)
            tag_title.append(tag)
        # Store the tags as a single comma-separated string.
        tag_title = ','.join(tag_title)
        print('第', p, 'tag_title', tag_title)
        return slug, height, width, oss_image_url, tag_title

    def save_tag_to_mysql(self, tag):
        """Return the tag_info id for *tag*, inserting the row if needed."""
        self.mysql_cursor.execute('select id from tag_info where tag = %s', (tag,))
        row = self.mysql_cursor.fetchone()
        if row:
            # Unwrap the one-column row to a scalar id.
            return row[0]

        self.mysql_cursor.execute('insert into tag_info (tag, create_on) values (%s, NOW())', (tag,))
        self.mysql_conn.commit()
        # lastrowid avoids the original's second round-trip SELECT.
        return self.mysql_cursor.lastrowid

    def save_image_tag_relationship(self, image_id, tag_id):
        """Link *image_id* to *tag_id* (idempotent: skips existing links)."""
        self.mysql_cursor.execute(
            'select id from image_tag_relationship where image_id = %s and tag_id = %s',
            (image_id, tag_id))
        image_tag_id = self.mysql_cursor.fetchone()
        if image_tag_id:
            return image_tag_id

        self.mysql_cursor.execute(
            'insert into image_tag_relationship (image_id, tag_id, create_on) values (%s, %s, NOW())',
            (image_id, tag_id))
        self.mysql_conn.commit()

    def judge_premium_image(self, oss_image_url):
        """Classify one image URL.

        Returns ``(is_premium, oss_image_name, image_source)``. Premium or
        unusable images return ``(True, None, None)`` so callers skip them;
        otherwise a unique local file name and the source host are produced.
        """
        if not oss_image_url:
            print('图片不存在跳过')
            return True, None, None

        image_source = oss_image_url.split('//')[-1].split('/')[0] or None
        image_name = oss_image_url.split('/')[-1].split('?')[0] or None
        if not image_name:
            # Original called .startswith on None here and crashed.
            print('图片名或图片类型不存在，跳过')
            return True, None, None
        if image_name.startswith('premium'):
            # Premium (watermarked) assets are named "premium_photo-...".
            return True, None, None

        # Image format from the "fm=" query parameter, defaulting to jpg.
        # The original assigned the default and then unconditionally called
        # type_match.group(1), crashing when the parameter was absent.
        type_match = re.search(r'fm=(.*?)&', oss_image_url)
        image_type = type_match.group(1) if type_match else 'jpg'
        if not image_type:
            print('图片名或图片类型不存在，跳过')
            return True, None, None

        # Unique local name: <query>_<timestamp-with-microseconds>.<ext>
        now_time = datetime.datetime.now()
        now_time_str = now_time.strftime("%Y%m%d_%H%M%S.%f")
        oss_image_name = self.query + '_' + now_time_str + '.' + image_type
        # Only true if the query itself starts with "premium"; kept for parity.
        return oss_image_name.startswith('premium'), oss_image_name, image_source

    def _process_result(self, p, item):
        """Per-image pipeline shared by run() and retry_run(): parse, filter
        premium images, persist metadata, enqueue the download, link tags."""
        slug, height, width, oss_image_url, tag_title = self.parse_html(p, item)
        is_premium, oss_image_name, image_source = self.judge_premium_image(oss_image_url)
        if is_premium:
            # Premium image (watermarked) — skip it.
            return
        detail_data, download_data, md5_url = self.get_detail_data(
            slug, height, width, oss_image_url, tag_title, image_source, oss_image_name)
        # Non-premium: save metadata, then queue the (deduplicated) download.
        self.save_full_to_datebase(detail_data)
        image_id = self.get_image_id(md5_url)
        self.distinct_url_redis(download_data)

        # Persist image<->tag links.
        if not tag_title:
            return
        for tag in tag_title.split(','):
            tag_id = tags_mapping.get(tag) or None
            self.save_image_tag_relationship(image_id, tag_id)

    def run(self, pageindex, query, page_size):
        """Scrape one search-result page and persist every usable image.

        *query* is accepted for signature compatibility; the instance's own
        lower-cased query is what is actually sent.
        """
        # Local instead of the original `global request_url` — the global was
        # a NameError when the very first request failed.
        request_url = None
        try:
            page_json, request_url = self.xhr_get_method(pageindex, page_size)
        except Exception as e:
            print('pageindex', pageindex, '出错了', e)
            # Schedule the failed URL for a later retry, at most once.
            # NOTE(review): if the request itself failed, response.url was
            # never produced and there is nothing to re-queue.
            if request_url is not None and not self.distinct_retry_url_redis(request_url):
                q = connect_message_queue('page_url_retry_queue', url=self.redis_url, maxsize=10000, lazy_limit=True)
                q.put(request_url)
            return

        for p, item in enumerate(page_json['results']):
            self._process_result(p, item)
        print('第', pageindex, '页下载完成')
        time.sleep(random_time)

    def retry_run(self, url):
        """Re-process a page URL that failed earlier (same pipeline as run())."""
        # Start from the input URL so the except branch always has something
        # to re-queue (the original depended on module-level globals).
        request_url = url
        try:
            page_json, request_url = self.retry_url(url)
        except Exception as e:
            print('出错了', e)
            # Second-level retry queue, namespaced by the instance's query
            # (the original referenced the module-level ``query`` global).
            if not self.distinct_retry_url_redis(request_url):
                q = connect_message_queue(self.query + 'retry2', url=self.redis_url, maxsize=10000, lazy_limit=True)
                q.put(request_url)
            return

        for p, item in enumerate(page_json['results']):
            self._process_result(p, item)
        print(request_url + '页下载完成')
        time.sleep(random_time)

    def __del__(self):
        # Guard with getattr: __del__ can run even when __init__ failed
        # before the connections were created.
        cursor = getattr(self, 'mysql_cursor', None)
        conn = getattr(self, 'mysql_conn', None)
        try:
            if cursor is not None:
                cursor.close()
            if conn is not None:
                conn.close()
        except Exception:
            pass


# ---- script entry: scrape up to 200 result pages for a single query ----
random_time = random.randint(1, 3)  # delay (seconds) between pages, drawn once at startup
page = 1  # first page, used only to probe total_pages below
query = 'report'  # current search keyword (candidate keywords listed below)
#，,Technology, ,  , , , ,sports, cars, real estate, health, games, history, life, military, home, parenting
#//website/data-analysis/accounting/report/analysist//invest/equity/budget/financing/computer/office/company/architecture/cashflow///code/marketing/
#tech/fintech/web/cities/money/entertainment/finance/natural scenery/humanities/festivals/services/blockchain/cooperation/decentralized
#bank/stock/accounting/trust/handshake/fintech/web/'cities', 'money','entertainment','finance',data-analysis，website,dashboard,team，work、Artificial Intelligence/AI,Service,job,corporate,invest

# my_list = ['data-analysis', 'accounting','report','natural scenery', 'humanities', 'festivals', 'services', 'blockchain', 'cooperation', 'decentralized']
# for query in my_list:
page_size = 20  # results per API page
unsplash = UnsplashImageSpiderProduce(query)
# Probe request: only total_pages is used from this first response.
xhr_json, url = unsplash.xhr_get_method(page, page_size)
total_pages = xhr_json['total_pages']
print('total_pages', total_pages)
if total_pages > 200:
    total_pages = 200  # hard cap on pages scraped per query
# NOTE(review): start=7 looks like a manual resume point from an earlier run — confirm before reuse.
for pageindex in range(7, total_pages+1):  # money
    unsplash.run(pageindex, query, page_size)












