# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
# --- Standard library ---
import json
import os
import random
import time
import traceback
from traceback import format_exc
from urllib.parse import urlparse

# --- Third party ---
import pymysql
import redis
import requests
import urllib3
from jsonpath import jsonpath
from scrapy.pipelines.files import FilesPipeline
from scrapy.utils.project import get_project_settings

# --- Project local ---
# NOTE: the star import supplies check_environ, CURRENT_DATE, REDIS_* /
# FILE_* constants, drop_dir_temp, and the *_STORE paths used below.
from Congressgov.utils.gen_txt_path import gen_txt_path
from Congressgov.utils.proxy_pool import Proxy
from Congressgov.utils.update_table_manage import *
from Congressgov.utils.UserAgent import User_Agents
from Congressgov.utils.filter import Filter

# Downloads run with verify=False, so silence the InsecureRequestWarning spam.
urllib3.disable_warnings()

# BUG FIX: check_environ comes from the update_table_manage star import, so
# this call must run AFTER the imports; the original invoked it before any
# import that defines it, raising NameError when the module was loaded.
ENVIRON_ = check_environ()

# os.environ['NO_PROXY'] = 'congress.gov'

# Workaround kept for reference ("avoid download failures" by forcing HTTP/1.0):
# import http.client
# http.client.HTTPConnection._http_vsn = 10
# http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'

setting = get_project_settings()

# Proxy endpoints configured in Scrapy settings; the SOCKS5 list is the one
# actually used for file downloads (see SelfDefineFileBasePipeline.download).
PROXIES_LIST_SOCKS5 = setting.get('PROXIES_LIST_SOCKS5')
PROXIES_LIST = setting.get('PROXIES_LIST')




class SelfDefineFileBasePipeline:
    """Base pipeline that downloads item file URLs with ``requests``.

    Subclasses must set ``SAVE_STORE`` (permanent store root) and may set
    ``UPDATE_STORE`` (a dated "update" copy of the same tree).  Successfully
    downloaded URLs are recorded in a Redis set (``FILE_SET``) so they are
    skipped on later runs; failed URLs are retried once more when the spider
    closes and any still-failing URLs are dumped to a JSON file.
    """

    def __init__(self, *args, **kwargs):
        self.current_date = CURRENT_DATE
        # Subclasses assign these store roots after calling super().__init__.
        self.SAVE_STORE = None
        self.UPDATE_STORE = None
        self.project_path = os.path.dirname(__file__)
        self.json_path = os.path.join(self.project_path, 'json')
        self.proxy_pool = Proxy()
        self.error_file_list = []  # URLs whose download failed (retried at close)
        # Header template; a per-request copy gets a randomized user-agent.
        self.headers = {
            'authority': 'www.congress.gov',
            'pragma': 'no-cache',
            'cache-control': 'no-cache',
            'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="97", "Chromium";v="97"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4676.0 Safari/537.36',
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            'sec-fetch-site': 'none',
            'sec-fetch-mode': 'navigate',
            'sec-fetch-user': '?1',
            'sec-fetch-dest': 'document',
            'accept-language': 'zh-CN,zh;q=0.9',
        }
        self.session = requests.Session()
        self.connect_redis()

    def connect_redis(self):
        """Open the Redis connection used for the downloaded-URL dedup set."""
        self.redis = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PWD, db=FILE_DB)

    def download(self, file_url, file_path, update_file_path=None):
        """Download ``file_url`` and save it to ``file_path``.

        When ``update_file_path`` is given, an identical copy is written there
        as well.  Retries up to five times, stopping on the first success.

        :param file_url: absolute URL of the file to fetch
        :param file_path: destination path under SAVE_STORE
        :param update_file_path: optional destination under UPDATE_STORE
        :return: True on success; None after five failed attempts
        """
        for _ in range(5):
            try:
                # Copy the template so the random UA does not mutate the
                # shared self.headers dict across requests.
                headers = dict(self.headers)
                headers['user-agent'] = random.choice(User_Agents)
                if ENVIRON_:
                    proxy_ = random.choice(PROXIES_LIST_SOCKS5)
                    proxies = self.gen_proxies(proxy_)
                    print("本次文件下载使用了代理:{}".format(proxy_))
                    response = self.session.get(file_url, headers=headers, proxies=proxies, verify=False,
                                                timeout=60 * 10,
                                                stream=True)
                else:
                    response = self.session.get(file_url, headers=headers, verify=False, timeout=60 * 10, stream=True)

                # Treat HTTP errors (403/404/5xx) as failures so they retry
                # instead of an error page being saved and marked done.
                response.raise_for_status()
                content = response.content

                # BUG FIX: the original never wrote file_path, so nothing was
                # ever saved under SAVE_STORE (only the update copy, when
                # present) even though handle_single_url created its directory.
                with open(file_path, 'wb') as f:
                    f.write(content)
                print("文件下载成功:{}".format(file_path))

                if update_file_path:
                    with open(update_file_path, 'wb') as f:
                        f.write(content)
                    print("更新文件下载成功:{}".format(update_file_path))
                time.sleep(0.5)
                return True
            except Exception:
                print("文件下载异常 :{}".format(file_url))
                print(traceback.format_exc())

    def gen_proxies(self, proxy_):
        """Build a requests-style proxies mapping from a single proxy URL."""
        proxies = {
            'http': proxy_,
            'https': proxy_
        }
        return proxies

    def process_item(self, item, spider):
        """Download every URL in item['file_urls'] not already in Redis.

        Successful URLs are added to FILE_SET; failures are queued in
        ``error_file_list`` for a final retry in :meth:`close_spider`.
        """
        file_urls = item.get('file_urls', [])

        for file_url in file_urls:
            if self.redis.sismember(FILE_SET, file_url):
                continue
            result = self.handle_single_url(file_url)

            if result:
                self.redis.sadd(FILE_SET, file_url)
            else:
                self.error_file_list.append(file_url)

        return item

    def handle_single_url(self, file_url):
        """Resolve the storage path(s) for ``file_url`` and download it.

        :param file_url: absolute URL of the file to fetch
        :return: True on success, falsy on failure
        """
        path = self.file_path(file_url)

        file_path = os.path.join(self.SAVE_STORE, path)
        # exist_ok avoids the race between the existence check and makedirs.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)

        update_file_path = None
        if self.UPDATE_STORE:
            update_file_path = os.path.join(self.UPDATE_STORE, path)
            os.makedirs(os.path.dirname(update_file_path), exist_ok=True)

        return self.download(file_url, file_path, update_file_path)

    def file_path(self, url):
        """Map a URL to a relative storage path (URL path minus leading '/').

        'format=txt' URLs are delegated to gen_txt_path, which builds a
        synthetic path for them.
        """
        if 'format=txt' in url:
            path = gen_txt_path(url)
        else:
            parse_result = urlparse(url)
            path = parse_result.path

        if path.startswith('/'):
            path = path[1:]
        return path

    def last_handle(self):
        """Give every failed URL one more download attempt at shutdown.

        BUG FIX: the original removed items from ``error_file_list`` while
        iterating it, which skips the element after each successful retry;
        we now rebuild the list instead.
        """
        still_failing = []
        for file_url in self.error_file_list:
            if not self.handle_single_url(file_url):
                still_failing.append(file_url)
        self.error_file_list = still_failing

    def close_spider(self, spider):
        """Final retry of failed URLs, dump the remainder to JSON, clean up."""
        self.last_handle()

        error_image_path = os.path.join(self.json_path,
                                        '{}_{}_error_file_list.json'.format(spider.name, self.current_date))

        with open(error_image_path, 'w') as f:
            json.dump(self.error_file_list, f)
        # Finally drop the temporary update directory.
        drop_dir_temp(FILES_STORE_UPDATE)


class MyFilePipeline(SelfDefineFileBasePipeline):
    """File-download pipeline storing files under the permanent FILES_STORE."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Permanent storage root; no dated update copy for this pipeline.
        self.SAVE_STORE = FILES_STORE
        

class MyFileUpdatePipeline(MyFilePipeline):
    """File-download pipeline that also keeps a dated "update" copy.

    In addition to the permanent FILES_STORE tree inherited from
    MyFilePipeline, every file is duplicated under
    FILES_STORE_UPDATE/<CURRENT_DATE>/.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.UPDATE_STORE = os.path.join(FILES_STORE_UPDATE, CURRENT_DATE)


class MyImagePipeline(SelfDefineFileBasePipeline):
    """Image-download pipeline storing pictures under IMAGES_STORE.

    Also keeps a separate record (``no_jpg_url_list``) of the content pages
    whose items carried an empty image URL.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.SAVE_STORE = IMAGES_STORE
        self.no_jpg_url_list = []  # content_urls of items without an image

    def process_item(self, item, spider):
        for image_url in item.get('image_urls', []):
            if not image_url:
                # Item has no picture: remember its page URL and stop here.
                self.no_jpg_url_list.append(item['content_url'])
                return item
            if not self.handle_single_url(image_url):
                self.error_file_list.append(image_url)
        return item

    def close_spider(self, spider):
        # Give the failed downloads one last attempt.
        self.last_handle()

        # Persist both the remaining failures and the "no image" records
        # into the json/ directory, keyed by spider name and date.
        prefix = '{}_{}'.format(spider.name, self.current_date)
        error_image_path = os.path.join(self.json_path, prefix + '_error_file_list.json')
        no_jpg_url_list = os.path.join(self.json_path, prefix + '_no_jpg_url_list.json')

        with open(error_image_path, 'w') as f:
            json.dump(self.error_file_list, f)

        with open(no_jpg_url_list, 'w') as f:
            json.dump(self.no_jpg_url_list, f)

        # Finally drop the temporary update directory.
        drop_dir_temp(IMAGES_STORE_UPDATE)


class MyImageUpdatePipeline(MyImagePipeline):
    """Image pipeline that additionally writes a dated "update" copy.

    Every image is duplicated under IMAGES_STORE_UPDATE/<CURRENT_DATE>/ on
    top of the permanent IMAGES_STORE tree handled by MyImagePipeline.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.UPDATE_STORE = os.path.join(IMAGES_STORE_UPDATE, CURRENT_DATE)





