# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# --- standard library ---
import json
import os
import random
import time
import traceback
from traceback import format_exc
from urllib.parse import urlparse

# --- third-party ---
import pymysql
import redis
import requests
import urllib3
from jsonpath import jsonpath
from scrapy.pipelines.files import FilesPipeline
from scrapy.utils.project import get_project_settings

# --- project-local ---
from Congressgov.utils.filter import Filter
from Congressgov.utils.gen_txt_path import gen_txt_path
from Congressgov.utils.proxy_pool import Proxy
from Congressgov.utils.update_table_manage import *
from Congressgov.utils.UserAgent import User_Agents

# Silence urllib3's InsecureRequestWarning for unverified HTTPS requests.
urllib3.disable_warnings()

# NOTE(review): `check_environ` is not imported by name; it presumably comes
# from the `update_table_manage` star import above — confirm. It must run
# AFTER that import; the original module called it before any import that
# could define it, which raises NameError at import time.
ENVIRON_ = check_environ()

# os.environ['NO_PROXY'] = 'congress.gov'

# Workaround kept for reference: force HTTP/1.0 to avoid download failures.
# import http.client
# http.client.HTTPConnection._http_vsn = 10
# http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0'

setting = get_project_settings()

MYSQL_SETTINGS = setting["MYSQL"]
PROXIES_LIST_SOCKS5 = setting.get('PROXIES_LIST_SOCKS5')
PROXIES_LIST = setting.get('PROXIES_LIST')

class MYSQLPipeline:
    """
    Storage pipeline: routes each item to the MySQL table named by the
    item's ``table`` field and inserts it, with reconnect/retry handling
    for the common pymysql failure modes and redis-backed de-duplication.
    """

    def __init__(self):
        """Connect to MySQL and prepare the local max-id bookkeeping file."""
        self.project_path = os.path.dirname(__file__)
        # Directory holding max_id_record.json (per-table export watermark).
        self.json_path = os.path.join(self.project_path, 'json')
        self.init_max_id_record_json()

        self.table = ''
        # Tables whose CREATE TABLE ran during this run; used for export
        # after the crawl finishes.
        self.table_list = []
        self.init_connect()
        # Original printed an undefined global ``HOST``; use the configured
        # host, consistent with init_connect().
        print("MYSQL 数据库 :{} 链接成功".format(MYSQL_SETTINGS["host"]))
        self.filter = Filter()
        # Table being written today.
        self.table_temp = None

    def init_max_id_record_json(self):
        """
        Ensure the json/ directory and max_id_record.json file exist and
        record the file's path as ``self.max_id_record_json``.

        NOTE(review): this method was called from __init__ but missing from
        the visible class; reconstructed from how save_max_id/get_max_id use
        ``self.max_id_record_json`` — confirm against the original intent.
        """
        os.makedirs(self.json_path, exist_ok=True)
        self.max_id_record_json = os.path.join(self.json_path, 'max_id_record.json')
        if not os.path.exists(self.max_id_record_json):
            with open(self.max_id_record_json, 'w') as f:
                json.dump({}, f)

    def init_connect(self):
        """(Re)connect to MySQL, retrying every 5 seconds until it succeeds."""
        while True:
            try:
                self.conn = pymysql.connect(**MYSQL_SETTINGS)
                self.cursor = self.conn.cursor()
                print("mysql数据库 -{}- 连接成功".format(MYSQL_SETTINGS["host"]))
                return True
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt can
                # still break the retry loop; log the failure before retrying.
                print(format_exc())
                time.sleep(5)

    def process_item(self, item, spider):
        """
        Extract the parsed fields from ``item`` and insert them into the
        table named by ``item['table']``.

        :param item: scrapy Item (converted to dict); must carry a ``table`` key
        :param spider: the running spider; ``spider.table`` keys the redis de-dup set
        :return: the item, unchanged, for downstream pipelines
        """
        item = dict(item)
        table = item['table']
        # De-dup key recorded in redis after a successful insert.
        pre_id = ''

        if table == "MYSQL_RECORD_TABLE":
            # NOTE(review): record_to_file_urls is not defined in this class
            # (nor anywhere visible in this file); confirm it is provided
            # elsewhere, otherwise this line raises AttributeError.
            item = self.record_to_file_urls(item)

            congress = item['congress']
            Date_and_Issue_No = item['Date_and_Issue_No']
            month = item['month']
            # Nested structures are stored as JSON text columns.
            Daily_Digest = json.dumps(item['Daily_Digest'], ensure_ascii=False)
            Senate = json.dumps(item['Senate'], ensure_ascii=False)
            House = json.dumps(item['House'], ensure_ascii=False)
            Extensions_of_Remarks = json.dumps(item['Extensions_of_Remarks'], ensure_ascii=False)
            Entire_Issue = json.dumps(item['Entire_Issue'], ensure_ascii=False)

            val = (
                congress, Date_and_Issue_No, month, Daily_Digest, Senate, House,
                Extensions_of_Remarks, Entire_Issue
            )
            # Table name cannot be a bound parameter; it comes from our own
            # item definition, not user input. Values are parameterized.
            sql = '''insert into {}
                        (`congress`,`Date_and_Issue_No`,`month`, `Daily_Digest`, 
                        `Senate`,`House`,`Extensions_of_Remarks`,`Entire_Issue`
                        )
                        values
                        ({})'''.format(table, ",".join(["%s" for i in range(len(val))]))

            # This section is special: Date_and_Issue_No stands in for the
            # content_url used as the de-dup key elsewhere.
            content_url = Date_and_Issue_No

        else:
            print(" 出现未定义的数据库表 : {} ".format(table))
            return item

        try:
            # ping(reconnect=True) transparently re-establishes a dropped link.
            self.conn.ping(reconnect=True)
            self.cursor.execute(sql, val)
            self.conn.commit()
            print('数据库插入成功')
            pre_id = content_url.split('?')[0].strip()
            self.filter.update_redis_set(pre_id, spider.table)

        except pymysql.err.InterfaceError as error:
            # Connection became invalid mid-operation: reconnect and retry.
            print('数据库错误（链接断开）{}'.format(error))
            self.again_insert(item, sql, val)
        except pymysql.err.OperationalError as e:
            # Connection-level failure (e.g. server went away): retry.
            print('数据库错误（链接）  请查看\n{}'.format(format_exc()))
            self.again_insert(item, sql, val)
        except pymysql.err.InternalError as e:
            # Bad statement/values: roll back, don't retry.
            self.conn.rollback()
            print('数据库回滚  请查看\n{}'.format(e))
        except pymysql.err.IntegrityError as e:
            # Duplicate key (e.g. md5 already stored): roll back, don't retry.
            self.conn.rollback()
            print('数据库回滚 {}'.format(e))
        except Exception as e:
            print('数据库错误（其他）  请查看\n{}'.format(format_exc()))

        return item

    def again_insert(self, item, sql, val):
        """
        Retry an insert that failed because the connection dropped.

        Reconnects, then tries up to 30 times (10s apart), rebuilding the
        connection every 10 attempts; gives up immediately on a duplicate key.
        """
        self.init_connect()

        for i in range(1, 31):
            try:
                self.conn.ping(reconnect=True)
                self.cursor.execute(sql, val)
                self.conn.commit()
                print('数据库 重新插入成功')
                return
            except pymysql.err.IntegrityError as e:
                # Row already exists — no point retrying.
                self.conn.rollback()
                print('数据库回滚 {}'.format(e))
                break
            except Exception as e:
                self.conn.rollback()
                print('数据库错误（重新插入） 回滚')
                time.sleep(10)
            if i % 10 == 0:
                # Periodically rebuild the connection from scratch.
                self.init_connect()

    def create_table(self, sql, table):
        """
        Create the (date-stamped) table and remember it in self.table_list
        so its data can be exported when the run ends.

        :param sql: CREATE TABLE statement
        :param table: table name
        """
        try:
            self.cursor.execute(sql)
            self.conn.commit()
            print("创建表成功...:{}".format(table))
            self.table_list.append(table)
        except Exception as e:
            print("创建表失败!!!:{}".format(table))
            print(e)
            print(traceback.format_exc())

    def select_table(self):
        """
        Return the names of all tables in the current database.

        :return: list of table-name strings
        """
        sql = "show tables;"
        self.cursor.execute(sql)
        tables = self.cursor.fetchall()
        tables = [table_tuple[0] for table_tuple in tables]
        return tables

    def save_max_id(self, table):
        """Query max(id) of ``table`` and persist it in max_id_record.json."""
        sql = 'select max(id) from {};'.format(table)
        self.cursor.execute(sql)
        self.conn.commit()
        res = self.cursor.fetchone()[0]
        if res is None:
            # Empty table: store 0 so get_max_id keeps returning an int.
            res = 0
        print("获取到的最大id为:{}".format(res))
        try:
            with open(self.max_id_record_json, 'r') as f:
                obj = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            # Missing/corrupt record file: start a fresh mapping.
            obj = {}

        obj[table] = res
        with open(self.max_id_record_json, 'w') as ff:
            json.dump(obj, ff)

    def get_max_id(self, table):
        """Return the last recorded max id for ``table`` (0 when unknown)."""
        try:
            with open(self.max_id_record_json, 'r') as f:
                obj = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            return 0
        return obj.get(table, 0)

    def close_spider(self, spider):
        """Record the table's max id and release the DB connection."""
        # Max id recorded before this run (kept for the export step below).
        max_id = self.get_max_id(spider.table)
        # Export data
        # dump_table(spider.table, max_id, MYSQL_ROOT_PWD, CURRENT_DIR, CURRENT_DATE)
        self.save_max_id(spider.table)

        self.cursor.close()
        self.conn.close()



