# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import requests
import os
import pdfkit
import shutil
# html文件转为pdf依赖包
import scrapy
from scrapy.pipelines.files import FilesPipeline
import pymysql
from .settings import MYSQL_CONNECT, REDIS_CONNECT, DEFAULT_REQUEST_HEADERS, FILES_STORE
import redis


class SingaporespiderPipeline(object):
    """Pipeline intended to dump scraped page source to a local JSON file.

    The actual write logic is currently disabled (see process_item).  The
    file handle is opened lazily in open_spider so that merely instantiating
    the pipeline cannot fail, and it is opened in append mode — the original
    'r' mode could never be written to and raised FileNotFoundError when the
    file did not exist.
    """

    def __init__(self):
        # Opened lazily in open_spider; None until then.
        self.filejson = None

    def open_spider(self, spider):
        # Append mode creates the file if missing and allows writing.
        self.filejson = open('D:/GlobalLawFiles/data.json', 'a', encoding='utf-8')

    def close_spider(self, spider):
        # Guard: the spider may close before open_spider ever ran.
        if self.filejson is not None:
            self.filejson.close()

    def process_item(self, item, spider):
        # Persisting the page source is currently disabled; re-enable by
        # uncommenting the lines below (ensure_ascii=False keeps CJK text
        # human-readable in the JSON file).
        # line = json.dumps(dict(item), ensure_ascii=False) + '\n'
        # self.filejson.write(line)
        return item


class MyFilesPipeline(FilesPipeline):
    """FilesPipeline subclass that stores each downloaded file under
    country/website/modular/ using the item's file name and extension."""

    def get_media_requests(self, items, info):
        """Yield one download Request per URL in items['file_urls'].

        Falsy URLs are skipped with ``continue`` — the original ``return``
        exited the generator, silently dropping every URL that came after
        the first empty one.
        """
        country = items['country']
        website = items['website']
        modular = items['modular']
        ext = items['ext']
        fina = items['fina']
        for fu in items['file_urls']:
            if not fu:
                continue  # skip empty URL instead of aborting the generator
            yield scrapy.Request(fu, meta={'country': country,
                                           'website': website,
                                           'modular': modular,
                                           'fina': fina,
                                           'ext': ext})

    def file_path(self, request, response=None, info=None):
        """Build the relative storage path and file name for a download.

        Note: uses backslash separators, so the layout is Windows-specific
        (consistent with the D:/ paths used elsewhere in this file).
        """
        filename = r'%s\%s\%s\%s.%s' % (
            request.meta['country'],
            request.meta['website'],
            request.meta['modular'],
            request.meta['fina'],
            request.meta['ext'])
        return filename


class MySqlpipeline(object):
    """Persist scraped treaty items into MySQL and mirror the SYSID to Redis.

    Flow per item:
      1. Skip the item if its SYSID already exists in MySQL.
      2. Insert the item using a parameterized query — item fields come from
         scraped (i.e. untrusted) web pages, so they must never be spliced
         into SQL text.  (The original interpolated every value and escaped
         only Title.)
      3. Add the SYSID to a Redis set; if that fails, delete the freshly
         inserted MySQL row so the two stores stay consistent.
    """

    # Kept as class attributes for backward compatibility; the connections
    # below read REDIS_CONNECT directly.  ("Reddis_password" typo preserved
    # in case external code references it.)
    Redis_host = REDIS_CONNECT['REDIS_HOST']
    Redis_port = REDIS_CONNECT['REDIS_PORT']
    Redis_db = REDIS_CONNECT['REDIS_DB']
    Reddis_password = REDIS_CONNECT['REDIS_PASSWD']

    def __init__(self):
        # MySQL connection (settings come from the project settings module).
        self.conn = pymysql.connect(host=MYSQL_CONNECT['MYSQL_HOST'],
                                    port=MYSQL_CONNECT['MYSQL_PORT'],
                                    db=MYSQL_CONNECT['MYSQL_DB'],
                                    user=MYSQL_CONNECT['MYSQL_USER'],
                                    passwd=MYSQL_CONNECT['MYSQL_PASSWD'],
                                    charset=MYSQL_CONNECT['MYSQL_CHARSET'])
        # Redis connection pool; decode_responses=True yields str, not bytes.
        self.Reidsconn = redis.ConnectionPool(host=REDIS_CONNECT['REDIS_HOST'],
                                              port=REDIS_CONNECT['REDIS_PORT'],
                                              db=REDIS_CONNECT['REDIS_DB'],
                                              password=REDIS_CONNECT['REDIS_PASSWD'],
                                              decode_responses=True)

    def open_spider(self, spider):
        pass

    def close_spider(self, spider):
        # Close the MySQL connection; the Redis pool needs no explicit close.
        self.conn.close()

    def process_item(self, item, spider):
        # The table name comes from trusted settings, so interpolating it is
        # acceptable; all VALUES are bound as query parameters below.
        table = MYSQL_CONNECT['MYSQL_TABLE']

        # Path of the stored PDF, derived from the item's classification.
        country = item['country']
        website = item['website']
        modular = item['modular']
        ext = item['ext']
        SYS_FLD_DIGITFILENAME = item['SYS_FLD_DIGITFILENAME'] + '.' + ext
        file_path = '/PDF/Treaty/%s/%s/%s/%s' % (country, website, modular, SYS_FLD_DIGITFILENAME)

        SYSID = item['SYSID']

        # Duplicate check: items already stored are returned untouched.
        try:
            with self.conn.cursor() as cursor:
                cursor.execute("select * from %s where SYSID = %%s" % table, (SYSID,))
                existing = cursor.fetchall()
            if existing:
                print('该数据已入库')
                return item
            print('该数据未入库')
        except Exception as e:
            print('mysql查询数据异常：' + str(e))

        # All 18 column values bound as parameters — no manual escaping needed
        # (pymysql.escape_string is also gone from modern pymysql releases).
        params = (item['Title'], item['SortA'], item['StateParty'],
                  item['PublishDate'], item['EffectiveDate'], item['SortB'],
                  item['SortC'], item['Articles'], item['Chapter'],
                  item['Section'], SYS_FLD_DIGITFILENAME, file_path,
                  item['DownLoadUrl'], item['DownLoadWebNameC'],
                  item['DownLoadWebNameE'], SYSID, item['Website'],
                  item['Isconversion'])
        insert_sql = ('insert into ' + table + '(Title,SortA,StateParty,'
                      'PublishDate,EffectiveDate,SortB,SortC,Articles,Chapter,'
                      'Section,SYS_FLD_DIGITFILENAME,FileUrl,DownLoadUrl,'
                      'DownLoadWebNameC,DownLoadWebNameE,SYSID,Website,'
                      'Isconversion) values (' + ','.join(['%s'] * 18) + ')')
        try:
            with self.conn.cursor() as cursor:
                cursor.execute(insert_sql, params)
            self.conn.commit()
            print('网站数据存入数据库成功')
            # Mirror the id into Redis; if that fails, delete the MySQL row
            # again so a later run will retry this item.
            redisaddcursor = redis.Redis(connection_pool=self.Reidsconn)
            try:
                redisaddcursor.sadd(REDIS_CONNECT['REDIS_KEY'], SYSID)
                redisaddcursor.close()
            except Exception as e:
                print('网站id存入redis数据库异常:' + str(e))
                try:
                    with self.conn.cursor() as cursor:
                        cursor.execute("DELETE FROM %s WHERE SYSID = %%s" % table, (SYSID,))
                        self.conn.commit()
                except Exception as e:
                    print('mysql删除数据异常：' + str(e))
        except Exception as e:
            print('网站数据存入mysql数据库异常：' + str(e))
        return item


# 将detail中的原文内容转为pdf格式保存
class MydetaHTMLpipeline(object):
    """Save item['detail'] HTML to a local .html file, then convert it to PDF.

    pdfkit writes the PDF into the current working directory first; the file
    is then moved under FILES_STORE/country/website/modular/.
    """

    def process_item(self, item, spider):
        country = item['country']
        website = item['website']
        modular = item['modular']
        fina = item['fina']
        detail = item['detail']
        print('成功进入函数')

        # Nothing to do for items without extracted detail HTML (truthiness
        # check also tolerates detail=None, which len() would not).
        if not detail:
            return item

        try:
            # Directory for the intermediate html file; exist_ok=True avoids
            # the check-then-create race of an exists()/makedirs pair.
            file_path = 'D:/GlobalLawFiles/Treaty/html/%s/%s/%s/' % (country, website, modular)
            os.makedirs(file_path, exist_ok=True)
            file_name = '%s.html' % fina
            html_file = file_path + file_name
            with open(html_file, 'w', encoding='utf-8') as file_html:
                print('成功建立html文档')
                # Wrap the scraped fragment in a minimal html document.
                file_html.write('<!DOCTYPE html>' + '\n' +
                                '<html>' + '\n' +
                                '<head>' + '\n' +
                                '<title> Treaty </title>' +
                                '</head>' + '\n' +
                                '<body>' + detail + '</body>' + '</html>'
                                )

            with open(html_file, 'r', encoding='utf-8') as fhtml:
                pdf_path = '%s\\%s\\%s\\%s\\' % (FILES_STORE, country, website, modular)
                os.makedirs(pdf_path, exist_ok=True)
                out_path = pdf_path + '%s.pdf' % fina
                options = {
                    'encoding': "utf-8"
                }
                # pdfkit writes '<fina>.pdf' into the current working dir...
                pdfkit.from_file(input=fhtml, output_path='%s.pdf' % fina, options=options)
            # ...so move it to its final location afterwards.
            filepaths = os.getcwd() + '\\%s.pdf' % fina
            shutil.move(filepaths, out_path)

        except Exception as e:
            print('操作异常：' + str(e))
        return item


# 将detailUrl网址中的原文内容原文存入本地html并转换为pdf文件
class MydetaHTMLUrlpipeline(object):
    """For items with a detail-page URL but no direct download URL, fetch the
    page, save it as a local .html file and convert that file to PDF."""

    def process_item(self, item, spider):
        country = item['country']
        website = item['website']
        modular = item['modular']
        fina = item['fina']
        downloadUrl = item['downloadUrl']
        detailUrl = item['detailUrl']

        # Guard clause: only handle items that have a detail URL and no
        # direct download URL; everything else passes through untouched.
        if downloadUrl or not detailUrl:
            return item

        try:
            # Directory for the intermediate html file; exist_ok=True avoids
            # the check-then-create race of an exists()/makedirs pair.
            file_path = 'D:/GlobalLawFiles/Treaty/html/%s/%s/%s/' % (country, website, modular)
            os.makedirs(file_path, exist_ok=True)
            file_name = '%s.html' % fina
            print(file_path + file_name)
            # SECURITY NOTE: verify=False disables TLS certificate checking;
            # kept for compatibility with the original behavior, but this
            # should be revisited.
            html_response = requests.get(detailUrl, headers=DEFAULT_REQUEST_HEADERS, verify=False)
            htmls = html_response.text
            with open(file_path + file_name, 'w', encoding='utf-8') as file_html:
                print('成功建立html文档')
                file_html.write(htmls)

            with open(file_path + file_name, 'r', encoding='utf-8') as fhtml:
                pdf_path = '%s/%s/%s/%s/' % (FILES_STORE, country, website, modular)
                os.makedirs(pdf_path, exist_ok=True)
                out_path = pdf_path + '%s.pdf' % fina
                options = {
                    'encoding': "utf-8"
                }
                # pdfkit writes '<fina>.pdf' into the current working dir...
                pdfkit.from_file(input=fhtml, output_path='%s.pdf' % fina, options=options)
            # ...so move it to its final location afterwards.
            filepaths = os.getcwd() + '\\%s.pdf' % fina
            shutil.move(filepaths, out_path)

        except Exception as e:
            print('操作异常：' + str(e))
        return item


# 将dowloadUrl网址中pdf格式的原文内容存入本地pdf文件
class Myhtmlpdfpipeline(object):
    """For items carrying a direct download URL (and no file_urls handled by
    the FilesPipeline), download the PDF body and store it locally."""

    def process_item(self, item, spider):
        country = item['country']
        website = item['website']
        modular = item['modular']
        fina = item['fina']
        file_urls = item['file_urls']
        downloadUrl = item['downloadUrl']

        # Guard clause: only act when there is a direct download URL and the
        # FilesPipeline has nothing to do; otherwise pass through untouched.
        if file_urls or not downloadUrl:
            return item

        try:
            # Browser-like User-Agent so the origin does not block the fetch.
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                              'Chrome/76.0.3809.87 Safari/537.36 '
            }
            r = requests.get(downloadUrl, headers=headers)
            # Target directory; exist_ok=True avoids the check-then-create
            # race of the original exists()/makedirs pair.
            file_path = '%s/%s/%s/%s/' % (FILES_STORE, country, website, modular)
            os.makedirs(file_path, exist_ok=True)
            file_name = '%s.pdf' % fina
            # 'wb': the response body is raw pdf bytes.  The context manager
            # guarantees the handle is closed even if write() raises (the
            # original bare open()/close() pair leaked it on error).
            with open(file_path + file_name, 'wb') as pdf_file:
                pdf_file.write(r.content)
            r.close()
        except Exception as e:
            print('操作异常：' + str(e))
        return item
