# -*- coding: utf-8 -*-
import json
import os
from time import sleep

import scrapy
from bs4 import BeautifulSoup as bs
import requests
import subprocess
import xlrd
import pymongo
import bson
from utils.PDFReader import CPdf2TxtManager


class FujianSpider(scrapy.Spider):
    """Spider that crawls public documents from www.fjjs.gov.cn.

    Walks the paginated search endpoint, fetches each hit's detail page,
    extracts the body text plus the contents of any attachments
    (doc/docx via the ``antiword`` CLI, xls/xlsx via ``xlrd``, pdf via
    ``CPdf2TxtManager``) and stores one record per document in MongoDB.
    """

    name = "fujian"
    allowed_domains = ["www.fjjs.gov.cn"]
    # Search endpoint; the 1-based page number is appended to this URL.
    base_url = 'http://www.fjjs.gov.cn/was5/web/search?channelid=251380&sortfield=-docorderpri%2C-docreltime&classsql' \
               '=chnlid%3D2579%2C3781%2C3782%2C3783%2C3784%2C3785%2C3786%2C3787%2C3788%2C3789%2C3790%2C3791%2C3792' \
               '%2C3793%2C3794%2C3795%2C3796%2C3797&random=0.35864295771482335&prepage=10&page='
    start_urls = [base_url + '1']

    # Target collection for scraped items.
    # NOTE(review): the client connects at import time of this module —
    # confirm that is acceptable for the deployment.
    db = pymongo.MongoClient("127.0.0.1", 27017).tongji_zjj.spider_info

    # Headers reused for all plain `requests` downloads (detail pages and
    # attachment files) so they look like the same browser session.
    download_header = {
        'accept': "text/plain, */*; q=0.01",
        'accept-encoding': "gzip, deflate",
        'accept-language': "zh-CN,zh;q=0.9",
        'cache-control': "no-cache",
        'connection': "keep-alive",
        'cookie': "JSESSIONID=C9A0179ADD3FAA97118C0822A629404E",
        'dnt': "1",
        'host': "www.fjjs.gov.cn",
        'pragma': "no-cache",
        'referer': "http://www.fjjs.gov.cn/xxgk/zxwj/",
        'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/65.0.3325.181 Safari/537.36",
        'postman-token': "b69d9e6f-6f89-df81-c85e-e054bbd75e40"
    }

    # Timeout (seconds) for every plain `requests` call so a stalled
    # download cannot hang the crawl forever.
    REQUEST_TIMEOUT = 30

    def parse(self, response):
        """Schedule one request per result-list page.

        The total page count (1122) was hard-coded from manual observation;
        the first response is not parsed for it. TODO(review): derive the
        page count from the response instead of the constant.
        """
        # The original also json.loads()-ed the first page here, but the
        # result was never used — dropped as dead work.
        for page in range(1, 1123):
            yield scrapy.Request(
                url=self.base_url + str(page),
                callback=self.parse_page,
                meta={"pageCount": page},
            )

    def parse_page(self, response):
        """Parse one pseudo-JSON result page and store every document on it."""
        # Running item counter: 10 hits per page, used only for progress logs.
        processCount = response.meta["pageCount"] * 10 - 10
        docObj = json.loads(self.process_html(response.text))
        for doc in docObj["docs"]:
            # Fetch the detail page outside Scrapy so attachments can be
            # downloaded synchronously in the same pass.
            response_detail = requests.get(
                doc['url'], headers=self.download_header,
                timeout=self.REQUEST_TIMEOUT)
            itemInfo = self.parse_detail(response_detail, doc, processCount)
            # insert_one replaces the deprecated/removed pymongo save();
            # items carry no _id, so save() always performed an insert anyway.
            self.db.insert_one(itemInfo)
            processCount += 1

    def process_html(self, html):
        """Normalise the site's pseudo-JSON/text into parseable JSON.

        Converts single to double quotes, strips whitespace/entities and
        removes the trailing template record the endpoint appends.
        """
        return html.replace("'", '"') \
            .replace(" ", "") \
            .replace("\n", "") \
            .replace("\r", "") \
            .replace("\t", "") \
            .replace("<br>", "") \
            .replace("&nbsp;", "") \
            .replace(',{"title":"文章标题","url":"链接","time":"发布时间","content":"正文","chnl":"栏目名"}', "") \
            .replace(u'\u3000', "")
        # BUG FIX above: the original u'\3000' was the octal escape '\x18'
        # followed by '00'; the intent was the ideographic space U+3000.

    def getFileBaseUrl(self, reUrl):
        """Return the directory part of a detail URL (everything before '/t...')."""
        return reUrl.split("/t")[0]

    def parse_detail(self, response, baseInfo, aCount):
        """Extract body text and attachment contents for one document.

        Parameters:
            response: `requests` response of the document detail page.
            baseInfo: dict from the result list; enriched in place.
            aCount:   running item counter, used only for progress logging.

        Returns:
            baseInfo, always — on any error the defaults set below are kept
            (best-effort scraping, the crawl must not stop on one bad page).
        """
        self.logger.warning("目前执行的是第：" + str(aCount) + "个")
        # Defaults so callers always receive a complete record, even on failure.
        baseInfo['fileCount'] = 0
        baseInfo['fileContent'] = ""
        baseInfo['fileRealName'] = []
        baseInfo['fileRealInfo'] = []
        baseInfo['pdfToHtmlInfo'] = []
        baseInfo['province_type'] = 'fujian'
        try:
            htmlOBJ = bs(str(response.content, 'utf-8'), 'lxml')
            # Two page layouts exist: newer pages wrap the editor div in
            # #detailCon, older ones expose .TRS_Editor directly.
            detailCon = htmlOBJ.find("div", {"id": "detailCon"})
            if detailCon:
                trs_editor = detailCon.find("div", {"class": "TRS_Editor"})
                htmlStr = trs_editor.get_text()
            else:
                trs_editor = htmlOBJ.find("div", {"class": "TRS_Editor"})
                if trs_editor:
                    trs_editor_ps = trs_editor.find_all("p", {"align": "justify"})
                    # BUG FIX: the original tested len(trs_editor) — the
                    # div's children — instead of the <p> list iterated below.
                    if len(trs_editor_ps) > 0:
                        htmlStr = "".join(p.get_text() for p in trs_editor_ps)
                    else:
                        htmlStr = ""
                else:
                    htmlStr = ""
                    print("获取p失败")

            # Page text collected; now handle the attachments in div.xl_fj.
            fjDiv = htmlOBJ.find("div", {"class": "xl_fj"})
            fileStr = ""
            fileCount = 0
            recordFileName = []

            if fjDiv is not None:
                for a in fjDiv.find_all("a"):
                    sleep(1)  # be polite to the server between downloads
                    fileUrl = self.getFileBaseUrl(response.url) + a.get("href")[1:]
                    # Local path the attachment is saved to.
                    fileName = "./tempFile/" + a.get("href")[2:]
                    ext = a.get('href').split(".")[-1]
                    # rar archives are unsupported: bail out with the defaults
                    # (same behaviour as the original early return).
                    if ext == 'rar':
                        return baseInfo
                    with open(fileName, "wb") as code:
                        contentInfo = requests.get(
                            fileUrl, headers=self.download_header,
                            timeout=self.REQUEST_TIMEOUT).content
                        code.write(contentInfo)
                    recordFileName.append(fileName)
                    # BUG FIX: count every attachment; the original bumped the
                    # counter once before the loop, so it was at most 1.
                    fileCount += 1

                    if ext in ('doc', 'docx'):
                        # antiword prints the document text to stdout.
                        try:
                            fileStr += subprocess.check_output(
                                ["antiword", fileName]).decode().replace("[pic]", "")
                        except Exception:
                            # Best effort: skip unreadable docs, but log it.
                            self.logger.warning("antiword failed for %s", fileName)
                    elif ext in ('xls', 'xlsx'):
                        try:
                            fileStr += self.getExcelData(fileName)
                        except Exception:
                            self.logger.warning("excel read failed for %s", fileName)
                    elif ext == 'pdf':
                        # pdf2htmlEX converts to HTML; the HTML file is kept
                        # so the page can render the pdf content directly.
                        self.logger.warning("开始处理pdf文件：")
                        baseInfo['pdf_type'] = 1
                        fileContentStr, htmlFileName = CPdf2TxtManager().changeToHtmlToText(fileName)
                        if htmlFileName:
                            self.logger.warning("处理pdf文件完成：")
                            recordFileName.append(htmlFileName)
                            fileStr += fileContentStr + "\n"
                        else:
                            self.logger.error("处理pdf文件失败：")
                    else:
                        print("看看还有啥后缀:" + ext)

            # Assemble the record from the collected page + attachment text.
            baseInfo['fileCount'] = fileCount
            baseInfo['fileContent'] = self.process_html(fileStr)
            if htmlStr:
                baseInfo['content'] = self.process_html(htmlStr)
            baseInfo['fileRealName'] = [name.split("/")[-1] for name in recordFileName]
            # Contents captured — remove the downloaded temp files.
            for unlinkfile in recordFileName:
                os.remove(unlinkfile)
            return baseInfo
        except Exception:
            # Best effort: keep the defaults, but log instead of swallowing
            # the error silently as the original did.
            self.logger.exception("parse_detail failed for %s", response.url)
            return baseInfo

    def getExcelData(self, fileName):
        """Read every cell of every sheet in an xls/xlsx workbook.

        Returns one string: a header line per sheet, then each row's cell
        values separated by spaces, one row per line.
        """
        excelStr = ""
        workbook = xlrd.open_workbook(fileName)
        for sheetCount, sheet in enumerate(workbook.sheets(), start=1):
            excelStr += "第" + str(sheetCount) + "sheet：\n"
            for i in range(sheet.nrows):
                for item in sheet.row_values(i):
                    excelStr += str(item) + " "
                excelStr += "\n"
        return excelStr
