# -*- coding: utf-8 -*-
import os
from time import sleep

import scrapy
from bs4 import BeautifulSoup as bs
import requests
import subprocess
import xlrd
import pymongo
from utils.PDFReader import CPdf2TxtManager
from utils.docxReader import DocxReader


class ZheJiangSpider(scrapy.Spider):
    """Scrape announcements from www.zjjs.gov.cn and store them in MongoDB.

    Walks the paginated listing (pages 1..158), fetches every linked detail
    page, extracts the announcement body plus metadata, downloads the
    doc/docx/xls/xlsx/pdf attachments to ``./tempFile/`` long enough to pull
    their text out, and saves one document per announcement.
    """

    name = "zhejiang"
    allowed_domains = ["www.zjjs.gov.cn"]
    start_urls = ['http://www.zjjs.gov.cn/n71/n72/index.html']

    # NOTE: connection is created at import time (class attribute);
    # target is the spider_info collection of tongji_zjj on localhost.
    db = pymongo.MongoClient("127.0.0.1", 27017).tongji_zjj.spider_info

    download_header = {
        'accept': "text/plain, */*; q=0.01",
        'accept-encoding': "gzip, deflate",
        'accept-language': "zh-CN,zh;q=0.9",
        'cache-control': "no-cache",
        'connection': "keep-alive",
        'cookie': "JSESSIONID=C9A0179ADD3FAA97118C0822A629404E",
        'dnt': "1",
        # BUG FIX: was "www.fjjs.gov.cn" (copy-paste from a Fujian spider);
        # the Host header must match the site actually being requested.
        'host': "www.zjjs.gov.cn",
        'pragma': "no-cache",
        'referer': "http://www.zjjs.gov.cn/n71/n72/index.html",
        'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/65.0.3325.181 Safari/537.36",
    }

    def parse(self, response):
        """Fan out one request per listing page (pages 1..158)."""
        for page in range(1, 159):
            page_url = 'http://www.zjjs.gov.cn/n71/n72/index_916_' + str(page) + '.html'
            yield scrapy.Request(url=page_url, callback=self.parse_page)

    def parse_page(self, response):
        """Fetch every detail page linked from a listing page and persist it."""
        page_obj = bs(response.text, 'lxml')
        for link in page_obj.find_all("a"):
            href = link.get('href')
            if not href:
                # Robustness: anchors without href previously raised KeyError.
                continue
            response_detail = requests.get(
                'http://www.zjjs.gov.cn/' + href.replace("../../", ""),
                headers=self.download_header,
                timeout=60,  # don't let one stalled request hang the crawl
            )
            item_info = self.parse_detail(response_detail)
            # BUG FIX: Collection.save() was deprecated and removed in
            # PyMongo 4; insert_one() is equivalent for documents without _id.
            self.db.insert_one(item_info)

    def parse_detail(self, response):
        """Extract one announcement: HTML body, metadata and attachment text.

        :param response: ``requests`` response for the detail page.
        :return: dict ready for MongoDB insertion.
        """
        base_info = {
            'fileCount': 0,
            'fileContent': "",
            'fileRealName': [],
            'province_type': 'zhejiang',
        }

        html_obj = bs(str(response.content, 'utf-8'), 'lxml')
        # The publication id is carried in a hidden input element.
        base_info['contentId'] = html_obj.find("input", {"id": "contentid"})['value']

        # Announcement body text.
        detail_con = html_obj.find("div", {"id": "zoomtitl"})
        base_info['htmlStr'] = detail_con.get_text() if detail_con else ""

        # Publishing department / publication date / document number.
        department = public_time = license_info = ""
        info_div = html_obj.find("div", {"class": "Iinformation"})
        if info_div:
            spans = info_div.find_all("span")
            department = spans[0].text.replace("发布部门：", "")
            public_time = spans[1].text.replace("发布时间：", "")
            license_info = spans[2].text.replace("文号：", "")
        base_info['department'] = department
        base_info['public_time'] = public_time
        base_info['license_info'] = license_info

        # Attachments. BUG FIX: the original called detail_con.find_all()
        # unconditionally right after guarding `if detailCon:` above, so a
        # missing body div crashed with AttributeError.
        file_str = ""
        record_files = []
        attachments = detail_con.find_all("a") if detail_con else []
        for anchor in attachments:
            href = anchor.get("href")
            if not href or "/part/" not in href:
                continue
            ext = href.split("/part/")[-1].split(".")[-1]
            if ext == "rar":
                # rar attachments are unsupported; per the original intent
                # ("不支持rar附件") do not even download them.
                continue
            file_url = 'http://www.zjjs.gov.cn/' + href.replace("../../../", "")
            sleep(1)  # politeness delay between attachment downloads
            file_name = "./tempFile/" + href.split("/part/")[-1]
            os.makedirs("./tempFile", exist_ok=True)  # first run has no dir yet
            with open(file_name, "wb") as fh:
                content = requests.get(file_url, headers=self.download_header,
                                       timeout=60).content
                fh.write(content)
            record_files.append(file_name)

            text, keep = self._extract_attachment_text(file_name, ext)
            file_str += text
            if not keep:
                record_files.remove(file_name)

        # BUG FIX: in the original, fileCount was never incremented and
        # recordFileName never populated, so fileCount was always 0,
        # fileRealName always empty, and the temp downloads were never
        # deleted (disk leak).
        base_info['fileCount'] = len(record_files)
        base_info['fileContent'] = file_str
        base_info['fileRealName'] = [path.split("/")[-1] for path in record_files]

        # Clean up the downloaded temp files once their text is captured.
        for path in record_files:
            try:
                os.remove(path)
            except OSError:
                self.logger.warning("could not remove temp file %s", path)

        return base_info

    def _extract_attachment_text(self, file_name, ext):
        """Extract plain text from one downloaded attachment.

        :param file_name: local path of the downloaded file.
        :param ext: file extension without the dot (e.g. "pdf").
        :return: ``(text, keep)`` — the extracted text, plus a flag that is
            False when the file turned out unreadable (failed PDF) and
            should be dropped from the record.
        """
        if ext in ("doc", "docx"):
            try:
                text = subprocess.check_output(["antiword", file_name]).decode().replace("[pic]", "")
            except Exception:
                # antiword handles legacy .doc only; fall back to DocxReader.
                self.logger.warning("antiword failed, falling back to DocxReader: %s", file_name)
                text = DocxReader().parse(file_name)
            return text, True

        if ext in ("xls", "xlsx"):
            try:
                return self.getExcelData(file_name), True
            except Exception:
                # Unreadable workbook: keep the file on record, no text.
                return "", True

        if ext == "pdf":
            self.logger.warning("start processing pdf: %s", file_name)
            try:
                text, html_file_name = CPdf2TxtManager().changeToHtmlToText(file_name)
            except Exception as exc:
                self.logger.warning("pdf read failed: %s", exc)
                text, html_file_name = "", ""
            if html_file_name:
                # Conversion produced an intermediate HTML file => success.
                return text + "\n", True
            self.logger.error("pdf processing failed: %s", file_name)
            return "", False

        # html and any other extension contribute no text.
        return "", True

    def getExcelData(self, fileName):
        """Render every sheet of an xls/xlsx workbook as plain text.

        Each sheet is prefixed with a "第N sheet：" header; cells within a
        row are separated by single spaces, rows by newlines.

        :param fileName: path to the workbook to read.
        :return: the concatenated textual dump of all sheets.
        """
        # Collect fragments and join once: the original `excelStr +=` loop
        # is quadratic on large workbooks. Output is byte-identical.
        parts = []
        workbook = xlrd.open_workbook(fileName)
        for sheet_no, sheet in enumerate(workbook.sheets(), start=1):
            parts.append("第" + str(sheet_no) + "sheet：\n")
            for row_index in range(sheet.nrows):
                for cell in sheet.row_values(row_index):
                    parts.append(str(cell) + " ")
                parts.append("\n")
        return "".join(parts)
