# -*- coding: utf-8 -*-
import os
import re
import subprocess
import bson
import pymongo
import requests
import xlrd
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from bs4 import BeautifulSoup as bs

from utils.PDFReader import CPdf2TxtManager
from utils.docxReader import DocxReader


class ShanxiSpider(CrawlSpider):
    """Crawl news/notice listings on www.shaanxijs.gov.cn.

    For every article page the spider extracts the title, publish time and
    click count, flattens the article body to text, downloads any linked
    attachments (doc/docx, xls/xlsx, pdf) and converts them to plain text,
    then writes one document per article into MongoDB
    (``tongji_zjj.spider_info`` on 127.0.0.1:27017).
    """

    name = "shanxi"
    allowed_domains = ["www.shaanxijs.gov.cn"]
    start_urls = ['http://www.shaanxijs.gov.cn/zixun/list28_90.htm']

    # Baidu share-widget boilerplate that get_text() picks up from article
    # bodies; it is stripped verbatim from the extracted text.
    filterStr = """window._bd_share_config={"common":{"bdSnsKey":{},"bdText":"","bdMini":"2","bdPic":"",
    "bdStyle":"0","bdSize":"16"},"share":{},"image":{"viewList":["weixin","tsina","tqq"],"viewText":"",
    "viewSize":"16"},"selectShare":{"bdContainerClass":null,"bdSelectMiniList":["weixin","tsina","tqq"]}};with(
    document)0[(getElementsByTagName('head')[0]||body).appendChild(createElement(
    'script')).src='http://bdimg.share.baidu.com/static/api/js/share.js?v=89860593.js?cdnversion='+~(-new Date(
    )/36e5)]; """
    # Attachment hrefs in page bodies are site-relative; this makes them absolute.
    fileBaseUrl = "http://www.shaanxijs.gov.cn"

    # MongoDB collection scraped items are written to (client connects lazily).
    db = pymongo.MongoClient("127.0.0.1", 27017).tongji_zjj.spider_info

    # Browser-like headers for the out-of-band requests made outside Scrapy's
    # downloader (click-counter POST and attachment downloads).
    download_headers = {
        'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        'accept-encoding': "gzip, deflate",
        'accept-language': "zh-CN,zh;q=0.9",
        'cache-control': "no-cache",
        'connection': "keep-alive",
        'cookie': "ASP.NET_SessionId=5y5vgcai3x1e5v52ak0vwtjv",
        'dnt': "1",
        'host': "www.shaanxijs.gov.cn",
        'pragma': "no-cache",
        'referer': "http://www.shaanxijs.gov.cn/zixun/list28_90.htm",
        'upgrade-insecure-requests': "1",
        'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/65.0.3325.181 Safari/537.36 "
    }

    rules = [
        # Pagination pages: parsed by parse_page and followed for more links.
        Rule(LinkExtractor(allow='/list28_(.*?).htm'),
             callback='parse_page',
             follow=True),
        # Article detail pages (two URL layouts observed on the site).
        Rule(LinkExtractor(allow='/news/info/(.*?)'),
             callback='parse_item',
             follow=False),
        Rule(LinkExtractor(allow='/zixun/(.*?)', restrict_xpaths="/html/body/div[2]/div[1]/div[2]/div[2]/ul"),
             callback='parse_item',
             follow=False)
    ]

    def parse_item(self, response):
        """Parse one article page and persist it to MongoDB.

        Side effects: POSTs to the site's click-counter endpoint, downloads
        attachments into ./tempFile/, inserts one document into ``self.db``
        and removes the temporary files afterwards.
        """
        # The article id is the numeric file-name part of the URL,
        # e.g. ".../news/info/12345.htm" -> "12345".
        ziXunId = re.findall(r"/[0-9]*\.", response.url)[0].replace(".", "").replace("/", "")
        htmlOBJ = bs(response.text, 'lxml')
        contentDiv = htmlOBJ.find("div", {"class": "content"})
        title = contentDiv.find("h1").text
        autoTitleDiv = contentDiv.find("div", {"class": "autotitle"})

        publicTime = ""
        clickNum = "0"
        htmlStr = ""
        fileNames = []          # local temp files to delete when done
        fileStr = ""            # concatenated plain text of all attachments
        downloadFileName = []   # stored names ("<id>.<ext>") of attachments
        pdf_type = 0            # 1 when at least one PDF attachment was found

        if autoTitleDiv:
            publicTime = autoTitleDiv.find("span", string=re.compile(r"发布时间")).text.replace("发布时间：", "")
            try:
                # The click count is only served by this AJAX endpoint.
                clickNum = requests.post("http://www.shaanxijs.gov.cn/configajax/News_Click.aspx",
                                         data={"action": "ajax_SetClick", "n": ziXunId,
                                               "tID": "0"}, headers=self.download_headers).text
            except Exception as e:
                # Best effort only. Fix: keep the same type ("0" string) as
                # the success path instead of the original int 0.
                self.logger.warning("获取点击量失败: %s", e)
                clickNum = "0"

        contextDiv = contentDiv.find("div", {"class": "context"})
        try:
            if contextDiv:
                htmlStr = contextDiv.get_text().replace("分享到：", "").replace(self.filterStr, "")

                # Every <a> with a real href inside the body is treated as an
                # attachment to download and convert to text.
                for anchor in contextDiv.find_all("a"):
                    href = anchor.get("href")
                    if not href:
                        self.logger.warning("没有找到href：" + response.url)
                        continue
                    if href == "#":
                        continue

                    ext = self.getExtFromHref(href)
                    if ext == "":
                        self.logger.warning("获取后缀失败" + href)
                        continue

                    downloadFileName.append(ziXunId + "." + ext)
                    fileName = "./tempFile/" + ziXunId + "." + ext

                    # Fix: make sure the scratch directory exists before
                    # writing (the original crashed on a fresh checkout).
                    os.makedirs("./tempFile", exist_ok=True)
                    with open(fileName, "wb") as out:
                        contentInfo = requests.get(self.fileBaseUrl + href,
                                                   headers=self.download_headers).content
                        out.write(contentInfo)
                    fileNames.append(fileName)

                    if ext in ("doc", "docx"):
                        # Prefer antiword; fall back to the project reader.
                        try:
                            antiRead = subprocess.check_output(["antiword", fileName]).decode().replace("[pic]", "")
                        except Exception:
                            self.logger.warning("anti读取文件失败：")
                            antiRead = DocxReader().parse(fileName)
                        fileStr += antiRead
                    elif ext in ("xls", "xlsx"):
                        try:
                            fileStr += self.getExcelData(fileName)
                        except Exception as e:
                            # Fix: log instead of silently swallowing.
                            self.logger.warning("读取excel失败: %s", e)
                    elif ext == "pdf":
                        # Fix: logger.warn is a deprecated alias of warning.
                        self.logger.warning("开始处理pdf文件：")
                        pdf_type = 1
                        try:
                            fileContentStr, htmlFileName = CPdf2TxtManager().changeToHtmlToText(fileName)
                        except Exception as e:
                            self.logger.warning("读取pdf失败")
                            self.logger.warning(str(e))
                            htmlFileName = ""
                            fileContentStr = ""

                        if htmlFileName:
                            self.logger.warning("处理pdf文件完成：")
                            # Conversion produced an intermediate HTML file;
                            # schedule it for cleanup too.
                            fileNames.append(htmlFileName)
                            fileStr += fileContentStr + "\n"
                        else:
                            self.logger.error("处理pdf文件失败：")
                            # Keep the (unreadable) pdf out of the cleanup
                            # list, mirroring the original behaviour.
                            fileNames.remove(fileName)
                    # ext == "html" and any other extension contribute no text.
        except Exception as e:
            # Attachment handling is best-effort; the article itself is
            # still saved below.
            self.logger.warning("处理文件的过程中出错," + str(e))

        itemSave = dict(
            title=title,
            publicTime=publicTime,
            clickNum=clickNum,
            htmlStr=htmlStr,
            fileStr=fileStr,
            infoId=ziXunId,
            url=response.url,
            pdf_type=pdf_type,
            downloadFileName=downloadFileName,
            province_type='shanxi',
        )
        # Fix: Collection.save() was removed in pymongo 4; insert_one is the
        # equivalent for documents without an _id.
        self.db.insert_one(itemSave)

        # Fix: handle each removal separately so one failure no longer leaks
        # every remaining temp file (the original wrapped the whole loop).
        for unlinkStr in fileNames:
            try:
                os.remove(unlinkStr)
            except OSError as e:
                self.logger.warning("删除临时文件失败: %s", e)

    def getExcelData(self, fileName):
        """Flatten every sheet of the Excel workbook at *fileName* into one
        plain-text string: a "第N sheet" header per sheet, cells separated by
        spaces, rows separated by newlines."""
        parts = []
        workbook = xlrd.open_workbook(fileName)
        for sheetCount, sheet in enumerate(workbook.sheets(), start=1):
            parts.append("第" + str(sheetCount) + "sheet：\n")
            for rowIndex in range(sheet.nrows):
                for cell in sheet.row_values(rowIndex):
                    parts.append(str(cell) + " ")
                parts.append("\n")
        # join instead of repeated += (avoids quadratic concatenation).
        return "".join(parts)

    def parse_page(self, response):
        # Listing pages carry no data of their own; the Rule only follows
        # their links to article pages.
        pass

    def getExtFromHref(self, url):
        """Return the 3- or 4-character file extension of *url* (without the
        dot), or "" when there is none.

        Fix: the original indexed url[-4]/url[-5] directly, which raised
        IndexError on hrefs shorter than 5 characters and mis-reported
        non-extensions such as "x.a/cd" when a dot landed at index -5.
        """
        if "." not in url:
            return ""
        ext = url.rpartition(".")[2]
        if len(ext) in (3, 4) and "/" not in ext:
            return ext
        return ""
