import scrapy
import items
import pipelines
import os
import json
import datetime
import Core.MySQLDB as MySQLDB
import Core.MongoDB as MongoDB
from Core.Config import Config

# --- Runtime configuration -------------------------------------------------
# The shared config file lives one directory above the working directory.
# NOTE(review): Windows-style path separators — assumes the spider is
# launched from its own directory on Windows; confirm before porting.
config = Config(os.getcwd() + "\\..\\config.json")

logger = config.Logger("Spider_EMNews")
database = config.DataBase("MySQL")
realtime = config.RealTime()

# User-agent pool — presumably consumed by a downloader middleware for
# request rotation (TODO: confirm against middleware code).
# `with` ensures the file handle is closed (the old json.load(open(...))
# form leaked it).
with open(os.getcwd() + "\\agentList.json", 'r', encoding='utf-8') as _fp:
    agentList = json.load(_fp)["AgentList"]

# Cookie pool — same presumed rotation use as the agent list above.
with open(os.getcwd() + "\\cookieList.json", 'r', encoding='utf-8') as _fp:
    cookieList = json.load(_fp)["CookieList"]

class EastMoneyNewsSpider(scrapy.Spider):
    """Crawl EastMoney's 7x24 live-news API and, where an entry links to a
    standalone article page, the article itself. Yields ``items.NewsItem``
    objects that the configured MySQL pipeline persists.
    """

    # URLs already stored in the database, used to skip duplicates in parse().
    # A set gives O(1) membership tests; the original list scan was O(n) per
    # article. `_history_filter` avoids shadowing the builtin `filter`.
    _history_filter = {"limit": 1000, "orderby": {"DateTime": -1}}
    historyNewUrls = {news["URL"] for news in
                      database.Find("Text", "News", filter=_history_filter)}

    name = "EastMoneyNews"

    # --- Build the first `maxPage` pages of the live-list API up front. ---
    # (A comprehension cannot see class-scope names, hence the plain loop.)
    maxPage = 20
    livelist7x24_url_part1 = "http://newsapi.eastmoney.com/kuaixun/v1/getlist_102_ajaxResult_50_"
    livelist7x24_url_part2 = "_.html?r=0.6814634640423496&_=1549948187564"
    start_urls = []
    for _page in range(1, maxPage + 1):
        start_urls.append(livelist7x24_url_part1 + str(_page) + livelist7x24_url_part2)

    custom_settings = {
        "COOKIES_ENABLED": False,
        "DOWNLOAD_DELAY": 0.5,
        "PROXIES": [
            {'ip_port': '119.4.172.166:80', 'user_pass': ''},
            {'ip_port': '121.206.132.35:8118', 'user_pass': ''}
        ],
        "HTTPERROR_ALLOWED_CODES": [400, 403],
        'ITEM_PIPELINES': {
            # Must match the pipeline module imported at the top of the file.
            'pipelines.MySQLDBPipeline': 0
        },
        "DATABASE_ADDR": database.address,
        "DATABASE_PORT": database.port,
        "DATABASE_USER": database.username,
        "DATABASE_PASS": database.password
    }

    def parse(self, response):
        """Parse one page of the live-list API.

        The endpoint returns ``var ajaxResult=<json>``; the 15-character JS
        prefix is stripped before decoding. Each entry either links to a full
        article page (``newstype == 1``, fetched via a follow-up request) or
        carries its whole text in ``digest`` and is yielded directly.
        """
        try:
            # Drop the leading "var ajaxResult=" (15 chars) to get pure JSON.
            jsonObject = json.loads(response.text[15:])
        except Exception as e:
            print("Parse Error not Json", e)
            return

        for article in jsonObject['LivesList']:
            param = {
                "Title": article["title"],
                "URL": article["url_unique"],
                "DateTime": article["showtime"],
                "NewsType": article["newstype"],
            }
            url = param["URL"]

            # Skip anything already persisted.
            if url in self.historyNewUrls:
                continue

            print("Yield", article["title"], article["showtime"], url)
            article["newstype"] = int(article["newstype"])
            if article["newstype"] == 1:
                # Full article lives on its own page. Bind `param` as a
                # default argument so each request captures its own copy
                # (late-binding closure pitfall).
                yield scrapy.Request(url, callback=lambda response, param=param: self.parseArticle(response, param))
            else:
                # No standalone page: the digest IS the full content.
                item = items.NewsItem()
                item["DateTime"] = datetime.datetime.strptime(param["DateTime"], "%Y-%m-%d %H:%M:%S")
                item["Title"] = param["Title"]
                item["URL"] = param["URL"]
                item["Author"] = None
                item["Source"] = None
                item["NewsType"] = param["NewsType"]
                item["Content"] = article["digest"]
                item["Type"] = "News"
                yield item

    def parseArticle(self, response, param):
        """Parse a standalone article page and yield a populated NewsItem.

        `param` carries the metadata (Title, URL, DateTime, NewsType)
        captured from the live-list entry that linked here.
        """
        print("Parse Article", param["Title"], param["DateTime"])
        print(param["URL"])

        # Author line looks like "作者：<name>"; strip the label. (replace()
        # is a no-op when the label is absent, matching the old `in` check.)
        author = response.xpath('//div[@class="newsContent"]//div[@class="author"]/text()').extract()
        author = author[0].replace("作者：", "") if author else None

        # The source is stored in a data attribute, not in element text.
        source = response.xpath('//div[@class="newsContent"]//div[@class="source data-source"]/@data-source').extract()
        source = source[0] if source else None

        # Rebuild the body text from its <p> elements; join once instead of
        # repeated string concatenation (which is quadratic).
        paragraphs = []
        for para in response.xpath('//div[@id="ContentBody"]/p').extract():
            para = para.replace("<p>", "").replace("</p>", "")
            if para:
                paragraphs.append(para + "\r\n")
        reportContent = "".join(paragraphs)

        item = items.NewsItem()
        item["DateTime"] = datetime.datetime.strptime(param["DateTime"], "%Y-%m-%d %H:%M:%S")
        item["Title"] = param["Title"]
        item["URL"] = param["URL"]
        item["NewsType"] = param["NewsType"]
        item["Author"] = author
        item["Source"] = source
        item["Content"] = reportContent
        item["Type"] = "News"
        yield item
