import scrapy
import items
import pipelines
import os
import random
import json
import datetime

from Core.Config import Config
import Core.Gadget as Gadget

# Load the shared project configuration from one level above the working
# directory. os.path.join yields the same backslash-separated path as the
# original string concatenation on Windows, but stays portable elsewhere.
config = Config(os.path.join(os.getcwd(), "..", "config.json"))

logger = config.Logger("SpiderXueqiu")
database = config.DataBase("MySQL")
realtime = config.RealTime()

# Candidate User-Agent strings. NOTE(review): random selection is commented
# out in Header(); the fixed `ua` below is what is actually sent.
with open(os.path.join(os.getcwd(), "agentList.json"), 'r', encoding='utf-8') as fp:
    agentList = json.load(fp)["AgentList"]

# Candidate cookies; likewise unused at the moment (fixed `cookie` below wins).
with open(os.path.join(os.getcwd(), "cookieList.json"), 'r', encoding='utf-8') as fp:
    cookieList = json.load(fp)["CookieList"]

# Sample AJAX endpoints captured from the browser, kept for reference:
# http://www.yanglee.com/Action/ProductAJAX.ashx?mode=statistics&pageSize=40&pageIndex=1&conditionStr=producttype%3A3&start_released=&end_released=&orderStr=1&ascStr=ulup&_=1578374463400
# http://www.yanglee.com/Action/ProductAJAX.ashx?mode=statistics&pageSize=40&pageIndex=1&conditionStr=producttype%3A4&start_released=&end_released=&orderStr=1&ascStr=ulup&_=1578374463398
# http://www.yanglee.com/Action/ProductAJAX.ashx?mode=statistics&pageSize=40&pageIndex=2&conditionStr=producttype%3A4&start_released=&end_released=&orderStr=1&ascStr=ulup&_=1578374463404

# Number of listing pages to request -- adjust as needed.
crawl_pages = 178
product_type = "producttype%3A4"  # product-type filter: 4 = "other wealth management"
seriel_num = 1578374463408  # cache-busting "_" timestamp query parameter
ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36"
cookie = "safedog-flow-item=7C90834051ABA6993869AB41F95C1C; UM_distinctid=16f7e7211b720d-0d4668cc274ffe-6701b35-149fb0-16f7e7211b83de; CNZZDATA1309589=cnzz_eid%3D2069264835-1578377075-http%253A%252F%252Fwww.yanglee.com%252F%26ntime%3D1578377075"


class YangleeSpider(scrapy.Spider):
    """Crawl the wealth-management product listing on www.yanglee.com.

    Listing pages are fetched from the site's ProductAJAX endpoint; each
    JSON result row is normalised into a flat document that the configured
    MySQL pipeline persists.
    """

    name = "Yanglee"
    allowed_domains = ["www.yanglee.com"]
    start_urls = ['http://www.yanglee.com/Product/Index.aspx']

    custom_settings = {
        "COOKIES_ENABLED": False,
        "DOWNLOAD_DELAY": 1,
        "PROXIES": [
            {'ip_port': '119.4.172.166:80', 'user_pass': ''},
            {'ip_port': '121.206.132.35:8118', 'user_pass': ''}
        ],
        # Let 400/403 responses reach the spider instead of being dropped
        # by the HTTP-error middleware.
        "HTTPERROR_ALLOWED_CODES": [400, 403],
        'ITEM_PIPELINES': {
            'pipelines.MySQLDBPipeline': 0  # must match the `import pipelines` at the top
        },
        "DATABASE_ADDR": database.address,
        "DATABASE_PORT": database.port,
        "DATABASE_USER": database.username,
        "DATABASE_PASS": database.password
    }

    def Header(self):
        """Build request headers that mimic the site's browser AJAX call."""
        # ua = random.choice(agentList)  # randomised User-Agent (currently disabled)
        headers = {
            'X-Requested-With': 'XMLHttpRequest',
            'Host': 'www.yanglee.com',
            'Connection': 'keep-alive',
            'Accept': '*/*',
            'Referer': 'http://www.yanglee.com/Product/',
            'Cookie': cookie,
            'User-Agent': ua,
        }
        return headers

    def Parse_DateTime(self, strDateTime):
        """Parse the site's timestamp format, e.g. "2020/1/7 0:00:00"."""
        fmt = "%Y/%m/%d %H:%M:%S"  # renamed from `format` to avoid shadowing the builtin
        return datetime.datetime.strptime(strDateTime, fmt)

    def start_requests(self):
        """Yield one AJAX listing request per page, up to `crawl_pages`."""
        print("Start To Crawl")
        print("Existing settings: %s" % self.settings.attributes.keys())

        str1 = "http://www.yanglee.com/Action/ProductAJAX.ashx?mode=statistics&pageSize=40&pageIndex="
        str2 = "&conditionStr="
        str3 = "&start_released=&end_released=&orderStr=1&ascStr=ulup&_="

        # ---Loop Pages---
        for iPage in range(crawl_pages):
            page = iPage + 1  # site pages are 1-based

            # Skip everything past page 121 (presumably the current last
            # page of this product type -- TODO confirm against the site).
            if page > 121:
                continue

            print("Request Page", page)
            headers = self.Header()
            url = str1 + str(page) + str2 + product_type + str3 + str(seriel_num)
            param = {"Page": page}
            # `param=param` binds the current dict as a default argument so
            # each callback keeps its own page (avoids late-binding closures).
            request = scrapy.Request(
                url, method="GET", headers=headers,
                callback=lambda response, param=param: self.Parse_ProductList(response, param))
            yield request

    def parse(self, response):
        # Unused: every request routes its response to Parse_ProductList.
        pass

    def Parse_ProductList(self, response, param):
        """Parse one listing page and yield a batch document for the pipeline.

        `param["Page"]` carries the 1-based page number attached in
        start_requests; non-JSON responses are logged and skipped.
        """
        print("Request Product List Page", param["Page"])

        try:
            html = json.loads(response.text)
        except ValueError as e:  # json.JSONDecodeError subclasses ValueError
            print("Parse Error not Json", e)
            return

        # Currently unused, but the lookups also validate the payload shape.
        total = html['total']
        page_count = html['PageCount']
        results = html['result']

        documents = {
            "Islist": [],
            "DataBaseName": "FixIncome_Similar",
            "TableName": "Licai_Yanglee",
        }

        for r in results:
            document = {}
            # Skip rows missing the fields we key on.
            if r["Title"] == "":
                print("No Name")
                continue
            if r["released"] == "":
                print(r["Title"], "released Error")
                continue

            document["Name"] = r["Title"]
            dt = self.Parse_DateTime(r["released"])

            # ---Missing values: replace empty strings with None (SQL NULL)---
            for key, value in r.items():
                if value == "":
                    print(document["Name"], key, "Error")
                    r[key] = None

            document["ID"] = r["ID"]
            document["Product_Type"] = r["producttype"]
            document["Issuer"] = r["issuers"]
            document["Term"] = r["PeriodTo"]
            document["Minimum_Subscribe"] = r["StartPrice"]
            document["Invest_Type"] = r["moneyinto"]
            document["Expected_Return"] = r["EstimatedRatio1"]
            document["Status"] = r["status"]

            document["DateTime"] = dt
            document["Date"] = dt.date()
            document["Release_Date"] = dt.date()

            # Deduplication key: product name + release date.
            document["Key"] = document["Name"] + "_" + Gadget.ToDateString(dt)

            documents["Islist"].append(document)

        yield documents


if __name__ == '__main__':
    # Allow running this spider directly by delegating to Scrapy's CLI.
    from scrapy.cmdline import execute

    argv = ['scrapy', 'runspider', 'Spider_Yanglee.py']
    execute(argv)