import logging


import scrapy
import json
import js2py
from FetchFund.items import FetchfundItem
import logging

class FetchFundSpider(scrapy.Spider):
    """Crawl the eastmoney.com fund list, then each fund's detail page.

    The list endpoint returns a JavaScript snippet (not JSON), which is
    evaluated with js2py; each row yields a detail-page request whose
    result is emitted as a FetchfundItem.
    """
    name = 'FetchFund'
    allowed_domains = ['eastmoney.com']

    # List endpoint; page size is fixed at 200 rows per page.
    start_urls = ['http://fund.eastmoney.com/Data/Fund_JJJZ_Data.aspx?&page=1,200']
    urls = 'http://fund.eastmoney.com/Data/Fund_JJJZ_Data.aspx?&page=%d,200'

    curpage = 1  # current list page (1-based)
    pages = 0    # total number of pages, filled from the first list response
    record = 0   # total number of records, filled from the first list response

    def parse(self, response):
        """Parse one list page: schedule detail requests, then the next page."""
        body = response.body.decode('utf-8')
        # The response is a JS assignment, so evaluate it instead of json.loads.
        jsContent = js2py.eval_js(body)
        for row in jsContent['datas']:
            temp = {
                'fund_code': row[0],
                'fund_name': row[1],
                'fund_link': "http://fund.eastmoney.com/" + row[0] + ".html",
            }
            yield scrapy.Request(
                url=temp['fund_link'],
                meta={'temp': temp},
                callback=self.parse_detail,
            )

        # Capture the totals once, from the first response that provides them.
        if self.record == 0:
            self.record = int(jsContent['record'])
        if self.pages == 0:
            self.pages = int(jsContent['pages'])

        # Follow the next list page until every page has been requested.
        if self.curpage < self.pages:
            self.curpage += 1
            # was: format(self.urls % self.curpage) — format() was a no-op
            new_url = self.urls % self.curpage
            logging.info("current page:%s", self.curpage)
            yield scrapy.Request(new_url, callback=self.parse)

    def _xp(self, response, index, suffix):
        """Extract the first match under the detail page's main info frame.

        All interesting fields live under //*[@id="body"]/div[index]/div/div/
        div[3]/div[1]; ``suffix`` is the remainder of the xpath. Returns the
        first matching string, or None when the node is absent.
        """
        path = '//*[@id="body"]/div[{}]/div/div/div[3]/div[1]{}'.format(index, suffix)
        return response.xpath(path).extract_first()

    def _xp_stripped(self, response, index, suffix, chars):
        """Like _xp(), but strip ``chars`` from the result; '' when missing."""
        text = self._xp(response, index, suffix)
        return text.strip(chars) if text is not None else ''

    def parse_detail(self, response):
        """Scrape NAV, return rates and metadata from one fund detail page."""
        temp = response.meta['temp']

        # //*[@id="body"]/div[11] is sometimes an extra notice div, e.g.
        # '<div class="wrapper"><div class="gginfoTip">...performance fee
        # notice...</div></div>'. When the expected structure is missing
        # there, the real content lives in div[12] instead.
        index = 11
        if response.xpath('//*[@id="body"]/div[11]/div/div').extract_first() is None:
            index = 12

        # Top-left main frame, first sub-div: NAVs and trailing returns.
        # dl[1]: unit NAV      | 1-month | 1-year
        # dl[2]: cumulative NAV| 3-month | 3-year
        # dl[3]: (blank)       | 6-month | since inception
        temp['fund_current_value'] = self._xp(response, index, '/div[1]/dl[1]/dd[1]/span[1]/text()')
        # Retained for the (currently disabled) history-NAV crawl below.
        history_link = self._xp(response, index, '/div[1]/dl[1]/dt/p/span/span/a/@href')

        # NAV date text carries a trailing ')'; strip only that, as before.
        temp['fund_current_date'] = self._xp_stripped(response, index, '/div[1]/dl[1]/dt/p/text()', ')')

        temp['fund_total_value'] = self._xp(response, index, '/div[1]/dl[2]/dd[1]/span[1]/text()')

        temp['one_month_rate'] = self._xp(response, index, '/div[1]/dl[1]/dd[2]/span[2]/text()')
        temp['three_month_rate'] = self._xp(response, index, '/div[1]/dl[2]/dd[2]/span[2]/text()')
        temp['six_month_rate'] = self._xp(response, index, '/div[1]/dl[3]/dd[2]/span[2]/text()')
        temp['one_year_rate'] = self._xp(response, index, '/div[1]/dl[1]/dd[3]/span[2]/text()')
        temp['three_year_rate'] = self._xp(response, index, '/div[1]/dl[2]/dd[3]/span[2]/text()')
        temp['total_rate'] = self._xp(response, index, '/div[1]/dl[3]/dd[3]/span[2]/text()')

        # Second sub-div: a 2x3 metadata table.
        # tr[1]: type        | size    | manager
        # tr[2]: launch date | company | rating
        temp['fund_type'] = self._xp(response, index, '/div[2]/table/tr[1]/td[1]/a/text()')
        temp['fund_size'] = self._xp_stripped(response, index, '/div[2]/table/tr[1]/td[2]/text()', ': ')
        temp['fund_manager'] = self._xp(response, index, '/div[2]/table/tr[1]/td[3]/a/text()')
        temp['fund_start_time'] = self._xp_stripped(response, index, '/div[2]/table/tr[2]/td[1]/text()', ': ')
        temp['fund_level'] = self._xp(response, index, '/div[2]/table/tr[2]/td[3]/div/text()')

        # History-NAV fetch is disabled for now; it requires a Referer header
        # (verified with postman) against:
        # http://api.fund.eastmoney.com/f10/lsjz?fundCode=...&pageIndex=...&pageSize=...
        # headers = {'Referer': history_link}
        # yield scrapy.Request(url=..., headers=headers,
        #                      meta={'temp': temp}, callback=self.parse_history)

        # Copy every collected field into the exported item.
        items = FetchfundItem()
        for field in (
            'fund_code', 'fund_name', 'fund_link',
            'fund_current_value', 'fund_current_date', 'fund_total_value',
            'one_month_rate', 'three_month_rate', 'six_month_rate',
            'one_year_rate', 'three_year_rate', 'total_rate',
            'fund_start_time', 'fund_manager', 'fund_size',
            'fund_type', 'fund_level',
        ):
            items[field] = temp[field]

        yield items

    ##取历史净值,数据量太大，暂时不用
    # def parse_history(self,response):
    #     temp = response.meta['temp']
    #     history_value = []
    #     dict_data = json.loads(response.body.decode())
    #     for data in dict_data['Data']['LSJZList']:
    #         # tp = {}
    #         # tp['nv_date'] = data['FSRQ'] #净值日期
    #         # tp['na_value'] = data['DWJZ'] #单位净值
    #         # tp['cna_value'] = data['LJJZ'] #累计净值ljjz
    #         # tp['fund_code'] = temp['fund_code'] #基金编号
    #         # history_value.append(tp)
    #         history_value.append((temp['fund_code'],data['FSRQ'],data['DWJZ'],data['LJJZ']))
    #
    #     items = FetchfundItem()
    #
    #     items['fund_code'] = temp['fund_code']
    #     items['fund_name'] = temp['fund_name']
    #     items['fund_link'] = temp['fund_link']
    #
    #     items['fund_current_value'] = temp['fund_current_value']
    #     items['fund_current_date'] = temp['fund_current_date']
    #     items['fund_total_value'] = temp['fund_total_value']
    #     items['one_month_rate'] = temp['one_month_rate']
    #     items['three_month_rate'] = temp['three_month_rate']
    #     items['six_month_rate'] = temp['six_month_rate']
    #     items['one_year_rate'] = temp['one_year_rate']
    #     items['three_year_rate'] = temp['three_year_rate']
    #     items['total_rate'] = temp['total_rate']
    #
    #     items['fund_start_time'] = temp['fund_start_time']
    #     items['fund_manager'] = temp['fund_manager']
    #     items['fund_size'] = temp['fund_size']
    #     items['fund_type'] = temp['fund_type']
    #     items['fund_level'] = temp['fund_level']
    #
    #     items['history_value'] =history_value
    #
    #     self.end_time = time.perf_counter()
    #     logging.info("takes ", (self.end_time - self.start_time) * 1000 , " ms")
    #     yield items