#!/usr/bin/env python
# encoding: utf-8

"""
@version: 0.1
@author: Kouen
@license: Apache Licence
@email: jobkouen@outlook.com
@software: PyCharm Community Edition
@file: stock_spiders.py
@time: 2016/12/30 14:32
@summary:
    
"""
import os
import sys
import json

# Make the project root ("AnalySignalSystem") importable no matter which
# subdirectory the spider is launched from.
# NOTE(review): assumes the current working directory is somewhere inside the
# project tree — if "AnalySignalSystem" is absent from cwd, find() returns -1
# and the inserted path is wrong; confirm launch conventions.
sys.path.insert(0, os.getcwd()[:os.getcwd().find("AnalySignalSystem")]+"AnalySignalSystem")

from scrapy.contrib.spiders import Spider
from source.Report.Scrapy.sina.sina.items import SinaItem
import scrapy


class SinaFinan(Spider):
    """Crawl Sina Finance company pages and collect financial statements.

    Request flow:
        start_requests -> stock_parse
            -> finan_easy_parse -> finan_easy_all_parse   (financial summary)
            -> finan_parse -> finan_histroy_parse         (detailed statements)

    Each yielded item carries: name, code, market, profession, financial.
    """
    name = 'sina_stock'

    def start_requests(self):
        """Yield one overview-page request per stock code not yet processed.

        Reads the full code list and the already-processed set from local
        JSON files written by an earlier pipeline step.
        """
        # Context managers close the file handles deterministically
        # (the original relied on the garbage collector).
        with open('/data/stock_filter.json') as fh:
            done_codes = set(json.load(fh))
        with open('/data/stock_code.json') as fh:
            all_codes = json.load(fh)

        for entry in all_codes:
            name, code = entry['name'], entry['code']
            if code in done_codes:
                continue  # already crawled in a previous run

            item = SinaItem()
            item['name'] = name
            item['code'] = code
            # Shenzhen listings have 'sz' in their URL; everything else is
            # treated as Shanghai ('sh') — same rule for both the item field
            # and the request URL prefix.
            market = 'sz' if 'sz' in entry['url'] else 'sh'
            item['market'] = market
            yield scrapy.Request(
                url=f"http://finance.sina.com.cn/realstock/company/{market}{code}/nc.shtml",
                callback=self.stock_parse,
                meta={'item': item}
            )
        # NOTE(review): the original method continued with a second loop over
        # `response.xpath(...)`, but `response` is not defined here — Scrapy
        # calls start_requests() with no arguments — so that loop raised
        # NameError unconditionally.  It appears to be leftover code from a
        # list-page parse callback and has been removed.

    def stock_parse(self, response):
        """Parse a company overview page and follow its financial-table links."""
        item = response.meta['item']
        item['profession'] = response.xpath(
            '//div[@class="com_overview blue_d"]/descendant::a/text()').extract()
        for link in response.xpath('//div[@class="sec_cont row_num_4"]/descendant::a'):
            texts = link.xpath('text()').extract()
            hrefs = link.xpath('@href').extract()
            if not texts or not hrefs:
                # Guard: the original indexed [0] unconditionally and would
                # raise IndexError on anchors without text or href.
                continue
            table, url = texts[0], hrefs[0]
            if table not in (
                u"财务摘要", u"财务指标", u"资产负债表", u"利润表",
                u"现金流量表", u"资产减值准备明细表"
            ):
                continue
            # The summary table has its own two-step parse; all detailed
            # statements share finan_parse.
            callback = self.finan_easy_parse if table == u"财务摘要" else self.finan_parse
            yield scrapy.Request(url=url, callback=callback, meta={'item': item})

    def finan_parse(self, response):
        """Follow every per-period link found in the first statement table."""
        item = response.meta['item']
        first_table = response.xpath('//div[@id="con02-1"]/child::table')[0]
        for url in first_table.xpath('descendant::a/@href').extract():
            yield scrapy.Request(
                url=url,
                callback=self.finan_histroy_parse,
                meta={'item': item}
            )

    def finan_histroy_parse(self, response):
        """Parse one historical statement table into item['financial'].

        Builds {period-header -> {field-name -> value}}: the first table row
        supplies the period headers, each following row supplies one field.
        (Method name keeps the original 'histroy' typo so existing callback
        wiring and any external references stay valid.)
        """
        item = response.meta['item']
        data = dict()
        rows = response.xpath(
            '//div[@id="con02-1"]/child::table')[1].xpath('child::tbody/child::tr')

        # First row: period headers.  Map column position -> header so cell
        # values can be filed under the right period.
        pos_to_header = {}
        for pos, header in enumerate(rows[0].xpath('child::td/text()').extract()):
            data.setdefault(header, dict())
            pos_to_header[pos] = header

        for row in rows[1:]:
            if len(row.xpath('child::td').extract()) <= 1:
                continue  # spacer / section row, no data cells
            links = row.xpath('descendant::a/text()').extract()
            if not links:
                continue  # row has no field-name anchor
            # Field names look like "1、货币资金": drop a single-character
            # leading ordinal if one is present.
            parts = links[0].split(u'、')
            field = "".join(parts[1:]) if len(parts[0]) == 1 else "".join(parts)
            for pos, value in enumerate(row.xpath('descendant::*/text()').extract()[1:]):
                # Explicit bounds check replaces the original broad
                # `except (Exception,): continue`, which hid real errors.
                if pos in pos_to_header:
                    data[pos_to_header[pos]][field] = value

        item['financial'] = data
        return item

    def finan_easy_parse(self, response):
        """On the financial-summary page, follow the 'view all data' link."""
        for anchor in response.xpath('//a'):
            texts = anchor.xpath('text()').extract()
            if texts and texts[0] == u'点击查看全部数据内容':
                url = "http://vip.stock.finance.sina.com.cn" + anchor.xpath('@href').extract()[0]
                yield scrapy.Request(
                    url=url,
                    callback=self.finan_easy_all_parse,
                    meta={'item': response.meta['item']}
                )

    def finan_easy_all_parse(self, response):
        """Parse the full financial-summary table into item['financial'].

        Rows whose first cell contains an <a name="..."> act as section
        headers; subsequent field/value rows are collected under the most
        recent section.
        """
        sections = dict()
        current = dict()
        section_key = ''
        for row in response.xpath('//table[@id="FundHoldSharesTable"]/child::tr'):
            anchor_names = row.xpath('child::td')[0].xpath('child::a/@name').extract()
            if anchor_names:
                # New section header: flush the previous section first.
                if section_key and current:
                    sections[section_key] = current
                    current = dict()
                section_key = anchor_names[0]
            else:
                texts = row.xpath('descendant::*/text()').extract()
                if len(texts) >= 2:
                    # TODO: convert the value to a number (yuan or 10k yuan).
                    current[texts[0]] = texts[1]
        # BUG FIX: the original never stored the final section, silently
        # dropping the last block of data on every page.
        if section_key and current:
            sections[section_key] = current

        item = response.meta['item']
        item['financial'] = sections
        return item

if __name__ == '__main__':
    # This module is intended to be run via the Scrapy crawler
    # (`scrapy crawl sina_stock`), not executed directly.
    pass
