import scrapy
from fundSpider.items import FundspiderItem
from config import *
import datetime
from selenium import webdriver

# 创建爬虫类 并且继承自scrapy.Spider --> 爬虫最基础的类
# 在爬虫跑起来的时候，将启动一个浏览器，一个spider开一个浏览器
class FundSpider(scrapy.Spider):
    """Spider that scrapes fund holdings pages from eastmoney.com.

    One headless Chrome browser is started per spider instance (presumably
    used by a downloader middleware to render the JS-loaded holdings table
    -- confirm against the project's middleware settings) and is shut down
    when the spider closes.
    """

    # Spider name: must be unique within the project.
    name = 'fund'
    # Domains this spider is allowed to crawl.
    allowed_domains = ['eastmoney.com']
    # Entry URL; the full fund_url list is scheduled from parse().
    start_urls = [fund_url[0]]

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Drive Chrome via selenium. The disabled features below reduce
        # selenium's resource usage and speed up scrapy.
        options = webdriver.ChromeOptions()

        # Skip loading images and stylesheets.
        prefs = {
            "profile.managed_default_content_settings.images": 2,
            'permissions.default.stylesheet': 2
        }
        options.add_experimental_option("prefs", prefs)
        # Disable GPU acceleration.
        options.add_argument('--disable-gpu')
        # Headless: on Linux hosts without a display server, startup fails
        # without this flag.
        options.add_argument('--headless')
        # Run with highest privileges (required when running as root).
        options.add_argument('--no-sandbox')
        options.add_argument('--disable-dev-shm-usage')

        # BUG FIX: the `chrome_options` keyword was deprecated in Selenium 3
        # and removed in Selenium 4; `options` is the supported name.
        self.browser = webdriver.Chrome(options=options)

    def closed(self, reason):
        """Called when the spider closes: quit the browser and all its tabs.

        :param reason: close reason supplied by Scrapy (unused).
        """
        self.browser.quit()

    def parse(self, response):
        """Schedule every fund detail page for :meth:`detail_parse`.

        BUG FIX: ``fund_url[0]`` was already fetched via ``start_urls``, so
        re-yielding it here was silently dropped by Scrapy's duplicate
        filter and its detail page never got parsed. ``dont_filter=True``
        guarantees every URL in ``fund_url`` reaches ``detail_parse``.

        :param response: response for ``start_urls[0]`` (only used as a
            trigger; its content is re-requested and parsed below).
        """
        for url in fund_url:
            yield scrapy.Request(url=url, callback=self.detail_parse,
                                 dont_filter=True)

    def detail_parse(self, response):
        """Extract one :class:`FundspiderItem` from a rendered detail page.

        :param response: a fund detail page whose holdings table has been
            dynamically rendered (by the selenium-driven browser).
        """
        item = FundspiderItem()
        item['crawl_date'] = datetime.date.today()
        item['fund'] = response.xpath('//*[@id="bodydiv"]/div[8]/div[3]/div[1]/div[1]/div[1]/h4/a/text()').get()
        # BUG FIX: .get() returns None when the xpath misses, which made the
        # original .strip().split()[0] chain raise AttributeError (and []
        # raise IndexError on whitespace-only text); guard before parsing.
        scale_text = response.xpath('//*[@id="bodydiv"]/div[8]/div[3]/div[1]/div[2]/p/label[5]/span/text()').get()
        scale_parts = scale_text.split() if scale_text else []
        item['scale'] = scale_parts[0] if scale_parts else None
        # Holdings table: content below requires dynamic (JS) loading.
        table = response.xpath('//*[@id="cctable"]/div/div/table')
        trs = table[0].xpath('//tbody/tr')

        code = []
        stock = []
        ratio = []
        for tr in trs[1:]:  # skip the header row
            code.append(tr.xpath('td[2]/a/text()').get())
            stock.append(tr.xpath('td[3]/a/text()').get())
            ratio.append(tr.xpath('td[7]/text()').get())

        item['code'] = code
        item['stock'] = stock
        item['ratio'] = ratio
        # BUG FIX: print(item['fund'] + "抓取完成") raised TypeError when the
        # fund-name xpath missed (None); log via the spider logger with lazy
        # %-args instead, which formats None safely.
        self.logger.info('%s抓取完成', item['fund'])

        yield item

# # xpath调试代码
# from selenium import webdriver
# from selenium.webdriver.chrome.options import Options
# from lxml import etree
# import requests
#
# chrome_options = Options()
# chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-dev-shm-usage')
# chrome_options.add_argument('--headless')
# browser = webdriver.Chrome(chrome_options=chrome_options)
#
# # 访问我们的目标网址
# browser.get("http://fundf10.eastmoney.com/ccmx_161005.html")
#
# # 获取渲染后的html页面
# html = browser.page_source
#
# def print_element(element):
#     print(etree.tostring(element, encoding=str, pretty_print=True))
#
# content = etree.HTML(html) # 分析html，返回DOM根节点
# fund = content.xpath('//*[@id="bodydiv"]/div[8]/div[3]/div[1]/div[1]/div[1]/h4/a/text()')
# scale = content.xpath('//*[@id="bodydiv"]/div[8]/div[3]/div[1]/div[2]/p/label[5]/span/text()')[0].strip().split()[0]
# table = content.xpath('//*[@id="cctable"]/div/div/table')
# print_element(table[0])
#
# trs = table[0].xpath('//tbody/tr')
# print_element(trs[0])
#
# for tr in trs[1:]:
#     code = tr.xpath('td[2]/a')
#     print_element(code[0])
#     print("-"*40)