# -*- coding: utf-8 -*-

import scrapy
import re
from huibo_spider.items import HuiboItem

class HuiboSpider(scrapy.Spider):
    """Spider for research-report listings on hibor.com.cn.

    Walks the paginated listing pages, selects reports whose page count is
    at least 20, follows each report's detail link (with login cookies) to
    pick up the share timestamp, and yields one ``HuiboItem`` per report.
    """

    name = 'huibo'
    base_domain = 'hibor.com.cn'
    base_url = 'http://hibor.com.cn'
    allowed_domains = [base_domain]
    start_url = 'http://www.hibor.com.cn/microns_1_136.html'
    start_urls = [start_url]

    # NOTE(review): hard-coded session cookies captured from a browser; they
    # will expire. Consider replacing with a proper login request or moving
    # them into project settings. Hoisted to class constants so the dicts are
    # not rebuilt on every parse() call.
    LOGIN_COOKIES = { 'MBname':'yz001007', 'MBpermission':'0', 'Hm_lpvt_d554f0f6d738d9e505c72769d450253d':'1577245270'}
    PAGE_COOKIES = {'ASPSESSIONIDSSTTBTSD':'INOAGAGBMPNDHPPJCFCGHJHN', 'safedog-flow-item':'30037EE158BE25747039BF482AA4B86A', 'UM_distinctid':'16f3adaaa2a85b-047fbd3977a579-6701b35-1fa400-16f3adaaa2bb91', 'Hm_lvt_d554f0f6d738d9e505c72769d450253d':'1576737472,1577166595,1577240351', 'robih':'MDxWpPoOrQqQnNqP', 'MBpermission':'0', 'MBname':'yz001007', 'ASPSESSIONIDQARSDTSD':'LIHCJEGBJKOPIPLAHKILIPHF', 'did':'67A671BFE', 'ASPSESSIONIDCCCASTQB':'JNFBNIGBAJHDFPAHPJCOGEAL', 'CNZZDATA1752123':'cnzz_eid%3D1382382372-1577235496-null%26ntime%3D1577243245', 'ASPSESSIONIDQQQTDSSB':'FLJONMGBFINOGNNAMMCPAIEE', 'Hm_lpvt_d554f0f6d738d9e505c72769d450253d':'1577245322'}

    # Minimum report length (in pages) worth scraping.
    MIN_PAGE_COUNT = 20

    def parse(self, response):
        """Parse a listing page.

        Yields a detail-page ``scrapy.Request`` (carrying a partially filled
        item in ``meta``) for each qualifying report, then a request for the
        next listing page if one exists.
        """
        for sel in response.xpath('//td[@class="td_spantxt"]/span[5]'):
            # span[5] carries the page count, e.g. '...：20 页...'. Extract
            # the digits with a regex instead of the original fragile
            # slice arithmetic (index("页") minus one), which silently
            # truncated the number when no extra character preceded '页'.
            page_str = sel.get()
            after_colon = page_str.split('：')[1]
            match = re.search(r'\d+', after_colon)
            if match is None:
                # Unexpected markup for this row; skip rather than crash.
                self.logger.warning('could not parse page count from %r', page_str)
                continue
            page_num = match.group()
            if int(page_num) < self.MIN_PAGE_COUNT:
                continue

            item = HuiboItem()
            item['page_num'] = page_num
            # Sibling span[1] holds the publication date.
            item['date'] = sel.xpath('../span[1]/text()').get()
            # The title link lives two <tr> rows above this cell.
            title_node = sel.xpath('../../preceding-sibling::tr[2]/td/span[2]/a')
            title = title_node.xpath('text()')[0].get()
            detail_url = self.base_url + title_node.attrib['href']
            item['detail_url'] = detail_url
            # Title assumed to be '...-...-STOCKID-...' — TODO confirm format.
            item['stock_id'] = title.split('-')[2]
            item['title'] = title

            request = scrapy.Request(url=detail_url,
                                     cookies=self.LOGIN_COOKIES,
                                     callback=self.parse_page)
            request.meta['item'] = item
            yield request

        # Pagination: a[3] in the footer table is the "next page" link.
        next_url = response.xpath('//table[@class="fytab"]/tr/td/a[3]/@href').extract()
        if next_url:
            yield scrapy.Request(url=self.base_url + next_url[0],
                                 cookies=self.PAGE_COOKIES,
                                 callback=self.parse)

    def parse_page(self, response):
        """Parse a report detail page: fill in share_time and yield the item.

        The original code crashed with AttributeError/IndexError when the
        share node was absent or had no space in its text; both cases are
        now handled by yielding the item with ``share_time`` set to None.
        """
        item = response.meta['item']
        share_text = response.xpath('//table[@class="btab"]//td[3]/span/text()').get()
        # Text assumed to be 'DATE TIME'; the second token is the time —
        # TODO confirm against a live page.
        parts = share_text.split(' ') if share_text else []
        item['share_time'] = parts[1] if len(parts) > 1 else None
        yield item
           
