from scrapy.spiders import Spider
from scrapy import Request
import pymongo
import unidecode
import logging
from bs4 import BeautifulSoup
from ..items import NewsItem


class HSNewSpider(Spider):
    """Crawl A-share (Shanghai/Shenzhen) stock news lists from Sina Finance.

    Stock symbols are read from the ``hs.stocklist`` MongoDB collection;
    each parsed entry is emitted as a ``NewsItem`` and routed to
    ``HSNewsPipeline``.
    """
    name = 'hsnews'
    newsurl = 'http://vip.stock.finance.sina.com.cn/corp/view/vCB_AllNewsStock.php?symbol={}&Page={}'
    custom_settings = {
        'ITEM_PIPELINES': {
            'newscrawler.pipelines.HSNewsPipeline': 300
        }
    }

    def start_requests(self):
        """Yield one first-page news request per symbol in ``hs.stocklist``."""
        # Context manager ensures the Mongo connection is closed once the
        # (lazy) generator has been fully consumed.
        with pymongo.MongoClient() as client:
            for doc in client.hs.stocklist.find():
                yield Request(url=self.newsurl.format(doc['symbol'], 1),
                              callback=self.parse,
                              meta={'symbol': doc['symbol']})

    def parse(self, response):
        """Parse one news-list page: yield NewsItems, then follow pagination."""
        symbol = response.meta.get('symbol')
        ul = response.xpath('//*[@class="datelist"]/ul').extract_first()
        if ul is None:
            # Guard: extract_first() returns None on an empty/changed page;
            # the original slicing would raise an unhandled TypeError here.
            logging.error('no datelist found for %s at %s', symbol, response.url)
            return
        # Strip the surrounding "<ul>" / "</ul>" tags; entries are
        # separated by <br>.
        content = ul[5:-5].strip()
        for raw_line in content.split('<br>'):
            entry = raw_line.strip()
            if not entry:
                continue
            item = self.to_item(entry)
            if item is not None:
                item['symbol'] = symbol
                yield item
        nexturl = response.xpath('//*[text()="下一页"]/@href').extract_first()
        if nexturl:
            # Explicit check instead of relying on Request(url=None) raising;
            # urljoin resolves relative pagination links against this page.
            yield Request(url=response.urljoin(nexturl), callback=self.parse,
                          meta=response.meta)

    def to_item(self, line):
        """Build a NewsItem from one ``<date> <a href="...">title</a>`` line.

        Returns None (and logs the offending line) when the markup does not
        match the expected shape.
        """
        try:
            a = line.index('<a')
            b = line.index('href=')
            c = line.index('</a>')
            dt = line[:a]
            href, title = line[b:c].split('>')
            item = NewsItem()
            # unidecode normalizes the full-width / non-breaking spaces Sina
            # embeds in the date string.
            item['datetime'] = unidecode.unidecode(dt).strip()
            # Drop the leading 'href="' and the trailing '"'.
            item['href'] = href[6:-1]
            item['title'] = title
            return item
        except Exception as e:
            # Best-effort per-line parsing: skip lines with unexpected markup.
            logging.error(line)
            logging.error(e)
            return None


class HKNewsSpider(Spider):
    """Crawl Hong Kong stock news lists from Sina Finance.

    Stock symbols are read from the ``hk.stocklist`` MongoDB collection;
    each parsed entry is emitted as a ``NewsItem`` and routed to
    ``HKNewsPipeline``.
    """
    name = 'hknews'
    newsurl = 'http://stock.finance.sina.com.cn/hkstock/go/CompanyNews/page/{1}/code/{0}.html'
    custom_settings = {
        'ITEM_PIPELINES': {
            'newscrawler.pipelines.HKNewsPipeline': 300
        }
    }

    def start_requests(self):
        """Yield one first-page news request per symbol in ``hk.stocklist``."""
        # Context manager ensures the Mongo connection is closed once the
        # (lazy) generator has been fully consumed.
        with pymongo.MongoClient() as client:
            for doc in client.hk.stocklist.find():
                yield Request(url=self.newsurl.format(doc['symbol'], 1),
                              callback=self.parse,
                              meta={'symbol': doc['symbol']})

    def parse(self, response):
        """Parse one news-list page: yield NewsItems, then follow pagination."""
        symbol = response.meta.get('symbol')
        # Renamed from 'list', which shadowed the builtin.
        entries = response.xpath('//*[@class="list01"]/li').extract()
        for li in entries:
            item = self.to_item(li)
            if item is not None:
                item['symbol'] = symbol
                yield item
        nexturl = response.xpath('//*[text()="下一页"]/@href').extract_first()
        # On the last page the "next" link is a javascript no-op; also guard
        # against a missing link (extract_first() -> None), which previously
        # produced Request(url=None) swallowed by a blanket except.
        if nexturl and nexturl != 'javascript:volid(0);':
            # urljoin resolves relative pagination links against this page.
            yield Request(url=response.urljoin(nexturl), callback=self.parse,
                          meta=response.meta)

    def to_item(self, data):
        """Build a NewsItem from one ``<li>`` fragment (span=date, a=title/link).

        Returns None (and logs) when the fragment does not match the
        expected shape.
        """
        try:
            soup = BeautifulSoup(data, 'lxml')
            item = NewsItem()
            item['datetime'] = soup.span.get_text()
            item['title'] = soup.a.get_text()
            item['href'] = soup.a['href']
            return item
        except Exception as e:
            # Best-effort per-entry parsing: skip fragments with unexpected markup.
            logging.error(e)
            return None
