import os
from datetime import date
from time import sleep

import pymysql
import scrapy
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

from webfin.items import stockItem, sentiItem

class SentimentSpider(scrapy.Spider):
    """Scrape post titles from the Eastmoney stock forum (guba) for
    sentiment analysis.

    The forum's post list is rendered by JavaScript, so the page is
    loaded through a headless Chrome driver instead of Scrapy's own
    downloader.  Each crawl yields one ``sentiItem`` containing today's
    date and the concatenated text of the first 30 post-title links.
    """

    name = 'senti'
    allowed_domains = ["*"]
    start_urls = ['http://guba.eastmoney.com']

    def __init__(self):
        super().__init__()
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('log-level=2')
        # Suppress the "DevTools listening ..." console noise from ChromeDriver.
        chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
        # Selenium 4 removed the `chrome_options` keyword; `options=` works on
        # both Selenium 3.8+ and 4.
        self.driver = webdriver.Chrome(options=chrome_options)

    def parse(self, response):
        """Render the page in Chrome, collect the first 30 post titles,
        and yield them as a single sentiment item.

        :param response: the Scrapy response whose URL is re-fetched via
            Selenium (the Scrapy-downloaded body lacks the JS-built list).
        :yields: a ``sentiItem`` with ``name`` (today's date) and ``str``
            (concatenated titles), each wrapped in single quotes.
        """
        item = sentiItem()
        self.driver.get(response.url)
        # Selenium 4 removed find_elements_by_xpath; find_elements(By.XPATH, ...)
        # is the supported spelling (also available in Selenium 3).
        raw_all = self.driver.find_elements(By.XPATH, "//ul[@class='newlist']/li/span/a")

        # Take at most the first 30 titles; ''.join avoids quadratic `+=`.
        s = ''.join(el.text for el in raw_all[:30])

        # NOTE(review): the hand-added surrounding quotes suggest these values
        # are later interpolated into raw SQL.  Prefer parameterized queries in
        # the pipeline; the quoting is preserved here so downstream code that
        # expects it keeps working.
        item['name'] = '\'' + str(date.today()) + '\''
        item['str'] = '\'' + s + '\''

        yield item

    def closed(self, reason):
        """Scrapy shutdown hook: quit Chrome so the headless browser and
        chromedriver processes do not leak when the spider finishes."""
        self.driver.quit()
