from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import re
import os
import time
# Shared headless-Chrome configuration used by every collector instance.
chrome_options = webdriver.ChromeOptions()
for _flag in ('--headless', '--disable-gpu', '--no-sandbox'):
    chrome_options.add_argument(_flag)

def deal(s):
    """Flatten multi-line article text onto one line by replacing each newline with '@'."""
    return s.replace('\n', '@')

def get_mini_location(s):
    """Return the most specific administrative unit in a Chinese location string.

    The string is split into segments, each segment ending at one of the
    division markers 省/市/区/县 (province/city/district/county); any trailing
    characters after the last marker form a final segment of their own.
    The last segment is returned, e.g. '北京市朝阳区' -> '朝阳区'.

    Fix vs. original: an empty string used to raise IndexError (ss[-1] on an
    empty list); it now returns ''.
    """
    if not s:
        return ''
    markers = ('省', '市', '区', '县')
    segments = []
    current = ''
    for ch in s:
        current += ch
        if ch in markers:
            segments.append(current)
            current = ''
    if current:
        # Trailing characters with no division marker (e.g. a bare name).
        segments.append(current)
    return segments[-1]


class collector:
    """Scrapes vegetable price quotes and market-news articles from
    shucai123.com using a headless Chrome WebDriver.

    All `find_element*` calls use the Selenium 4 style (`find_element(By.X, …)`,
    which also exists in Selenium 3) instead of the removed
    `find_element_by_*` helpers.
    """

    def __init__(self):
        # `options=` replaces the deprecated `chrome_options=` keyword
        # (removed in Selenium 4); behavior is identical.
        self.dr = webdriver.Chrome(options=chrome_options)

    def get(self, url):
        """Navigate the driver to `url`."""
        self.dr.get(url)

    def save_csv(self, data, name, sep='\t', columns=None):
        """Write DataFrame `data` to <script dir>/Data/<name>.csv.

        data    : pandas.DataFrame to persist.
        name    : file stem (no extension).
        sep     : field separator; default '\\t' (the separator the original
                  hard-coded and that __main__ reads back with).
        columns : optional rename mapping applied before writing; the caller's
                  frame is not mutated.

        Fixes vs. original: referenced an undefined `df` (NameError on every
        call); ignored the `sep` parameter; used a mutable `{}` default;
        `os.path.dirname(__file__)` is '' when the script is run from its own
        directory, which produced the broken path '/Data/…' — abspath fixes
        that; Data/ is now created if missing.
        """
        if columns:
            data = data.rename(columns=columns)
        out_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Data')
        os.makedirs(out_dir, exist_ok=True)
        data.to_csv(os.path.join(out_dir, '%s.csv' % name), index=False, sep=sep)

    def GetPrice(self, latest_url='', maxPage=50):
        """Crawl price-quote pages and return them as a DataFrame.

        latest_url : detail URL of the newest already-stored quote; crawling
                     stops (exclusive) when it is encountered, enabling
                     incremental updates.
        maxPage    : hard page limit.

        Returns a DataFrame with columns
        ['Date','FullCity','Veg','Price','Details','Url','City','RecordTime'].
        Any scraping exception (missing element, stale page, no next link)
        ends the crawl and returns what was collected so far.
        """
        print('开始采集报价..........')
        self.get("http://www.shucai123.com/price.php")
        data = []
        page = 0
        end = 0
        while True:
            page += 1
            if page % 5 == 0:
                print(page)  # lightweight progress indicator
            if page > maxPage:
                break
            try:
                # [1:] skips the table header row.
                items = self.dr.find_elements(By.XPATH, "/html/body/div[3]/table/tbody/tr")[1:]
                for item in items:
                    tds = item.find_elements(By.TAG_NAME, "td")
                    line = []
                    # Full date lives in the cell's title attribute; keep the
                    # date part only.  May be absent -> empty string.
                    datet = tds[0].get_attribute("title")
                    if datet:
                        line.append(datet.split()[0])
                    else:
                        line.append('')
                    line.append(tds[1].text)  # full city/market name
                    line.append(tds[2].text)  # vegetable name
                    # First numeric token of the bold price text.
                    line.append(float(re.findall(r"\d+\.?\d*", tds[3].find_element(By.TAG_NAME, 'b').text)[0]))
                    line.append(tds[3].find_element(By.TAG_NAME, 'p').text)
                    url = tds[5].find_element(By.TAG_NAME, 'a').get_attribute("href")
                    line.append(url)
                    line.append(get_mini_location(line[1]))  # most specific district
                    line.append(int(time.time()))
                    print(url, '------', latest_url)
                    if url == latest_url:
                        # Reached the newest previously-stored quote: stop
                        # without recording it again.
                        end = 1
                        break
                    data.append(line)
                # Fix vs. original: check the stop flag BEFORE clicking
                # "下一页", so we no longer load one page past the stop URL.
                if end:
                    break
                self.dr.find_element(By.LINK_TEXT, "下一页").click()
            except Exception as e:
                # Best-effort crawl: any failure (e.g. no next-page link on
                # the last page) ends the loop with the data gathered so far.
                print(e)
                break
        cols = ['Date', 'FullCity', 'Veg', 'Price', 'Details', 'Url', 'City', 'RecordTime']
        print('本次采集了', len(data), '条报价')
        return pd.DataFrame(data, columns=cols)

    def getArticleList(self, latest_url='', maxPage=50):
        """Crawl the news-listing pages and return the article URLs.

        latest_url : URL of the newest already-stored article; stops
                     (exclusive) when encountered.
        maxPage    : hard page limit.

        Returns a list of article URLs, newest first.
        """
        print('开始采集文章链接..........')
        self.get("http://www.shucai123.com/hangqing/")
        page = 0
        end = 0
        urlList = []
        while True:
            page += 1
            if page % 5 == 0:
                print(page)
            if page > maxPage:
                break
            try:
                items = self.dr.find_element(By.CSS_SELECTOR, "#list").find_elements(By.TAG_NAME, "li")
                for item in items:
                    url = item.find_element(By.CSS_SELECTOR, "h2 > a").get_attribute("href")
                    print(url, '------', latest_url)
                    if url == latest_url:
                        end = 1
                        break
                    urlList.append(url)
                # Fix vs. original: stop before clicking "下一页" when done.
                if end:
                    break
                self.dr.find_element(By.LINK_TEXT, "下一页").click()
            except Exception as e:
                print(e)
                break
        return urlList

    def GetArticle(self, urlList, maxPage=500):
        """Fetch each article page in `urlList` and return a DataFrame with
        columns ['Title','Date','Text','Url','RecordTime'].

        maxPage caps the number of articles fetched.  NOTE: as in the
        original, the first failing article aborts the remaining fetches.
        """
        print('开始采集文章..........')
        data = []
        page = 0
        for url in urlList:
            page += 1
            if page % 5 == 0:
                print(page)
            if page > maxPage:
                break
            try:
                self.get(url)
                line = []
                # First whitespace-separated token of #op holds "发布时间：<date>".
                details = self.dr.find_element(By.CSS_SELECTOR, "#op").text.split()[0]
                line.append(self.dr.find_element(By.CSS_SELECTOR, "#l > div > h1").text)
                line.append(details.split("：")[1])  # text after the full-width colon
                line.append(deal(self.dr.find_element(By.CSS_SELECTOR, "#mby").text))
                line.append(url)
                line.append(int(time.time()))
                data.append(line)
            except Exception as e:
                print(e)
                break
        cols = ['Title', 'Date', 'Text', 'Url', 'RecordTime']
        print('本次采集了', len(data), '条舆情文章')
        return pd.DataFrame(data, columns=cols)
if __name__ == "__main__":
    coll = collector()
    # Re-crawl the article bodies for every URL stored in Data/ArticleUrl.csv.
    url_frame = pd.read_csv("%s/%s.csv" % (os.path.dirname(__file__), 'Data/ArticleUrl'), sep='\t')
    # Column '2' holds the stored URLs; the first 24 characters (a fixed
    # prefix added when the file was written) are stripped before fetching.
    coll.GetArticle([u[24:] for u in list(url_frame['2'])])

