from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import pandas as pd
import time
import csv
import os


def get_stockdata(browser, url, stockcode):
    """Load the fund-flow page for *stockcode* and return the data table's text.

    Returns an empty string when either the page or the table element
    fails to load within the 5-second timeout.
    """
    page_url = '%s%s.html' % (url, stockcode)
    try:
        browser.set_page_load_timeout(5)
        browser.get(page_url)
    except TimeoutException:
        print('time out')
        return ''
    try:
        # Explicit wait: poll every 0.5s, up to 5s, for the table element;
        # raises TimeoutException if it never appears.
        table = WebDriverWait(browser, 5, 0.5).until(
            ec.presence_of_element_located((By.ID, 'content_zjlxtable')))
    except TimeoutException:
        print('time out')
        return ''
    return table.text


def _normalize_cell(cell):
    """Normalize one table cell: drop '%', expand 万/亿 magnitude suffixes."""
    cell = cell.replace('%', '')
    if '万' in cell:
        return str(round(float(cell.replace('万', '')) * 10000))
    if '亿' in cell:
        return str(round(float(cell.replace('亿', '')) * 100000000))
    return cell


def data_cleaning(content_str):
    """Turn the raw table text into a 2-D list of strings, oldest date first.

    Drops the two header lines, reverses the row order, and normalizes
    every cell: '%' signs removed, Chinese magnitude suffixes '万' (1e4)
    and '亿' (1e8) expanded into plain integer strings.
    """
    rows = content_str.split('\n')[2:]  # skip the two header lines
    rows.reverse()                      # oldest-to-newest
    return [[_normalize_cell(cell) for cell in row.split(' ')] for row in rows]


def create_csv(path, code, data):
    """Create a new CSV at *path* with the column header plus *data* rows.

    *data* is a 2-D list of strings as produced by data_cleaning().
    newline='' is required by the csv module; without it every row is
    followed by a blank line on Windows.
    """
    with open(path, 'w', encoding='utf-8', newline='') as f:
        w = csv.writer(f)
        # Column names: date, closing price, change %, then inflow amount and
        # percentage for main (zl), super-large (cdd), large (dd),
        # medium (zd) and small (xd) orders.
        w.writerow(['date', 'price', 'percentage', 'zl', 'zl(%)', 'cdd',
                    'cdd(%)', 'dd', 'dd(%)', 'zd', 'zd(%)', 'xd', 'xd(%)'])
        w.writerows(data)  # write the data rows
        print(code + ' is created successfully')


def add_csv(path, code, data):
    """Append the rows of *data* that are newer than the CSV's last saved date.

    Reads the existing CSV at *path*, finds the last saved date, then
    scans *data* backwards for that date and appends everything after it.
    If the date is not present at all, the saved file is too stale to be
    extended safely and a warning is printed instead.
    """
    df = pd.read_csv(path)
    last_date = df.tail(1).iat[0, 0]  # last saved date in the file
    for i in range(len(data) - 1, -1, -1):
        if data[i][0] == last_date:  # found where the new data overlaps
            # newline='' and utf-8 keep the appended rows consistent with
            # create_csv() and avoid blank lines on Windows.
            with open(path, 'a', encoding='utf-8', newline='') as c:
                w = csv.writer(c)
                w.writerows(data[i + 1:])  # only the rows after the overlap
            print('update ' + code)
            break
        if i == 0:
            print(code + ' current data is too old!')  # gap too large to bridge


def main():
    """Scrape fund-flow data for every stock code listed in stockcode.txt."""
    url = 'http://data.eastmoney.com/zjlx/'

    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless')  # run without a window
    # Selenium 4 removed the deprecated `chrome_options=` keyword; `options=`
    # works on both Selenium 3 and 4.
    browser = webdriver.Chrome(options=chrome_options)  # start the browser

    with open('stockcode.txt', 'r') as f:
        # split() (not split(' ')) strips the trailing newline and tolerates
        # repeated spaces — split(' ') left '\n' glued to the last code,
        # which corrupted its URL.
        stockcodes = f.readline().split()  # all codes on the first line

    if not os.path.exists('./data'):
        os.mkdir('./data')  # create the data folder if missing

    try:
        for i, code in enumerate(stockcodes):
            csv_path = './data/%s.csv' % code

            if os.path.exists(csv_path):
                # NOTE(review): this skip makes the add_csv() branch below
                # unreachable; drop it if incremental updates are wanted.
                print('%s is completed' % code)
                continue

            if i % 10 == 0:  # coarse progress report every 10 codes
                print(str(round(i / len(stockcodes) * 100, 2)) + '%' + ' is completed')

            content_str = get_stockdata(browser, url, code)  # fetch page data

            if content_str == '':
                print(code + ' content is empty')
                continue

            data = data_cleaning(content_str)  # clean the raw table text

            if not os.path.exists(csv_path):
                create_csv(csv_path, code, data)  # create csv to store the data
            else:
                add_csv(csv_path, code, data)  # append new rows to existing csv
    finally:
        # quit() (not close()) also terminates the chromedriver process,
        # and the try/finally guarantees it even if the loop raises.
        browser.quit()


if __name__ == '__main__':
    main()
