import time
from bs4 import BeautifulSoup
from selenium import webdriver
from eastmoney import config as cfg
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import TimeoutException

# Chrome launch options for the scraping driver. The headless / proxy /
# user-agent switches are intentionally left disabled; uncomment to enable.
# NOTE(review): the proxy/user-agent lines reference `random`, `proxy_list`
# and `headers_list`, which are not defined in this file — confirm before
# re-enabling them.
option = webdriver.ChromeOptions()
# option.add_argument('headless')
# option.add_argument("--proxy-server=" + random.choice(proxy_list))
# option.add_argument('user-agent=' + random.choice(headers_list))

#-----------------------------------------------------------------------------------------------------------------------
# # No longer used — these pages are rendered by JS, so plain requests won't work
# def get_url_code_xxx(url, header):
#     try:
#         res = requests.get(url, headers=header)
#         if res.status_code == 200:
#             return res.text
#         return res.status_code
#     except ConnectionError as err:
#         cfg.logger.error(err)
#         return None
#-----------------------------------------------------------------------------------------------------------------------

def get_url_code(url, driver):
    """Return the driver's currently loaded page source, or None on failure.

    The ``url`` argument is unused and kept only for interface
    compatibility — the caller must have already navigated ``driver``
    to the desired page.
    """
    try:
        return driver.page_source
    except Exception as err:
        # Log and degrade to None so the caller can decide how to recover.
        cfg.logger.error(err)
        return None

def get_concept_url(html):
    """Parse one listing page of the concept-board table.

    Args:
        html: Page source of the '#main-table' listing page.

    Returns:
        A list of ``[concept_id, concept_name, concept_url]`` rows, one per
        ``<tr>`` in the table body.
    """
    soup = BeautifulSoup(html, 'html.parser')
    concept = []
    # Fixed: the original pre-selected concept_a / concept_id at function
    # scope and then unconditionally overwrote them inside the loop — dead
    # code removed. The first <td> link per row is also selected once now
    # instead of twice.
    for row in soup.select('#main-table > tbody > tr'):
        link = row.select('td > a')[0]
        concept_name = link.text
        concept_href = 'http://quote.eastmoney.com' + link.attrs['href']
        concept_id = row.select('td.listview-col-number')[0].text
        concept.append([concept_id, concept_name, concept_href])
    return concept

# Collect the url links of all concept boards
def get_url_list(url, driver):
    """Walk every page of the concept-board table and collect its rows.

    Args:
        url: Listing-page URL to load.
        driver: A live selenium WebDriver.

    Returns:
        A list with one entry per table page; each entry is the list of
        ``[id, name, url]`` rows produced by :func:`get_concept_url`.
    """
    driver.get(url)
    html = get_url_code(url, driver)
    soup = BeautifulSoup(html, 'html.parser')
    concept_url = []
    try:
        page_count = int(soup.select('#main-table_paginate_page > a')[-1].text)
    except IndexError:
        cfg.logger.error('未能获取到概念页数, 重新获取')
        # Fixed: the original recursed here without returning, then fell
        # through to the loop with page_count unbound (NameError). Return
        # the retry's result instead.
        return get_url_list(url, driver)

    wait = WebDriverWait(driver, 10)  # hoisted: one waiter for all pages
    for page_no in range(1, page_count + 1):
        concept_url.append(get_concept_url(html))
        if page_no < page_count:
            # Type the next page number into the paginator box and submit.
            # (renamed from `input`, which shadowed the builtin)
            page_box = wait.until(EC.presence_of_element_located(
                (By.CSS_SELECTOR, '#main-table_paginate > input')))
            submit = wait.until(EC.element_to_be_clickable(
                (By.CSS_SELECTOR, '#main-table_paginate > a')))
            page_box.clear()
            page_box.send_keys(page_no + 1)
            submit.click()
            html = get_url_code(url, driver)
    return concept_url


def get_detail_info(url, each_concept, driver):
    """Open one concept's detail page, expand its report list, and return
    the pagination element(s).

    Fixed: the original printed and returned ``page`` without ever assigning
    it (the extraction code was commented out), so every call raised
    NameError. The commented-out logic has been restored.

    Args:
        url: The concept's detail-page URL.
        each_concept: Concept name (currently unused; kept for interface
            compatibility).
        driver: A live selenium WebDriver.

    Returns:
        The result of the '#PageCont' CSS select (a list of tags), or 1 if
        the lookup fails.
    """
    driver.get(url)
    para = 'body > div:nth-child(1) > div:nth-child(8) > div.report > div.main_box > div > div.RMBCont > div > div > p > a'
    wait = WebDriverWait(driver, 10)
    view_more = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, para)))
    view_more.click()
    time.sleep(2)  # give the expanded list time to render

    # Get the page count from the paginator.
    html = driver.page_source
    soup = BeautifulSoup(html, 'html.parser')
    try:
        page = soup.select('#PageCont > a:nth-child(3)')
    except Exception as err:
        cfg.logger.error(err)
        page = 1
    print(page)
    return page



def main():
    """Crawl every concept board and fetch each one's detail page.

    Returns:
        The result of the last get_detail_info call, or None when no
        concepts were found.
    """
    url = cfg.index_url
    driver = webdriver.Chrome()
    # Fixed: each_result was unbound when the concept list was empty,
    # raising UnboundLocalError at the return below.
    each_result = None
    try:
        # [id, name, url] rows for every concept board, grouped by page
        concept_url = get_url_list(url, driver)
        for page_rows in concept_url:
            for each in page_rows:
                each_url = each[-1]
                each_concept = each[1]
                print(each_concept)
                each_result = get_detail_info(each_url, each_concept, driver)
    finally:
        # quit() tears down the whole browser session even on errors;
        # the original close() only closed one window and leaked the
        # driver process on exceptions.
        driver.quit()
    return each_result

if __name__ == '__main__':
    each_result = main()
    # TODO: still missing selenium 403 handling and rotating proxies















