import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from tonghuashun import config as cfg
import time
import random



# Pools of candidate User-Agent strings and proxy addresses, supplied by
# the project-level config module.
headers_list = cfg.user_agent
proxy_list = cfg.ip

# Build the Chrome options once at import time: pick one random proxy and
# one random User-Agent so each run of the scraper presents itself as a
# different client.
option = webdriver.ChromeOptions()
# option.add_argument('headless')  # left disabled: run with a visible browser
option.add_argument(f"--proxy-server={random.choice(proxy_list)}")
option.add_argument(f"user-agent={random.choice(headers_list)}")




# 得到起始页的html
# 得到起始页的html
def get_index_page(url, header):
    """Download *url* and return its HTML text.

    Parameters
    ----------
    url : str
        Page to fetch.
    header : dict
        HTTP headers (typically just a User-Agent) forwarded to requests.

    Returns
    -------
    str or None
        The page body on HTTP 200, otherwise ``None``.

    Note: the original version returned the caught *exception object* on
    failure, which callers then passed straight into BeautifulSoup as if
    it were HTML.  Returning ``None`` makes the failure explicit and lets
    callers skip the page.
    """
    try:
        # A timeout keeps the crawl from hanging forever on a dead proxy.
        res_html = requests.get(url, headers=header, timeout=30)
    except requests.RequestException:
        return None
    if res_html.status_code == 200:
        return res_html.text
    return None

# 解析起始页  
# 解析起始页
def parse_index_page(html):
    """Yield one ``{concept_name: concept_url}`` dict per category link.

    Parameters
    ----------
    html : str or None
        Source of the index page; if the upstream fetch failed (``None``
        or empty), the generator simply yields nothing instead of crashing.

    Yields
    ------
    dict
        Single-entry mapping of the anchor's text to its ``href`` for every
        ``<a>`` found under ``.cate_items``.
    """
    if not html:
        return  # upstream fetch failed; nothing to parse
    # Use the stdlib 'html.parser' backend for consistency with the rest of
    # this module (the original asked for third-party 'lxml' here only).
    soup = BeautifulSoup(html, 'html.parser')
    for anchor in soup.select('.cate_items a'):
        yield {anchor.get_text(): anchor.attrs['href']}

# 用于得到每一个概念的html   
# 用于得到每一个概念的html
def get_block_detail(url, driver, delay=11):
    """Render *url* in the Selenium *driver* and return the page source.

    Parameters
    ----------
    url : str
        Ajax detail-page URL for one concept board.
    driver : selenium WebDriver
        An already-constructed browser session.
    delay : float, default 11
        Seconds to wait after navigation so the ajax table can finish
        rendering (generalizes the previously hard-coded 11-second sleep;
        the default preserves the old behavior).

    Returns
    -------
    str or None
        The rendered HTML, or ``None`` when navigation failed.  The
        original returned the exception object itself, which downstream
        code then tried to parse as HTML.
    """
    try:
        driver.get(url)
        time.sleep(delay)  # crude wait; presumably enough for the ajax table
        return driver.page_source
    except Exception:
        return None

# 解析每个详情页
# 解析每个详情页
def parse_block_detail(html, concept):
    """Parse one concept board's rendered table into stock rows.

    Parameters
    ----------
    html : str
        Rendered page source from :func:`get_block_detail`.
    concept : str
        Concept (board) name appended to every extracted row.

    Returns
    -------
    list or None
        A ``[code, name, price?, concept]``-style list per table row
        (the first three whitespace-separated cell texts plus the concept
        name); rows with fewer than three fields are skipped instead of
        raising IndexError.  Returns ``None`` when parsing fails entirely,
        matching the original best-effort contract.
    """
    try:
        soup = BeautifulSoup(html, 'html.parser')
        rows = soup.select('body > table > tbody > tr ')
        td_list = []
        for row in rows:
            # The original also called row.select('td > a') here and threw
            # the result away; that dead call is removed.
            fields = row.text.split()
            if len(fields) >= 3:
                td_list.append([fields[0], fields[1], fields[2], concept])
        return td_list
    except Exception:
        # Best-effort: dump the offending page for debugging and return
        # None rather than aborting the whole crawl.
        print(html)


def main():
    """Crawl every 10jqka concept (概念) board and collect its stock tables.

    Returns
    -------
    list
        One entry per concept board; each entry is a list of per-page row
        lists as produced by :func:`parse_block_detail`.

    Fixes over the original:
    - the WebDriver is now closed in a ``finally`` block (the browser
      process previously leaked on every run / exception);
    - the unused ``as err`` binding on the IndexError handler is dropped;
    - a failed category-page fetch is skipped instead of being fed to
      BeautifulSoup.
    """
    url = 'http://q.10jqka.com.cn/gn/'
    header = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'}
    driver = webdriver.Chrome(chrome_options=option)
    driver.implicitly_wait(10)
    concept_stock = []
    try:
        # Index page -> iterator of {concept_name: concept_url} dicts.
        html = get_index_page(url, header)
        for each_url in parse_index_page(html):
            concept_name = list(each_url.keys())[0]
            each_url_value = list(each_url.values())[0]
            # First page of the board, fetched plainly to read the pager.
            index_html = get_index_page(each_url_value, header)
            if index_html is None:
                continue  # fetch failed; skip this concept
            soup = BeautifulSoup(index_html, 'html.parser')
            try:
                # Pager text looks like "1/7"; the part after '/' is the
                # total page count.
                page_num = int(soup.select('#m-page > span')[0].text.split('/')[-1])
            except IndexError:
                page_num = 1  # single-page boards render no pager widget
            stock_ = []
            # Render each ajax page with Selenium and parse its table.
            for page_i in range(page_num):
                detail_ = each_url_value.split('detail/')
                detail_url = (detail_[0]
                              + 'detail/field/264648/order/desc/page/'
                              + str(page_i + 1) + '/ajax/1/' + detail_[1])
                detail_html = get_block_detail(detail_url, driver)
                stock_.append(parse_block_detail(detail_html, concept_name))
            concept_stock.append(stock_)
    finally:
        driver.quit()  # always release the browser process
    return concept_stock



# Script entry point: run the full crawl; the result stays bound at module
# level so it can be inspected after an interactive run.
if __name__ == '__main__':
    concept_stock = main()
