# http://quote.eastmoney.com/stocklist.html

import requests
import csv
from requests.exceptions import ReadTimeout,ConnectionError,RequestException
from bs4 import BeautifulSoup as bs
import json
import requests
from urllib import request,parse

class Agu(object):
    """Scrapes the Shenzhen ('sz') stock-code list from eastmoney's stocklist page."""

    def request_url(self, url_string):
        """GET *url_string* and return the response body decoded as GBK.

        Returns None (after printing a diagnostic) on a non-200 status or on
        any handled request error.
        """
        header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
            'Host': 'quote.eastmoney.com'
        }
        try:
            # A timeout is required for the ReadTimeout handler below to ever
            # fire; without one, requests can block indefinitely.
            with requests.get(url_string, headers=header, timeout=10) as f:
                if f.status_code == requests.codes.ok:
                    return f.content.decode('GBK')
                else:
                    print('Stats', f.status_code, f.reason)
        except ReadTimeout:
            print('Time Out')
        except ConnectionError:
            print('Connection Error')
        except RequestException:
            print('RequestError')

    def get_html_data(self):
        """Fetch the stock-list page; return its HTML, or None on failure."""
        url_string = r'http://quote.eastmoney.com/stocklist.html'
        results = self.request_url(url_string)
        # BUG FIX: the original tested `if requests:` — the imported module
        # object, which is always truthy — instead of the fetched payload.
        if results:
            return results
        else:
            print('no datas')

    def parse_html(self):
        """Return a list of Shenzhen ticker codes such as 'sz000001'.

        Parses the second <ul> inside div#quotesearch (index 0 is Shanghai,
        index 1 is Shenzhen) and extracts the 6-digit code embedded in link
        text of the form '平安银行(000001)'.
        """
        req = self.get_html_data()
        soup = bs(req, 'html.parser')
        find_id = soup.find('div', id='quotesearch')
        find_ul = find_id.find_all('ul')
        list_num = []
        for find_li in find_ul[1].find_all('li'):  # 0 is Shanghai sh, 1 is Shenzhen sz
            for i in find_li.find_all('a', target='_blank'):
                # BUG FIX: the original joined these checks with `and`, so a
                # non-None but too-short label was still processed; skip any
                # label that cannot contain a 6-digit code.
                if i.string is None or len(i.string) < 6:
                    continue
                # Same characters as the original s[-2:-8:-1][::-1] dance:
                # the 6 characters just before the trailing ')'.
                str_num = i.string[-7:-1]
                if str_num.isdigit():
                    list_num.append('sz' + str_num)
        return list_num
class Row(object):
    """Fetches and parses a single stock's quote page from gupiao.baidu.com."""

    @staticmethod
    def request_url1(url_string):
        """GET *url_string* and return the response body decoded as UTF-8.

        Declared a staticmethod: the original defined it without `self` and
        invoked it as `Row.request_url1(url)`, which only worked by accident.
        Returns None (after printing a diagnostic) on failure.
        """
        header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
            'Host': 'gupiao.baidu.com'
        }
        try:
            # Timeout added so the ReadTimeout handler below can actually fire.
            with requests.get(url_string, headers=header, timeout=10) as f:
                if f.status_code == requests.codes.ok:
                    return f.content.decode('utf-8')
                else:
                    print('Stats', f.status_code, f.reason)
        except ReadTimeout:
            print('Time Out')
        except ConnectionError:
            print('Connection Error')
        except RequestException:
            print('RequestError')

    def get_html_data1(self, num):
        """Fetch the quote page for ticker *num* (e.g. 'sz000001'); None on failure."""
        url_string = r'https://gupiao.baidu.com/stock/' + num + '.html'
        results = self.request_url1(url_string)
        # BUG FIX: the original tested the always-truthy `requests` module
        # instead of the fetched payload.
        if results:
            return results
        else:
            print('no datas')

    def parse_html1(self, req):
        """Parse quote-page HTML *req* into a {label: value} dict.

        Reads each <dl> inside div.bets-content, pairing its <dt> label with
        its <dd> value. A <dt> with no text is taken to be the P/E-ratio row,
        so the key falls back to '市盈率'.
        """
        soup = bs(req, 'html.parser')
        dict_all = {}
        find_class = soup.find('div', class_='bets-content')
        if find_class is None:
            # Layout missing (fetch failed or page changed): return empty
            # instead of raising AttributeError on None.find_all(...).
            return dict_all
        for find_dl in find_class.find_all('dl'):
            key = None
            value = None
            for find_dt in find_dl.find_all('dt'):
                if find_dt.string is None:
                    key = '市盈率'
                else:
                    key = find_dt.string
            for find_dd in find_dl.find_all('dd'):
                # Guard: a <dd> containing nested markup has .string == None.
                value = find_dd.string.strip() if find_dd.string else ''
            if key is not None:
                # setdefault keeps the FIRST occurrence when a label repeats
                # (multiple blank <dt>s all map to '市盈率') — preserved from
                # the original.
                dict_all.setdefault(key, value)
        return dict_all
def write1(dict_data, filename='szgupiao.csv'):
    """Write scraped rows to *filename* as a two-column UTF-8 CSV.

    Args:
        dict_data: iterable of dicts keyed by '股票代码' and '交易情况'
                   (values are stringified by csv.DictWriter).
        filename: output path; the default keeps the original hard-coded
                  behavior, so existing callers are unaffected.
    """
    with open(filename, 'w', newline='', encoding='utf-8') as f_csv:
        f_name = ['股票代码', '交易情况']
        writer = csv.DictWriter(f_csv, fieldnames=f_name)
        writer.writeheader()
        writer.writerows(dict_data)
# Module-level scraper instances shared by main().
agu = Agu()
row = Row()


def main():
    """Scrape Shenzhen ticker codes, fetch quote details for the first one,
    print the collected records, and dump them to szgupiao.csv."""
    list_data = []
    for code in agu.parse_html()[:1]:  # first ticker only, as in the original
        page = row.get_html_data1(str(code))
        list_data.append({'股票代码': code, '交易情况': row.parse_html1(page)})
    print(list_data)
    write1(list_data)


if __name__ == '__main__':
    main()






# NOTE(review): dead code — the block below is a module-level string literal
# (an unused expression statement, never executed as code) holding an earlier
# manual test for a single ticker (sh501000). Kept verbatim for reference.
'''
测试  sh501000

def request_url1(url_string):
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)',
        'Host': 'gupiao.baidu.com'
    }
    try:
        with requests.get(url_string, headers=header) as f:
            if f.status_code == requests.codes.ok:
                return f.content.decode('utf-8')
            else:
                print('Stats', f.status_code, f.reason)
    except ReadTimeout:
        print('Time Out')
    except ConnectionError:
        print('Connection Error')
    except RequestException:
        print('RequestError')




def get_html_data(num):
    url_string = r'https://gupiao.baidu.com/stock/'+num+'.html'
    results = request_url1(url_string)
    if requests:
        return results
    else:
        print('no datas')


def parse_html1():
    req = get_html_data('sh501000')
    soup = bs(req, 'html.parser')
    find_class = soup.find('div', class_='bets-content')
    list_all = []
    for find_dl in find_class.find_all('dl'):
        for find_dt in find_dl.find_all('dt'):
            if find_dt.string == None:
                key = '市盈率'
            else:
                key = find_dt.string
        for find_dd in find_dl.find_all('dd'):
            value = find_dd.string.strip()
        list_all.append({key: value})
    return list_all
print(parse_html1())
'''