import requests
from bs4 import BeautifulSoup
import traceback
import re

def getHTML(url):
    """Fetch *url* and return its decoded text, or None on any request failure.

    Returns None (falsy) on failure so callers can test ``if not html``.
    (Previously returned 0 — also falsy, so truthiness-based callers are
    unaffected; None is the idiomatic "no result" sentinel.)
    """
    try:
        r = requests.get(url, timeout=30)
        # Turn HTTP 4xx/5xx status codes into an exception.
        r.raise_for_status()
        # The default header-based encoding guess is often wrong for
        # Chinese pages; apparent_encoding sniffs the body instead.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any requests-level failure yields None.
        return None

if __name__ == '__main__':
    output_file = 'C:/Users/kai/Desktop/邮科院学习/爬虫/stock.txt'
    url = 'https://gupiao.baidu.com/stock/sz00000'

    # Append the stock-code digit and page extension (hard-coded page '1').
    url = url + '1' + '.html'
    html = getHTML(url)
    if not html:
        # getHTML returns a falsy sentinel on failure; previously this fell
        # straight into BeautifulSoup(0, ...) and crashed with a TypeError.
        print('Failed to fetch', url)
    else:
        infoDict = {}
        soup = BeautifulSoup(html, 'html.parser')
        stockinfo = soup.find('div', attrs={'class': "stock-bets"})
        if stockinfo is None:
            # Page layout changed or the stock does not exist; avoid the
            # AttributeError the unchecked .find_all() call would raise.
            print('No stock-bets section found at', url)
        else:
            name = stockinfo.find_all(attrs={'class': 'bets-name'})[0]
            infoDict.update({'股票名称': name.text.split()[0]})
            # <dt> elements hold field labels and <dd> elements the matching
            # values; zip pairs them and tolerates unequal list lengths
            # (the index-based loop risked an IndexError).
            keyList = stockinfo.find_all('dt')
            valueList = stockinfo.find_all('dd')
            for key_tag, val_tag in zip(keyList, valueList):
                infoDict[key_tag.text] = val_tag.text

            with open(output_file, 'a', encoding='utf-8') as f:
                f.write(str(infoDict) + '\n')
