import requests
import pymysql
from bs4 import BeautifulSoup

# 1. Connect to MySQL and rebuild the table. Yesterday's weather is of no
#    value, so the table is dropped and recreated on every run instead of
#    keeping historical rows.
db = pymysql.connect(host="localhost", user="root", password="123456", database="spring")
curses = db.cursor()
# tianqi1 holds only today's forecast data.
curses.execute("drop table if exists tianqi1")
create_table = """create table tianqi1(
        province varchar(20),
        city varchar(20),
        weather varchar(20),
        wind varchar(20),
        maxte varchar(20),
        nightweather varchar(20),
        nightwind varchar(20),
        minte varchar(20))
        """
curses.execute(create_table)

# 2. Fetch the forecast pages for mainland regions. Hong Kong / Macao /
#    Taiwan use a different page layout and are handled separately below.

headers={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.34',
    'Upgrade-Insecure-Requests': '1',
    'Referer': 'http://www.weather.com.cn/forecast/',
    'Host': 'www.weather.com.cn'
}

urls = [
    "http://www.weather.com.cn/textFC/hb.shtml",    # North China
    "http://www.weather.com.cn/textFC/db.shtml",    # Northeast China
    "http://www.weather.com.cn/textFC/hd.shtml",    # East China
    "http://www.weather.com.cn/textFC/hz.shtml",    # Central China
    "http://www.weather.com.cn/textFC/hn.shtml",    # South China
    "http://www.weather.com.cn/textFC/xb.shtml",    # Northwest China
    "http://www.weather.com.cn/textFC/xn.shtml",    # Southwest China
]

# Parameterized INSERT — never interpolate scraped (untrusted) text into SQL
# with %-formatting: a value containing a quote breaks the statement and it
# is an SQL-injection vector. pymysql escapes the values itself.
insert_sql = ("insert into tianqi1(province,city,weather,wind,maxte,"
              "nightweather,nightwind,minte) values(%s,%s,%s,%s,%s,%s,%s,%s)")

for url in urls:
    # timeout so a stalled server cannot hang the whole scrape
    req = requests.get(url, headers=headers, timeout=10)

    # 3. Parse the HTML and pull out the fields we need.
    text = req.content.decode('utf-8')
    soup = BeautifulSoup(text, 'lxml')
    soup1 = soup.find('div', class_='conMidtab')            # first conMidtab = today's table
    souplist = soup1.find_all('div', class_='conMidtab2')   # one conMidtab2 per province

    for section in souplist:
        tr_list = section.find_all('tr')[2:]  # skip the two header rows
        province = ''
        for index, tr in enumerate(tr_list):
            td_list = tr.find_all('td')
            # The first data row of each province has an extra leading <td>
            # with the province name; subsequent rows start with the city.
            if index == 0:
                province = td_list[0].text.replace('\n', '')
                td_list = td_list[1:]
            city = td_list[0].text.replace('\n', '')
            weather = td_list[1].text.replace('\n', '')        # daytime weather
            wind = td_list[2].text.replace('\n', '')           # daytime wind
            maxte = td_list[3].text.replace('\n', '')          # daily high
            nightweather = td_list[4].text.replace('\n', '')   # night weather
            nightwind = td_list[5].text.replace('\n', '')      # night wind
            minte = td_list[6].text.replace('\n', '')          # daily low

            # 4. Store the row in the database.
            curses.execute(insert_sql, (province, city, weather, wind, maxte,
                                        nightweather, nightwind, minte))
    db.commit()  # one commit per page, not per row


# 5. Hong Kong / Macao / Taiwan weather — same procedure as above, but these
#    pages use conMidtab3 sections and have no header rows to skip.

urls_1 = [
    "http://www.weather.com.cn/textFC/hongkong.shtml",  # Hong Kong
    "http://www.weather.com.cn/textFC/macao.shtml",     # Macao
    "http://www.weather.com.cn/textFC/taiwan.shtml"     # Taiwan
]

# Parameterized INSERT (see note above the mainland loop): scraped text must
# never be %-formatted into the SQL string.
insert_sql_1 = ("insert into tianqi1(province,city,weather,wind,maxte,"
                "nightweather,nightwind,minte) values(%s,%s,%s,%s,%s,%s,%s,%s)")

for url in urls_1:
    req = requests.get(url, headers=headers, timeout=10)  # timeout: don't hang on a dead server
    text = req.content.decode('utf-8')
    soup = BeautifulSoup(text, 'lxml')
    soup1 = soup.find('div', class_='conMidtab')            # first conMidtab = today's table
    souplist = soup1.find_all('div', class_='conMidtab3')   # one conMidtab3 per region

    for section in souplist:
        tr_list = section.find_all('tr')
        province = ''
        for index, tr in enumerate(tr_list):
            td_list = tr.find_all('td')
            # First row carries an extra leading <td> with the region name;
            # later rows start directly with the city.
            if index == 0:
                province = td_list[0].text.replace('\n', '')
                td_list = td_list[1:]
            city = td_list[0].text.replace('\n', '')
            weather = td_list[1].text.replace('\n', '')        # daytime weather
            wind = td_list[2].text.replace('\n', '')           # daytime wind
            maxte = td_list[3].text.replace('\n', '')          # daily high
            nightweather = td_list[4].text.replace('\n', '')   # night weather
            nightwind = td_list[5].text.replace('\n', '')      # night wind
            minte = td_list[6].text.replace('\n', '')          # daily low

            curses.execute(insert_sql_1, (province, city, weather, wind, maxte,
                                          nightweather, nightwind, minte))
    db.commit()  # one commit per page, not per row
print("爬取成功了")
db.close()