import json
import os

import pandas as pd
import requests
from bs4 import BeautifulSoup

# HTTP request headers: present a desktop Edge/Chrome user agent so the
# site serves its normal desktop pages, and accept compressed responses.
headers = dict([
    ('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0'),
    ('Accept-Encoding', 'gzip, deflate'),
])

# Scrape the site's index page, which lists crops and links each one to a
# per-crop pest/disease page.
db_link = 'http://www.icgroupcas.cn/website_bchtk/chazhao.aspx'
# timeout: never hang forever on an unresponsive server
db_res = requests.get(db_link, headers=headers, timeout=30)
db_res.raise_for_status()  # fail loudly instead of parsing an HTTP error page
db_res.encoding = db_res.apparent_encoding  # fix mojibake for Chinese content
db_soup = BeautifulSoup(db_res.text, "html.parser")

# Navigation table inside the 'MBody_tukudaohang' div holds one row per crop.
db_table = db_soup.find('div', id='MBody_tukudaohang')
crop_list = db_table.find('table')
crop = crop_list.find_all('tr')
crops = []      # crop display names (text of each row's first <td>)
crop_link = []  # absolute URLs of each crop's disease-index page
for c in crop:
    td = c.find('td')
    if td:
        crops.append(td.text)
    a = c.find('a')
    # guard: an <a> without an href would otherwise produce a broken URL
    if a and a.get('href'):
        crop_link.append('http://www.icgroupcas.cn/website_bchtk/' + a.get('href'))

# For each crop, fetch its skeleton page, collect the disease detail links
# (hrefs whose stem looks like <crop>_<xx>bh_<n> or <crop>_<xx>ch_<n>),
# then fetch each disease page and pull the treatment text from its first <ul>.
CropDisease = []   # per crop: list of disease names
CropSolution = []  # per crop: list of solution texts, parallel to CropDisease
for skeleton_url in crop_link:
    skeleton_res = requests.get(skeleton_url, headers=headers, timeout=30)
    skeleton_res.encoding = skeleton_res.apparent_encoding  # fix Chinese mojibake
    skeleton_soup = BeautifulSoup(skeleton_res.text, "html.parser")

    # Extract this crop's disease names and their .aspx detail links.
    disease_asp_links = []
    disease_name = []
    for link in skeleton_soup.find_all('a'):
        href = link.get('href')
        if not href:
            # anchors without an href would crash the split() below
            continue
        parts = href.split('.')[0].split('_')
        # disease links have exactly 3 underscore-separated parts, with the
        # middle part ending in 'bh' (病害) or 'ch' (虫害)
        if len(parts) == 3 and parts[1][-2:] in ('bh', 'ch'):
            disease_asp_links.append('http://www.icgroupcas.cn/website_bchtk/' + href)
            disease_name.append(link.text)

    solutions = []
    for asp_link in disease_asp_links:
        disease_res = requests.get(asp_link, headers=headers, timeout=30)
        disease_res.encoding = disease_res.apparent_encoding  # fix Chinese mojibake
        # use a distinct variable: reusing skeleton_soup here shadowed the
        # crop-level soup and was confusing
        disease_soup = BeautifulSoup(disease_res.text, "html.parser")
        solution = disease_soup.find('ul')
        # pages missing the expected <ul> would otherwise raise AttributeError
        solutions.append(solution.text if solution else '')
    CropDisease.append(disease_name)
    CropSolution.append(solutions)

    # 格式化为json、Excel
crop_disease = {'Name': crops, 'Disease': CropDisease, 'Solution': CropSolution}

with open("output/crop_disease.json", "w", encoding="utf-8") as file:
    json.dump(crop_disease, file, ensure_ascii=False, indent=4)
print("已转换为json格式")
df = pd.DataFrame(crop_disease)
df.to_excel("output/crop_disease.xlsx", index=False, engine="openpyxl")
