# 好好学 ("study hard") — scrapes crop-disease control measures into a JSON file
import json
import os

import requests
from bs4 import BeautifulSoup
# Browser-like request headers so the site serves its normal HTML pages.
headers = {
    'User-Agent': (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0'
    ),
    'Accept-Encoding': 'gzip, deflate',
}

# Target site 1: crop disease/pest knowledge-base navigation page.
base_url = 'http://www.icgroupcas.cn/website_bchtk'
db_url = f'{base_url}/chazhao.aspx'
# timeout= keeps the script from hanging forever on a dead server;
# raise_for_status() surfaces HTTP errors instead of parsing an error page.
db_res = requests.get(db_url, headers=headers, timeout=30)
db_res.raise_for_status()
db_res.encoding = db_res.apparent_encoding  # fix garbled Chinese text
db_soup = BeautifulSoup(db_res.text, "html.parser")

# Locate the crop navigation table inside the page.
db_table = db_soup.find('div', id='MBody_tukudaohang')
crop_table = db_table.find('table')
crop_trs = crop_table.find_all('tr')

# Build the crop lookup: crop name -> absolute URL of its disease index page.
crop_db = {}
for row in crop_trs:
    name_cell = row.find('td')
    link = row.find('a')
    # Skip header/filler rows that lack either a name cell or a link.
    # (Truthiness, not `is None`: an empty tag is treated as missing too.)
    if name_cell and link:
        crop_db[name_cell.text] = f'{base_url}/{link.get("href")}'

# For every crop, fetch its index page and collect each disease name plus
# the URL of that disease's detail (.asp) page.
crop_data = {}  # key: crop name -> {disease name: detail-page URL}
for name, skeleton_url in crop_db.items():
    skeleton_res = requests.get(skeleton_url, headers=headers, timeout=30)
    skeleton_res.encoding = skeleton_res.apparent_encoding  # fix garbled Chinese text
    skeleton_soup = BeautifulSoup(skeleton_res.text, "html.parser")
    # Extract every disease name and its .asp link on this crop's page.
    disease = {}
    for disease_link in skeleton_soup.find_all('a'):
        href = disease_link.get('href')
        if not href:
            # <a> tags without an href (e.g. named anchors) would crash
            # on href.split() below — skip them.
            continue
        # Disease links follow "<crop>_<xx>bh_<n>.asp" / "..ch..": strip the
        # extension, split on underscores, and keep only matching links.
        parts = href.split('.')[0].split('_')
        if len(parts) == 3 and parts[1][-2:] in ('bh', 'ch'):
            disease[disease_link.text] = f'{base_url}/{href}'
    crop_data[name] = disease

# Fetch each disease's detail page and extract its list of control /
# treatment measures (the <li> entries of the first <ul> on the page).
final_data = {}
for name, diseases in crop_data.items():
    disease_to_solutions = {}
    for disease, asp in diseases.items():
        disease_res = requests.get(asp, headers=headers, timeout=30)
        disease_res.encoding = disease_res.apparent_encoding  # fix garbled Chinese text
        disease_soup = BeautifulSoup(disease_res.text, "html.parser")
        ul = disease_soup.find('ul')
        # Some pages may lack a <ul>; record an empty list instead of
        # crashing on None.find_all().
        solutions = [li.text for li in ul.find_all('li')] if ul else []
        disease_to_solutions[disease] = solutions
    final_data[name] = disease_to_solutions

# Persist the scraped data; create the output directory first so open()
# cannot fail with FileNotFoundError on a fresh checkout.
os.makedirs("output", exist_ok=True)
with open("output/crop_disease.json", "w", encoding="utf-8") as file:
    json.dump(final_data, file, ensure_ascii=False, indent=4)