import requests
import csv
import time
from bs4 import BeautifulSoup
from time import sleep
import random
import traceback


def get_one_page(url):
    """Fetch *url* and return the response body as text.

    Retries up to 3 times on network/HTTP errors, sleeping 1-2 s before
    each attempt to throttle the crawl.  Returns ``None`` after the third
    failure so the caller can skip the page.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/"
                      "537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 Edg/109.0.1518.78"
    }
    attempts_left = 3
    while attempts_left > 0:
        try:
            # Random delay keeps the request rate polite and less bot-like.
            sleep(random.uniform(1, 2))
            response = requests.get(url, headers=headers, timeout=10)
            # Treat HTTP 4xx/5xx as failures instead of returning error pages.
            response.raise_for_status()
            return response.text
        # BUG FIX: the original `except (TimeoutError, Exception)` caught
        # *everything*, including programming errors; catch only the
        # network-level errors requests actually raises.
        except requests.RequestException:
            attempts_left -= 1
            if attempts_left == 0:
                print('请求3次失败，放弃此url请求，检查请求条件')
                return None
            print('请求失败，重新请求')
    return None


def parse_one_page(html, writer):
    """Parse one listing page and append each price-table row to *writer*.

    Each data row yields five cells (name, origin, average price, spec,
    date).  Any parse error is logged and swallowed so one bad page does
    not abort the whole crawl.
    """
    if not html:
        # get_one_page() returns None after repeated failures; nothing to parse.
        return
    try:
        page = BeautifulSoup(html, "html.parser")
        table = page.find("table", attrs={"class": "price-table"})
        for tr in table.find_all("tr")[1:]:  # [1:] skips the header row
            tds = tr.find_all("td")
            # Note: original wrapped each cell in ''.join(...), a no-op on a
            # str; also avoid shadowing the `tr` loop variable with the row.
            row = [tds[i].text for i in range(5)]
            print(row)
            writer.writerow(row)
    except Exception:
        # BUG FIX: traceback.print_exc() prints the traceback and returns
        # None, so the old print(traceback.print_exc()) emitted a spurious
        # "None" line after every traceback.
        traceback.print_exc()


def main(x):
    """Download page *x* of the import-price list and append its rows to vegetable2.csv.

    The file is opened in append mode so successive calls accumulate pages.
    """
    url = 'http://www.jnmarket.net/import/list-1_{}.html'.format(x)
    html = get_one_page(url)
    with open('vegetable2.csv', 'a', encoding='utf_8_sig', newline='') as fp:
        writer = csv.writer(fp)
        # BUG FIX: the header used to be written on *every* call, so the CSV
        # ended up with one header line per page.  In append mode tell() is 0
        # only for a brand-new/empty file, so write the header just once.
        if fp.tell() == 0:
            writer.writerow(['菜名', '产地', '均价(元/公斤)', '规格', '日期'])
        parse_one_page(html, writer)


if __name__ == '__main__':
    # Entry point: crawl listing pages 8000-9999 one at a time.
    for page_no in range(8000, 10000):
        main(x=page_no)
        # Extra pause between pages (on top of the per-request delay inside
        # get_one_page) to keep the overall request rate low.
        time.sleep(random.uniform(1, 2))
        print("第" + str(page_no) + "页提取完成")
