"""
@Project:take_out_project
@File:pc.py
@Author:袁浩天
@Date:9:38
"""
# import requests
# url = "https://www.baidu.com"
# r = requests.get(url)
# r.encoding="utf-8"
# print(r.text)
import pymysql
import requests
from bs4 import BeautifulSoup
import os


# Data crawling
def get_html(url):
    """Fetch *url* and return the response body decoded as UTF-8 text.

    NOTE(review): relies on the module-level ``headers`` dict defined in
    the ``__main__`` block; calling this from another module without
    defining ``headers`` raises NameError.

    Args:
        url: page URL to fetch.

    Returns:
        str: the page HTML decoded as UTF-8.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    # A timeout keeps the request from hanging forever on a dead server.
    r = requests.get(url, headers=headers, timeout=10)
    # The site serves UTF-8; force it so Chinese text decodes correctly.
    r.encoding = "utf-8"
    return r.text




# Data parsing
def parse_data(html_doc, root="d://day726//pic//"):
    """Extract dish entries from a listing page and download their images.

    Args:
        html_doc: HTML text of a meishij.net category listing page.
        root: directory where downloaded images are saved; created on
            demand. Defaults to the original hard-coded path.

    Returns:
        list[dict]: one dict per dish with keys ``name`` (title),
        ``url`` (local image path) and ``food`` (seasoning summary).
    """
    soup = BeautifulSoup(html_doc, "html.parser")
    results = []

    cards = soup.select(
        "body > div.main_w.clearfix > article > div.list_s2 > div.list_s2_content> div"
    )
    for card in cards:
        img_anchor = card.select("div.imgw > a.list_s2_item_img")
        title_tag = card.select("a.list_s2_item_info > strong.title")
        food_tag = card.select("a.list_s2_item_info > span.sc")
        # Skip cards that don't match the expected layout instead of
        # crashing on an IndexError from a bare [0].
        if not (img_anchor and title_tag and food_tag):
            continue

        link = img_anchor[0].get("style")
        title = title_tag[0].string
        food = food_tag[0].string
        # style looks like "background-image:url(http://.../x.jpg)";
        # pull out the URL between the parentheses.
        img = link.split('(')[-1].split(')')[0]
        path = root + img.split("/")[-1]

        try:
            # exist_ok avoids the exists()/makedirs() race.
            os.makedirs(root, exist_ok=True)
            r = requests.get(img, timeout=10)
            with open(path, "wb") as f:
                f.write(r.content)
        except Exception as e:
            # Best-effort download: report and keep processing the rest.
            print("爬取失败", e)
        results.append({"name": title, "url": path, "food": food})
    return results


# Data storage
def store_data(lists1):
    """Print (and optionally persist) scraped dish records to MySQL.

    Args:
        lists1: list of dicts with ``name``, ``url`` and ``food`` keys,
            as produced by ``parse_data``.

    NOTE(review): the INSERT itself is still commented out, matching the
    original; it has been rewritten as a parameterized query so it is
    safe to enable (the original f-string SQL was injectable and closed
    the connection inside the loop).
    """
    conn = pymysql.connect(
        host="127.0.0.1",
        port=3306,
        user="root",
        password="123",
        database="take_out",
        charset="utf8mb4",  # charset expects a string such as "utf8mb4", not True
    )
    try:
        for item in lists1:
            print("i>>>>>>>>>>>>>>", item)
            url = item["url"]
            name = item["name"]
            food = item["food"]
            print(name)
            # with conn.cursor() as cursor:
            #     # Parameterized query: never interpolate scraped text into SQL.
            #     cursor.execute(
            #         "insert into food_tb(url,name,seasoning) values (%s,%s,%s)",
            #         (url, name, food),
            #     )
            # conn.commit()
    finally:
        # The original leaked the connection; close it exactly once,
        # after all rows, not inside the loop.
        conn.close()


if __name__ == '__main__':
    # Target listing page: the Sichuan-cuisine category on meishij.net.
    # NOTE: ``url`` and ``headers`` are module-level globals that the
    # helper functions above read by name — do not rename them.
    url = "https://www.meishij.net/caixi/chuancai/"

    # Browser-like request headers so the site serves the normal page
    # instead of blocking the default requests user agent.
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.8',
        'Cache-Control': 'max-age=0',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36 Edg/100.0.1185.50',
        'Connection': 'keep-alive',
        'Referer': 'https://news.sina.com.cn/'
    }

    # Pipeline: fetch listing HTML -> parse it and download dish images
    # -> hand the records to the MySQL storage step.
    html_doc = get_html(url)
    lists = parse_data(html_doc)
    store_data(lists)


