"""
@Project:catering_takeout
@File:caipin.py
@Author:柴奇峰
@Date:10:00
"""
import requests
from bs4 import BeautifulSoup
import os
import pymysql


# Module-level MySQL connection and cursor, shared by every insert in this
# script. Committed/rolled back per-row inside parse_data().
# NOTE(review): credentials are hard-coded — move host/user/password to
# environment variables or a config file before real use.
db = pymysql.connect(host='127.0.0.1', port=3306, user='root', password='123', db='catering_takeout',
                         charset='utf8')
cursor = db.cursor()

def get_html(url):
    """Fetch *url* and return the response body decoded as UTF-8 text.

    :param url: page URL to download.
    :return: the page HTML as a ``str``.
    :raises requests.HTTPError: on a non-2xx response.
    :raises requests.Timeout: if the server does not answer within 10s.
    """
    # Timeout prevents the whole script from hanging forever on a dead host.
    r = requests.get(url, timeout=10)
    # Fail fast instead of silently parsing an error page downstream.
    r.raise_for_status()
    r.encoding = 'utf-8'
    return r.text

def parse_data(html_doc):
    """Parse a meishij.net category page: for each dish card, download the
    cover image to the local static folder and insert (title, main_food,
    image filename) into the ``pa_chong`` MySQL table.

    :param html_doc: HTML text of the category listing page.
    """
    soup = BeautifulSoup(html_doc, "html.parser")
    # Each direct child div of the list container is one dish card.
    cards = soup.select("body > div.main_w.clearfix > article > div.list_s2 > div.list_s2_content > div")
    # Local destination directory for the downloaded images.
    root = "C://Users//admin//Desktop//p9//catering_takeout//static//img//"
    for card in cards:
        link = card.select("div.imgw > a.list_s2_item_img")[0].get("href")
        title = card.select("a.list_s2_item_info > strong.title")[0].string
        main_food = card.select("a.list_s2_item_info > span.sc")[0].string
        # The image URL is embedded in the anchor's style attribute as
        # "background-image: url(...)"; extract the part between parentheses.
        style_attr = card.select("div.imgw > a.list_s2_item_img")[0].get("style")
        img_url = style_attr.split('(')[-1].split(')')[0]
        print(link)
        print(title)
        print(img_url)
        print(main_food)

        # Keep the remote file's original name for the local copy.
        filename = img_url.split('/')[-1]
        path = root + filename
        print(path)
        try:
            os.makedirs(root, exist_ok=True)  # create the folder if missing

            r = requests.get(img_url, timeout=10)  # timeout: don't hang on one image
            print("文件大小", len(r.content) / 1024, "kb")
            with open(path, "wb") as f:
                print("正在保存文件...")
                f.write(r.content)  # write the binary image content
                print("文件保存成功")
        except Exception as e:
            print("爬取失败", e)

        try:
            # Parameterized query: never interpolate scraped text into SQL
            # (the previous f-string version was open to SQL injection and
            # broke on titles containing a quote character).
            sql = "insert into pa_chong (title,main_food,img) values (%s, %s, %s)"
            cursor.execute(sql, (title, main_food, filename))
            db.commit()
        except Exception as e:
            db.rollback()
            print(e)







if __name__ == '__main__':
    # Scrape the Sichuan-cuisine category page and load it into the DB.
    target_url = 'https://www.meishij.net/caixi/chuancai/'
    parse_data(get_html(target_url))







