import pymysql
from bs4 import BeautifulSoup
import redis


class FoodInfo:
    """Extract structured recipe data from HTML pages cached in MySQL.

    The table ``food_info_source`` holds previously-scraped pages keyed by
    URL; column index 2 of each row is the raw HTML body. This class looks
    pages up by URL and parses 美食天下 (meishichina.com) search-result and
    detail pages into plain dicts.
    """

    def __init__(self):
        # charset='utf8mb4' so Chinese titles survive the round trip.
        # NOTE(review): verify the table was written as utf8mb4 too.
        self.connect = pymysql.connect(host='localhost', user='root',
                                       password='wu123456', db='foodinfo',
                                       port=3306, charset='utf8mb4')
        self.cursor = self.connect.cursor()
        # %s here is a pymysql placeholder, NOT Python interpolation; the
        # URL is passed separately so the driver escapes it (the original
        # spliced the URL into the SQL text, which breaks on quotes and is
        # injection-prone).
        self.sql = 'select * from food_info_source where url=%s;'

    def initdata(self):
        """Populate the search-URL templates and domain->source-name map."""
        self.urls = {}
        self.urls["美食天下"] = "https://home.meishichina.com/search/%s/"
        self.urls["美食杰"] = "https://so.meishi.cc/index.php?q=%s"
        self.urls["薄荷"] = "http://www.boohee.com/food/search?keyword=%s"
        self.urls["Hi运动"] = "https://food.hiyd.com/food/search?kw=%s"
        self.urls["香哈"] = "https://www.xiangha.com/so/?q=caipu&s=%s"
        self.urls["豆果美食"] = "https://www.douguo.com/search/recipe/%s"

        self.domain = {}
        self.domain["meishichina.com"] = "美食天下"
        self.domain["meishi.cc"] = "美食杰"
        self.domain["boohee.com"] = "薄荷"
        self.domain["hiyd.com"] = "Hi运动"
        self.domain["xiangha.com"] = "香哈"
        self.domain["douguo.com"] = "豆果美食"

    def __del__(self):
        # Attributes may be missing if __init__ failed partway (e.g. the
        # MySQL connection could not be opened) — close only what exists.
        cursor = getattr(self, 'cursor', None)
        if cursor is not None:
            cursor.close()
        connect = getattr(self, 'connect', None)
        if connect is not None:
            connect.close()

    def queryData(self, sql, params=None):
        """Execute *sql* (with driver-escaped *params*, if given) and return
        all rows; returns an empty sequence on any database error.

        *params* defaults to None so existing callers passing a complete
        SQL string keep working unchanged.
        """
        rows = []
        try:
            self.cursor.execute(sql, params)
            rows = self.cursor.fetchall()
        except pymysql.Error as exc:
            # Best-effort: report and fall through with an empty result so
            # callers can skip the missing/broken page.
            print('%s execute failed: %s' % (sql, exc))
        return rows

    def run(self):
        """Demo driver: parse the cached 美食天下 search page for 番茄炒蛋."""
        url = "https://home.meishichina.com/search/番茄炒蛋/"
        res = self.queryData(self.sql, (url,))
        if not res:
            # Original indexed res[0][2] unconditionally and crashed
            # whenever the page was not cached (queryData returns []).
            print('no cached page for ' + url)
            return
        html_str = res[0][2]  # column 2 holds the cached HTML body
        content_list = self.mstx_content(html_str)
        print(content_list)

    def mstx_content(self, html_str):
        """Parse a 美食天下 search-result page into a list of recipe dicts.

        Each dict carries ``source``, ``title`` and ``href``; when the
        detail page is also cached, the title is refined and each
        ``fieldset.particulars`` legend (e.g. ingredients, seasonings)
        becomes a key mapping to the list of <b> texts underneath it.
        """
        content_list = []
        soup = BeautifulSoup(html_str, 'lxml')
        tag = soup.find("div", class_="ui_list_1")
        if tag is None:
            return content_list  # page layout changed or empty result page
        for li in tag.find_all('li'):
            links = li.find_all("a")
            if len(links) < 2:
                continue  # not a recipe entry
            info = links[1]
            find = info.find("em")
            if find is None:
                continue
            item = {
                "source": "美食天下",
                "title": find.text,
                # .get() instead of ['href']: a missing attribute must not
                # raise KeyError, just fall through to the skip below.
                "href": info.get("href"),
            }
            # Bug fix: original tested `is None AND == ""`, which is never
            # true, so blank hrefs were never skipped.
            if item["href"] is None or item["href"] == "":
                continue
            res = self.queryData(self.sql, (item["href"],))
            if not res:
                continue  # detail page not cached yet
            subsoup = BeautifulSoup(res[0][2], 'lxml')
            title_tag = subsoup.find("h1", class_="recipe_De_title")
            if title_tag is not None and title_tag.find("a") is not None:
                # Prefer the detail-page title; keep the search-result one
                # if the attribute is absent.
                item["title"] = title_tag.find("a").get("title", item["title"])
            for onetag in subsoup.find_all("fieldset", class_="particulars"):
                legend = onetag.find("legend")
                if legend is None or not legend.text:
                    continue
                item[legend.text] = [b.text for b in onetag.find_all("b")]
            content_list.append(item)
        return content_list

import os  # NOTE(review): belongs at the top of the file with the other imports

if __name__ == '__main__':
    # Seed the Redis work queue consumed by the Scrapy spider, then launch
    # the spider. FoodInfo() is constructed to verify the MySQL connection
    # is available before crawling starts.
    foodop = FoodInfo()
    conn = redis.Redis(host="127.0.0.1", port=6379)
    try:
        # Smoke-test the Redis connection with a set/get round trip.
        conn.set("name", "https://home.meishichina.com/search/番茄炒蛋/".encode("utf-8"))
        val = conn.get("name")
        # Seed URLs for the crawl. Only 香哈 and 豆果美食 are currently
        # enabled; the other sources (美食天下, 美食杰, 薄荷, Hi运动) were
        # disabled in the original and stay off.
        seed_urls = [
            'https://www.xiangha.com/so/?q=caipu&s=番茄炒蛋',
            'https://www.douguo.com/search/recipe/番茄炒蛋',
        ]
        for seed in seed_urls:
            conn.rpush("url:foodinfo:list", seed)
        # Guard the decode: get() returns None if the key vanished, and the
        # original would crash on None.decode().
        if val is not None:
            print(val.decode("utf-8"))
    finally:
        # Original leaked the Redis connection; release it before handing
        # control to the spider process.
        conn.close()
    os.system("scrapy crawl foodinfospider")