import re

from scrapy import Request

from goodDesignImage2018Spider.caipuItems import *
from goodDesignImage2018Spider.dao.waimaiyuan.WaiMaiYuanItemDao import *
from goodDesignImage2018Spider.dao.waimaiyuan.caipuInfoDao import CaipuInfo
from goodDesignImage2018Spider.dto.waimaiyuanDtos import *
from itertools import *
import json


def first_level(x):
    """Predicate: True when *x* is a top-level category (parent id is -1)."""
    parent_id = x.item_parent_id
    return parent_id == -1


def no_first_level(x):
    """Predicate: True when *x* has a real parent, i.e. is NOT top-level."""
    return not (x.item_parent_id == -1)


def get_1st_cp_info(all_cp):
    """Return a dict mapping item_id -> item for every first-level recipe category.

    A first-level category is one whose ``item_parent_id`` is -1.

    Bug fix: the original used ``itertools.takewhile``, which stops at the
    FIRST non-first-level item, so any top-level category appearing after a
    child row in ``all_cp`` was silently dropped.  We must scan the whole
    sequence (as ``get_2nd_cp_info`` already does with ``filter``).
    """
    return {cp.item_id: cp for cp in all_cp if cp.item_parent_id == -1}


def get_2nd_cp_info(all_cp):
    """Return a dict mapping parent item_id -> list of its second-level categories.

    A second-level category is one whose ``item_parent_id`` is not -1.

    Bug fix: the original used ``itertools.groupby`` on an UNSORTED sequence.
    ``groupby`` only groups consecutive runs, so with interleaved parents the
    same key appeared multiple times and later fragments overwrote earlier
    ones in the result dict, losing categories.  A setdefault-accumulating
    loop groups correctly regardless of input order.
    """
    second_cp = {}
    for cp in all_cp:
        if cp.item_parent_id != -1:
            second_cp.setdefault(cp.item_parent_id, []).append(cp)
    return second_cp


class MeishijDetailSpider(scrapy.Spider):
    """Crawl recipe (caipu) detail pages from meishij.net.

    Flow: load recipe categories from the local DB, request each second-level
    category's listing page, follow every recipe link found there, and yield a
    fully populated ``CaipuInfoItem2`` per recipe not yet persisted.
    """

    name = 'meishijDetailSpider'
    allowed_domains = ['meishij.net']
    # Deliberately a single URL string (not the conventional list):
    # start_requests() below uses it directly as the seed request URL.
    start_urls = 'https://www.meishij.net/'

    def start_requests(self):
        # Seed with the site root; the real scheduling happens in parse().
        yield Request(url=self.start_urls, callback=self.parse)

    def parse(self, response):
        """Read recipe categories from the DB and schedule listing-page requests."""
        # All stored recipe categories (type 0).
        all_cp = WaiMaiYuanItemOrm.get_by_type(0)
        first_cp = get_1st_cp_info(all_cp)
        second_cp = get_2nd_cp_info(all_cp)

        # Walk every first-level category ...
        for item_id, cp in first_cp.items():
            # ... and each of its second-level children.  Default to [] so a
            # childless first-level category no longer raises TypeError
            # (the original iterated over the None returned by .get()).
            for single_second_cp in second_cp.get(item_id, []):
                print(single_second_cp.item_name)
                # item_remark holds the category's listing-page URL.
                yield Request(url=single_second_cp.item_remark, callback=self.parse_second_cp_page,
                              meta={"first_cp": cp, "second_cp": single_second_cp})

    def parse_second_cp_page(self, response):
        """Parse a second-level category listing page and follow each recipe link."""
        list_content = response.xpath("/html/body/div[2]/article/div[2]/div[1]/div")

        for list_item in list_content:
            # Main image lives in the inline style attribute of the first anchor.
            cp_detail_main_img_info = list_item.xpath("./div/a[1]/@style")[0].extract()
            # Pull the URL out of the parentheses of url(...) with a regex.
            cp_detail_main_img_url = re.findall(r'[(](.*?)[)]', cp_detail_main_img_info)[0]
            # Recipe detail-page URL.
            cp_detail_url = list_item.xpath("./div/a[1]/@href")[0].extract()
            # Recipe title.
            cp_detail_title = list_item.xpath("./a/strong/text()").get()
            yield Request(url=cp_detail_url, callback=self.parse_cp_detail_page,
                          meta={"first_cp": response.meta["first_cp"],
                                "second_cp": response.meta["second_cp"],
                                "main_img_url": cp_detail_main_img_url,
                                "cp_detail_title": cp_detail_title
                                })

    def parse_cp_detail_page(self, response):
        """Parse one recipe detail page and yield a ``CaipuInfoItem2``."""
        # Third-party source id (same XPath whether the page is video or image).
        third_id = response.xpath("/html/body/div[2]/article/input/@news_id")[0].extract()

        # Skip recipes already persisted.  Truthy check instead of ``is True``
        # so a DAO returning 1 (or another truthy value) also short-circuits.
        if CaipuInfo.check_cp_info_by_third_id(third_id):
            # Bug fix: the original called print("%s已经存在了", third_id),
            # passing the id as a second positional argument, so the %s was
            # never substituted and a tuple was printed.
            print("%s已经存在了" % third_id)
            return

        first_cp = response.meta["first_cp"]
        second_cp = response.meta["second_cp"]
        main_img_url = response.meta["main_img_url"]
        cp_detail_title = response.meta["cp_detail_title"]

        check_video_x = response.xpath("/html/body/div[2]/article/div[1]/div/div[1]/@class")[0].extract()
        # Video pages shift the layout, so most XPaths below have two variants.
        video_flag = 'video' in check_video_x

        # Cooking technique info: <em> is the label, <strong> is the value.
        tech_infos_x_video = "/html/body/div[2]/article/div[1]/div/div[4]/div[1]/div"
        tech_infos_x_img = "/html/body/div[2]/article/div[1]/div/div[2]/div[1]/div"
        tech_infos_x = response.xpath(tech_infos_x_video if video_flag else tech_infos_x_img)
        tech_infos = {}
        for tech in tech_infos_x:
            key = tech.xpath("./em/text()").get()
            value = tech.xpath("./strong/text()").get()
            tech_infos[key] = value

        # Main ingredients: a[1] holds the name, the trailing text the amount.
        main_ingredients_x_video = "/html/body/div[2]/article/div[1]/div/div[4]/div[2]/div[1]/div[2]/strong"
        main_ingredients_x_img = "/html/body/div[2]/article/div[1]/div/div[2]/div[2]/div[1]/div[2]/strong"
        main_ingredients_x = response.xpath(main_ingredients_x_video if video_flag else main_ingredients_x_img)
        main_ingredients = {}
        for main_ingredient in main_ingredients_x:
            key = main_ingredient.xpath("./a[1]/text()").get()
            value = main_ingredient.xpath("./text()").get()
            main_ingredients[key] = value

        # Secondary ingredients, same name/amount structure as main ingredients.
        others_ingredients_x_video = "/html/body/div[2]/article/div[1]/div/div[4]/div[2]/div[2]/div[2]/strong"
        others_ingredients_x_img = "/html/body/div[2]/article/div[1]/div/div[2]/div[2]/div[2]/div[2]/strong"
        others_ingredients_x = response.xpath(
            others_ingredients_x_video if video_flag else others_ingredients_x_img)
        others_ingredients = {}
        for other_ingredient in others_ingredients_x:
            others_ingredients[other_ingredient.xpath("./a[1]/text()").get()] = other_ingredient.xpath("./text()").get()

        # Introduction text (same XPath for video and image layouts).
        intro = response.xpath("/html/body/div[2]/article/div[2]/div[1]/div[1]/div[3]/p/text()").get()

        # Cooking tips.
        tips = response.xpath("/html/body/div[2]/article/div[2]/div[1]/div[4]/div[2]/p/text()").get()

        # Main image DTO (type 0 = main picture).
        main_img = FileInfoDto(main_img_url, 0, "", "", "", "菜谱主图")

        # Step-by-step instructions (same XPath for video and image layouts).
        cp_steps = []
        cp_steps_x = response.xpath("/html/body/div[2]/article/div[2]/div[1]/div[2]/div")
        for step_index, step in enumerate(cp_steps_x, start=1):
            # A step may have no picture at all.
            step_img = step.xpath("./div[2]/img")
            step_img_dto = None
            if len(step_img) > 0:
                step_img_url = step.xpath("./div[2]/img/@src")[0].extract()
                step_img_dto = FileInfoDto(step_img_url, 1, "", "", "", "菜谱步骤图")

            step_text = step.xpath("./div[2]/p[1]/text()").get()
            cp_steps.append(CaipuStepInfoDto(step_index, step_text, step_img_dto))

        caipu_info_dto = CaiPuInfoDto(
            caipu_imgs=main_img,
            caipu_intro=intro,
            caipu_main_ingredients_json=json.dumps(main_ingredients, ensure_ascii=False),
            caipu_others_ingredients_json=json.dumps(others_ingredients, ensure_ascii=False),
            caipu_steps=cp_steps,
            caipu_tech=json.dumps(tech_infos, ensure_ascii=False),
            caipu_third_source_id=third_id,
            caipu_tips=tips,
            caipu_title=cp_detail_title,
            caipu_category_id_1=first_cp.item_id,
            caipu_category_id_2=second_cp.item_id,
        )
        caipu_info = CaipuInfoItem2()
        caipu_info['caipu_info_dto'] = caipu_info_dto
        yield caipu_info
