# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
import os
import xlrd


requests.packages.urllib3.disable_warnings()  # suppress urllib3 insecure-HTTPS (certificate) warnings


class SOMI():
    """Scraper for www.soim.co.kr (Korean maternity-wear shop).

    Depending on the menu choice passed as ``type`` ("1"-"4"), it appends
    product/SKU rows to one of three CSV files configured in
    ./somi-config.txt, downloads product images, or diffs two previously
    scraped CSV snapshots. Korean page text is rewritten through a
    replacement table loaded from somi-tran.xls before parsing.

    NOTE(review): several methods call ``HttpUtils.do_request`` but
    ``HttpUtils`` is neither imported nor defined in this file — confirm it
    is provided elsewhere, otherwise those calls raise NameError.
    """

    def __init__(self, type):
        # NOTE(review): parameter `type` shadows the builtin; it is the
        # user's menu choice string ("1"-"4"), not a Python type.
        self.cookie = cookiejar.CookieJar()
        ua = UserAgent(use_cache_server=False)  # do not hit fake-useragent's remote cache server
        self.headers = {
            "User-Agent": ua.random
            # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
        }
        # Read the configuration file: one "label:path" entry per line.
        # NOTE(review): split(":") breaks for paths containing a colon
        # (e.g. a Windows drive letter) — confirm the config format.
        with open("./somi-config.txt", "r", encoding="utf-8") as fp:
            data = fp.read()
            # base data table
            self.path_product = data.split("\n")[0].split(":")[1]
            # comparison data table
            self.path_product1 = data.split("\n")[1].split(":")[1]
            # image-download table / comparison-result table
            self.path_pic_down = data.split("\n")[2].split(":")[1]
            # pick the output path according to the menu choice 1/2/3/4
            if type == "1":
                self.path = self.path_product
            elif type == "2":
                self.path = self.path_product1
            elif type == "3" or type == "4":
                self.path = self.path_pic_down
        self.tran_list = []
        # image-download mode ("3") does not write a CSV header row
        if type != "3":
            with open(self.path, "a+", encoding="utf-8-sig", newline="") as f:
                csv_writer = csv.writer(f)
                csv_writer.writerow(["类别","分类","商品id","价格","标题","链接","sku1名称","sku2名称","是否缺货"])
        # load the text-replacement pairs (column 0 -> column 1) from somi-tran.xls
        data = xlrd.open_workbook("somi-tran.xls")
        table = data.sheets()[0]
        for rownum in range(table.nrows):
            if rownum > 0:  # skip the header row
                dict_data = dict()
                dict_data['key'] = table.row_values(rownum)[0].strip()
                dict_data['value'] = table.row_values(rownum)[1].strip()
                self.tran_list.append(dict_data)

    # listing-page scraping
    def get_contents(self, page, type, name, url):
        """Scrape one listing page and recurse through the remaining pages.

        :param page: 1-based page number to fetch
        :param type: top-level category label written to the CSV
        :param name: sub-category label written to the CSV
        :param url: listing URL without the sort/page query parameters
        """
        try:
            # url = f"http://www.soim.co.kr/shop/shopbrand.html?type=X&xcode=067&sort=&page={page}"
            url += f"&sort=&page={page}"
            # html = requests.get(url, headers=self.headers)
            html = HttpUtils.do_request("GET", url, self.headers, "")
            html.encoding = "EUC-KR"
            root = etree.HTML(self.tran_txt(html.text))
            # total number of pages (last entry of the pagination list)
            page_num = int(root.xpath('//ol[@class="paging"]/li//text()')[-1:][0])
            # "best" (top) items
            best_root = root.xpath('//div[@class="gallery"]/ul[@class="gallery_grid cb_clear"]/li')
            office_root = root.xpath('//div[@class="SMP-main-prd-container"]/ul[@class="cb_clear"]/li')
            # save the top section only once
            if page == 1:
                # top items of the page
                for b in best_root:
                    detail_url = "http://www.soim.co.kr" + b.xpath('./div/a/@href')[0]
                    pname = "".join(b.xpath('./div/p[@class="pname"]/text()')).replace("\n", "").replace("\t", "").strip()
                    subname = "".join(b.xpath('./div/p[@class="subname"]//text()')).replace("\n", "").replace("\t", "") \
                        .replace("\r", "").strip()
                    price = b.xpath('./div/p[@class="price"]/span/text()')[0]
                    print(f"===共【{page_num}】页===获取第【{page}】页===类别【{type}】===分类【{name}】===【{pname}】详细信息===")
                    # detail_url = "http://www.soim.co.kr/shop/shopdetail.html?branduid=68669"
                    # fetch the detail page
                    self.get_detail(detail_url, type, name)
            # regular items of the page
            for b in office_root:
                detail_url = "http://www.soim.co.kr" + b.xpath('./div/a/@href')[0]
                pname = "".join(b.xpath('./p[@class="pname"]/text()')).replace("\n", "").replace("\t", "").strip()
                subname = "".join(b.xpath('.//p[@class="subname"]/text()')).replace("\n", "").replace("\t", "") \
                    .replace("\r", "").strip()
                price = b.xpath('.//p[@class="price"]/span/text()')[0]
                print(f"===共【{page_num}】页===获取第【{page}】页===类别【{type}】===分类【{name}】===【{pname}】详细信息===")
                # detail_url = "http://www.soim.co.kr/shop/shopdetail.html?branduid=68669"
                # fetch the detail page
                self.get_detail(detail_url, type, name)
            # next page (recursive)
            if page < page_num:
                self.get_contents(page + 1, type, name, url)
        except Exception as ex:
            self.error_info("get_contents", ex)

    # product detail page
    def get_detail(self, url, type, name):
        """Scrape one product detail page and append its SKU rows to self.path.

        :param url: detail-page URL containing a branduid query parameter
        :param type: top-level category label
        :param name: sub-category label
        """
        try:
            # product id
            branduid = re.findall(r"branduid=(.*?)&", url)[0]
            # branduid = "68669"
            path = './' + branduid
            path_csv = path + "/" + branduid + ".csv"

            # html = requests.get(url, headers=self.headers)
            html = HttpUtils.do_request("GET", url, self.headers, "")
            html.encoding = "EUC-KR"
            root = etree.HTML(self.tran_txt(html.text))
            # title
            pname = root.xpath('//p[@class="pname"]/text()')[0]
            # original (struck-through) price
            price = "".join(root.xpath('//p[@class="price"]/span/strike/text()'))
            # fall back to the current price when there is no original price
            if price == "":
                price = "".join(root.xpath('//p[@class="price"]/span/text()')).replace("\n", "").replace("\t", "").strip()
            # size-chart iframe link
            if len(root.xpath('//iframe[@id="sf_chart_iframe_id"]/@src')) > 0:
                chart_url = "http:" + root.xpath('//iframe[@id="sf_chart_iframe_id"]/@src')[0]
            else:
                chart_url = ""
            # large images
            pic_origin_root = root.xpath('//li[@class="origin-img"]/div')
            # thumbnails
            pic_multi_root = root.xpath('//li[@class="multi-img"]/div')
            # detail images
            pic1_multi_root = root.xpath('//p[@style="margin: 0px; text-align: center;"]')
            # underwear size-chart images
            if len(root.xpath('//div[@id="detailCnt1"]/div/p')) > 0:
                pic2_multi_root = root.xpath('//div[@id="detailCnt1"]/div/p')
            # skip the image download when the folder already exists
            # isExists=os.path.exists(path)
            # if not isExists:
                # create the folder
                # os.makedirs(path)
                # # download thumbnails
                # self.down_pic(path, pic_multi_root)
                # # download underwear size-chart images
                # if len(pic2_multi_root) > 0:
                #     self.down_pic(path, pic2_multi_root)
                # # download detail images
                # self.down_pic(path, pic1_multi_root)
            # SKU stock: options with opt_price="0"
            sku1_root = root.xpath('//select[@id="MK_p_s_0"]/option[@opt_price="0"]')
            sku2_root = root.xpath('//select[@id="MK_p_s_1"]/option[@opt_price="0"]')
            sku2_count = len(sku2_root)  # number of options in the second SKU select
            # handle products that have two selects
            flag1 = True
            for c in sku1_root:
                flag2 = True
                sku_name = c.xpath('./text()')[0]  # SKU name
                opt_value = c.xpath('./@value')[0]  # selected opt value
                opt_price = c.xpath('./@price')[0]  # selected price
                # out-of-stock flag ("缺货" is the post-tran_txt translation)
                out_of_stock1 = 1 if sku_name.find("缺货") >= 0 else 0
                # single-SKU product
                if sku2_count == 0:
                    # save the SKU
                    with open(self.path, "a+", encoding="utf-8-sig", newline="") as f:
                        csv_writer = csv.writer(f)
                        # write id/title/price only on the first row of the product
                        if flag1 == True:
                            # columns: type, class, product id, price, title, url, sku1, sku2, out-of-stock
                            csv_writer.writerow([type, name, branduid, price, pname, url, sku_name, "", out_of_stock1])
                            flag1 = False
                        else:
                            csv_writer.writerow(["", "", branduid, "", "", url, sku_name, "", out_of_stock1])
                    print(f"==============保存SKU【{sku_name}】==============")
                else:
                    print(f"==============保存SKU【{sku_name}】==============")
                    # fetch the sku2 options for this sku1 selection
                    sku2_list = self.sku(branduid, opt_price, opt_value)
                    # save the SKUs
                    with open(self.path, "a+", encoding="utf-8-sig", newline="") as f:
                        csv_writer = csv.writer(f)
                        for s in sku2_list:
                            # out-of-stock flag
                            # NOTE(review): this tests sku_name (sku1), not s (sku2),
                            # and uses the untranslated Korean "품절" while the
                            # single-SKU branch tests the translated "缺货" — confirm intent.
                            out_of_stock2 = 1 if sku_name.find("품절") >= 0 else 0
                            # write id/title/price only on the first row of the product
                            if flag2 == True:
                                csv_writer.writerow([type, name, branduid, price, pname, url, sku_name, s, out_of_stock2])
                                flag2 = False
                            else:
                                csv_writer.writerow(["", "", branduid, "", "", url, "", s, out_of_stock2])
            # fetch and save the size chart
            # self.get_chart(chart_url, path_csv)
        except Exception as ex:
            self.error_info("get_detail", ex)

    # fetch the size chart
    def get_chart(self, url, path_csv):
        """Scrape the size-chart iframe and write its three tables to path_csv.

        The output CSV is deleted first if it exists; the three tables are
        separated by three blank rows each.

        :param url: chart iframe URL; "" skips everything
        :param path_csv: per-product CSV output path
        """
        try:
            if url != "":
                # html = requests.get(url, headers=self.headers)
                html = HttpUtils.do_request("GET", url, self.headers, "")
                html.encoding = "utf-8"
                root = etree.HTML(self.tran_txt(html.text))
                info1_root = root.xpath('//table[@class="tableStyle soim_custom"]/tbody/tr')
                # product = info[0].xpath('./td[2]/text()')[0]
                # color = info[1].xpath('./td[2]/text()')[0]
                # size = info[2].xpath('./td[2]/text()')[0]
                # fabric = info[3].xpath('./td[2]/text()')[0]
                # fitting_size = info[4].xpath('./td[2]/text()')[0]
                # delete a pre-existing chart CSV first
                isExists=os.path.exists(path_csv)
                if isExists:
                    os.remove(path_csv)
                print("==============保存表1==============")
                info1_list = []
                # table 1: name/value rows
                for inf in info1_root:
                    info_dict = dict()
                    info_name = inf.xpath('./td[1]/text()')[0]
                    info_value = inf.xpath('./td[2]/text()')[0]
                    # info_dict[info_name] = info_value
                    # info_list.append(info_dict)
                    info1_list.append(info_name)
                    info1_list.append(info_value)
                    # write the row for table 1
                    with open(path_csv, "a+", encoding="utf-8-sig", newline="") as f:
                        csv_writer = csv.writer(f)
                        csv_writer.writerow(info1_list)
                        info1_list.clear()
                # write 3 blank separator rows
                with open(path_csv, "a+", encoding="utf-8-sig", newline="") as f:
                    csv_writer = csv.writer(f)
                    for i in range(0, 3):
                        csv_writer.writerow([""])
                print("==============保存表2==============")
                # size-spec tables
                info2_root = root.xpath('//table[@class="tableStyle"]')
                flag = False
                info2_list = []
                for inf_1 in info2_root:
                    flag = False
                    for inf_t in inf_1.xpath('./tr'):
                        tag_dict = dict()
                        # first row: column headers
                        if flag == False:
                            for f in inf_t.xpath('./th'):
                                tag_name = "".join(f.xpath('./text()')).strip()
                                info2_list.append(tag_name)
                        # following rows: size values
                        else:
                            tag_name = "".join(inf_t.xpath('./th/text()'))
                            tag_value = "".join(inf_t.xpath('./td/text()'))
                            tag_dict[tag_name] = tag_value
                            # tag_list.append(tag_dict)
                            info2_list.append(tag_name)
                            for t in inf_t.xpath('./td/text()'):
                                info2_list.append(t)
                        # write the row
                        with open(path_csv, "a+", encoding="utf-8-sig", newline="") as f:
                            csv_writer = csv.writer(f)
                            csv_writer.writerow(info2_list)
                            info2_list.clear()
                        flag = True
                    # write 3 blank separator rows after each table
                    with open(path_csv, "a+", encoding="utf-8-sig", newline="") as f:
                        csv_writer = csv.writer(f)
                        for i in range(0, 3):
                            csv_writer.writerow([""])
                print("==============保存表3==============")
                # table 3: attribute rows with checked/unchecked icon cells
                info3_root = root.xpath('//table[@class="tableStyle tdLeft"]/tbody/tr')
                info3_list = []
                for inf3 in info3_root:
                    col = inf3.xpath('./th/text()')[0]
                    info3_list.append(col)
                    for v in inf3.xpath('./td/span'):
                        key = "".join(v.xpath('./text()'))
                        value = "".join(v.xpath('./img/@src'))
                        if value.find("check_skin2") > 0:  # checked state
                            value = 1
                            key += "_True"
                        else:
                            value = 0
                        info3_list.append(key)
                    # write the row
                    with open(path_csv, "a+", encoding="utf-8-sig", newline="") as f:
                        csv_writer = csv.writer(f)
                        csv_writer.writerow(info3_list)
                        info3_list.clear()
        except Exception as ex:
            self.error_info("get_chart", ex)

    def sku(self, branduid, price, opt) -> list:
        """
        Fetch the second-level SKU names (size/colour/stock) for one sku1 option.

        :param branduid: product id
        :param price: sku1 option price, used to filter the returned options
        :param opt: sku1 option value posted as selected_opt
        :return: list of sku2 option names (None when the request fails,
                 since the except branch returns nothing)
        """
        try:
            postData = {
                'uid': branduid,
                'option_type': 'PS',
                'selected_opt': opt,
                'discount_uid':'',
                'etctype': '|',
                'prd_sellprice': price,
                'regdate': '',
                'is_discount':'',
                'product_discount':''
            }
            url = "http://www.soim.co.kr/shop/multi_option_stock.ajax.html"
            # html = requests.post(url, headers=self.headers, data=postData)
            html = HttpUtils.do_request("POST", url, self.headers, postData)
            html.encoding = "utf-8"
            root = etree.HTML(self.tran_txt(html.text))
            sku_root = root.xpath(f'//select/option[@price={price}]')
            sku_list = []
            for s in sku_root:
                sku_name = s.xpath('./text()')[0]
                sku_list.append(sku_name)

            return sku_list
        except Exception as ex:
            self.error_info("sku", ex)


    def down_pic(self, path, pic_multi_root):
        """
        Download the images referenced by the given nodes into `path`.

        :param path: destination directory (must already exist)
        :param pic_multi_root: lxml nodes whose ./img/@src URLs are downloaded
        :return: None
        """
        for p in pic_multi_root:
            try:
                for p1 in p.xpath('./img/@src'):
                    pic_url = p1
                    # skip images whose URL contains any of these file names
                    key_list = ["8.jpg", "ops.jpg", "2018ss_M.jpg", "madesoim1.jpg", "pants_2017.jpg"]
                    for k in key_list:
                        if k in pic_url:
                           pic_url = ""
                           break
                    if pic_url != "":
                        pic_name = pic_url.split('/')[-1:][0]
                        print(f"==============下载图片【{pic_name}】中...==============")
                        # img = requests.get(pic_url, headers=self.headers).content
                        img = HttpUtils.do_request("GET",  "http:" + pic_url, self.headers, "").content
                        with open(path + "/" + pic_name, "wb") as f:
                            f.write(img)
            except Exception as ex:
                self.error_info("down_pic", ex)

    # collect the home-page tab links
    def tap(self) -> list:
        """
        Collect the home-page TOP category links.

        NOTE(review): annotated ``-> list`` but actually returns a 4-tuple of
        lists (or None on error) — each list holds dicts with
        'name' ("category|subcategory") and 'url'.
        """
        try:
            print("==============获取首页tap链接==============")
            html = requests.get("http://www.soim.co.kr/", headers=self.headers)
            html.encoding = "EUC-KR"
            root = etree.HTML(self.tran_txt(html.text))
            tap_root = root.xpath('//li[@class="tap-cate menu-02"]')
            # maternity wear (임부복)
            tap1_root = root.xpath('//ul[@class="sub-container cb_clear SP_sub_Tab1"]/li')
            # nursing wear (수유복)
            tap2_root = root.xpath('//ul[@class="sub-container cb_clear SP_sub_Tab2"]/li')
            # underwear (언더웨어)
            tap3_root = root.xpath('//ul[@class="sub-container cb_clear SP_sub_Tab3"]/li')
            # childbirth / childcare (출산육아)
            tap4_root = root.xpath('//ul[@class="sub-container cb_clear SP_sub_Tab4"]/li')
            tap1_list = []
            tap2_list = []
            tap3_list = []
            tap4_list = []
            for t in tap1_root:
                dict_data = dict()
                name = t.xpath('./a//text()')[0]
                url = "http://www.soim.co.kr" + t.xpath('./a/@href')[0]
                if name != "NEW5%" and name != "BEST50" and name != "MADE SOIM" and name != "SALE":
                    dict_data['name'] = "孕妇装" + "|" + name
                    dict_data['url'] = url
                    tap1_list.append(dict_data)
            for t in tap2_root:
                dict_data = dict()
                name = t.xpath('./a//text()')[0]
                url = "http://www.soim.co.kr" + t.xpath('./a/@href')[0]
                if name != "NEW5%" and name != "BEST50" and name != "MADE SOIM" and name != "SALE":
                    dict_data['name'] = "哺乳装" + "|" + name
                    dict_data['url'] = url
                    tap2_list.append(dict_data)
            for t in tap3_root:
                dict_data = dict()
                name = t.xpath('./a//text()')[0]
                url = "http://www.soim.co.kr" + t.xpath('./a/@href')[0]
                if name != "NEW5%" and name != "BEST50":
                    dict_data['name'] = "文胸内衣" + "|" + name
                    dict_data['url'] = url
                    tap3_list.append(dict_data)
            for t in tap4_root:
                dict_data = dict()
                name = t.xpath('./a//text()')[0]
                url = "http://www.soim.co.kr" + t.xpath('./a/@href')[0]
                if name != "NEW5%" and name != "BEST50":
                    dict_data['name'] = "产后用品" + "|" + name
                    dict_data['url'] = url
                    tap4_list.append(dict_data)

            return tap1_list, tap2_list, tap3_list, tap4_list
        except Exception as ex:
            self.error_info("tap", ex)

    def tran_txt(self, txt):
        """
        Apply the somi-tran.xls replacement table to `txt`.

        :param txt: raw HTML text
        :return: text with every 'key' substring replaced by its 'value'
        """
        for t in self.tran_list:
            txt = txt.replace(t['key'], t['value'])
        return txt


    # download images and the size-chart table
    def down_pic_tabel(self):
        """For each distinct in-stock URL in the result table, download the
        product's images and size chart into ./<branduid>/ — skipped when
        that folder already exists. Blocks on input() when finished.
        """
        try:
            detail_list = []
            with open(self.path_pic_down, "r", encoding="utf-8-sig", newline="") as f:
                next(f)  # skip the header row
                reader = csv.reader(f)
                previous_url = ""
                for line in reader:
                    url = line[5]
                    # skip consecutive duplicates, non-http rows, and out-of-stock rows
                    if (previous_url != url and url.find("http") >= 0) and line[8] != "1":
                        previous_url = url
                        detail_list.append(url)
            for d in detail_list:
                if d.find("http") >= 0:
                    branduid = re.findall(r"branduid=(.*?)&", d)[0]
                    # branduid = "68669"
                    path = './' + branduid
                    path_csv = path + "/" + branduid + ".csv"
                    # skip the download when the folder already exists
                    isExists=os.path.exists(path)
                    if not isExists:
                        # html = requests.get(d, headers=self.headers)
                        html = HttpUtils.do_request("GET", d, self.headers, "")
                        html.encoding = "EUC-KR"
                        root = etree.HTML(html.text)
                        # size-chart iframe link
                        if len(root.xpath('//iframe[@id="sf_chart_iframe_id"]/@src')) > 0:
                            chart_url = "http:" + root.xpath('//iframe[@id="sf_chart_iframe_id"]/@src')[0]
                        else:
                            chart_url = ""
                        # thumbnails
                        pic_multi_root = root.xpath('//li[@class="multi-img"]/div')
                        # detail images
                        pic1_multi_root = root.xpath('//p[@style="margin: 0px; text-align: center;"]')
                        # underwear size-chart images
                        if len(root.xpath('//div[@id="detailCnt1"]/div/p')) > 0:
                            pic2_multi_root = root.xpath('//div[@id="detailCnt1"]/div/p')

                        # create the folder
                        os.makedirs(path)
                        # download thumbnails
                        print(f"==============准备下载缩略图【{branduid}】==============")
                        self.down_pic(path, pic_multi_root)
                        # download detail images
                        print(f"==============准备下载详情图片【{branduid}】==============")
                        self.down_pic(path, pic1_multi_root)
                        # # download underwear size-chart images
                        # if len(pic2_multi_root) > 0:
                        #     print(f"==============准备下载其他图片【{branduid}】==============")
                        #     self.down_pic(path, pic2_multi_root)
                        # fetch and save the size chart
                        print(f"==============准备下载表格【{branduid}】==============")
                        self.get_chart(chart_url, path_csv)
            print("==============下载完成==============")
            input()
        except Exception as ex:
            self.error_info("down_pic_tabel", ex)

    def check(self):
        """Diff the base table (path_product) against the comparison table
        (path_product1).

        Rows whose stock flag changed, plus rows present only in the
        comparison table (new SKUs), are appended to self.path_pic_down.
        Blocks on input() when finished.
        """
        try:
            source_list = []
            target_list = []
            print(f"==============加载数据数据表{self.path_product}==============")
            with open(self.path_product, "r", encoding="utf-8-sig", newline="") as f:
                next(f)  # skip the header row
                reader = csv.reader(f)
                for line in reader:
                    # keep only rows with a non-empty id column
                    if line[2] != "":
                        dict_data = dict()
                        dict_data['type'] = line[0]
                        dict_data['class'] = line[1]
                        dict_data['id'] = line[2]
                        dict_data['price'] = line[3]
                        dict_data['title'] = line[4]
                        dict_data['url'] = line[5]
                        dict_data['sku1'] = line[6]
                        dict_data['sku2'] = line[7]
                        dict_data['stock'] = line[8]
                        source_list.append(dict_data)
            print(f"==============加载对比数据表{self.path_product1}==============")
            with open(self.path_product1, "r", encoding="utf-8-sig", newline="") as f:
                next(f)  # skip the header row
                reader = csv.reader(f)
                for line in reader:
                    # keep only rows with a non-empty id column
                    if line[2] != "":
                        dict_data = dict()
                        dict_data['type'] = line[0]
                        dict_data['class'] = line[1]
                        dict_data['id'] = line[2]
                        dict_data['price'] = line[3]
                        dict_data['title'] = line[4]
                        dict_data['url'] = line[5]
                        dict_data['sku1'] = line[6]
                        dict_data['sku2'] = line[7]
                        dict_data['stock'] = line[8]
                        target_list.append(dict_data)
            temp_id = ""
            for t in target_list:
                # carry forward the last non-empty base fields (only the first
                # row of each product carries type/class/price in the CSV)
                if t['type'] != "":
                    type = t['type']
                if t['class'] != "":
                    class_name = t['class']
                if t['price'] != "":
                    price = t['price']
                flag = False  # "new" marker: id+sku1+sku2 not found in the base table
                flag1 = True

                for s in source_list:
                    if t['id'] == s['id']:
                        if t['sku1'] == s['sku1'] and t['sku2'] == s['sku2']:
                            flag = True
                            if t['stock'] != s['stock']:
                                print(f"===ID:{t['id']}===原:{s['stock']}===现:{t['stock']}======")
                                # append to the comparison-result table
                                with open(self.path_pic_down, "a+", encoding="utf-8-sig", newline="") as f:
                                    csv_writer = csv.writer(f)
                                    if temp_id == "" or temp_id != s['id']:
                                        temp_id = s['id']
                                        csv_writer.writerow([type, class_name, s['id'], price, s['title'],
                                                             s['url'], s['sku1'], s['sku2'], t['stock']])
                                    else:
                                        csv_writer.writerow(["", "", s['id'], "", s['title'],
                                                             s['url'], s['sku1'], s['sku2'], t['stock']])
                                break
                if flag == False:
                    # append the new SKU to the comparison-result table
                    # NOTE(review): `s` here is whatever the inner loop last
                    # bound (unbound if source_list is empty) — the id check
                    # and the print below probably meant t['id']; confirm.
                    with open(self.path_pic_down, "a+", encoding="utf-8-sig", newline="") as f:
                        csv_writer = csv.writer(f)
                        if temp_id == "" or temp_id != s['id']:
                            temp_id = s['id']
                            csv_writer.writerow([type, class_name, t['id'], price, t['title'],
                                                 t['url'], t['sku1'], t['sku2'], t['stock']])
                        else:
                            csv_writer.writerow(["", "", t['id'], "", t['title'],
                                                 t['url'], t['sku1'], t['sku2'], t['stock']])

                    print(f"===ID:{s['id']}===新增:{t['sku1']}==={t['sku2']}======")
            print(f"==============检查数据完成。生成对比结果表{self.path_pic_down}==============")
            input()
        except Exception as ex:
            self.error_info("check", ex)


    # write an error-log entry
    def error_info(self, title, txt):
        """Append an exception record to ./error_<YYYY-MM-DD>.txt.

        :param title: name of the method that raised
        :param txt: the exception object (stringified)
        """
        file_name = "error_" + datetime.now().strftime("%Y-%m-%d")
        with open("./" + file_name + ".txt", "a", encoding="utf-8") as fp:
            fp.write(f'========={title}======={datetime.now().strftime("%Y-%m-%d %H:%M:%S")}========' + '\n')
            fp.write(str(txt) + '\n')


if __name__ == '__main__':
    list_1_1 = {
        'name': '连衣裙',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=067&type=X'
    }
    list_1_2 = {
        'name': 'T恤',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=068&type=X'
    }
    list_1_3 = {
        'name': '衬衫',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=111&type=X'
    }
    list_1_4 = {
        'name': '上衣',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=112&type=X'
    }
    list_1_5 = {
        'name': '裤子',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=019&type=X'
    }
    list_1_6 = {
        'name': '裙子',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=786&type=Y'
    }
    list_1_7 = {
        'name': '打底裤',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=113&type=X'
    }
    list_1_8 = {
        'name': '丝袜',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=785&type=Y'
    }
    list_1_9 = {
        'name': '鞋',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=024&type=X'
    }
    list_1_10 = {
        'name': '搭配饰品',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=023&type=X'
    }
    list_2_1 = {
        'name': '外出哺乳装',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=177&type=P'
    }
    list_2_2 = {
        'name': '上衣',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=020&mcode=002&type=X'
    }
    list_2_3 = {
        'name': '连衣裙',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=020&mcode=006&type=X'
    }
    list_2_4 = {
        'name': '上下套装',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=020&mcode=005&type=X'
    }
    list_2_5 = {
        'name': '吊带',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=020&mcode=007&type=X'
    }
    list_2_6 = {
        'name': '哺乳用品',
        'url': 'http://www.soim.co.krhttp://www.soim.co.kr/shop/shopbrand.html?xcode=020&type=M&mcode=001'
    }
    list_2_7 = {
        'name': '产后打底裤',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=020&mcode=003&type=X'
    }
    list_3_1 = {
        'name': '哺乳文胸',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=021&mcode=001&type=X'
    }
    list_3_2 = {
        'name': '孕妇内裤',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=021&mcode=002&type=X'
    }
    list_3_3 = {
        'name': '束缚用品',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=021&mcode=003&type=X'
    }
    list_3_4 = {
        'name': '内衣&运动衣',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=021&mcode=004&type=X'
    }
    list_3_5 = {
        'name': '内衣套装',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=021&mcode=010&type=X'
    }
    list_3_6 = {
        'name': '内衣杂货',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=021&mcode=009&type=X'
    }
    list_3_7 = {
        'name': '泳衣',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=021&mcode=007&type=X'
    }
    list_4_1 = {
        'name': '产后用品',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=001&type=X'
    }
    list_4_2 = {
        'name': '수유/이유',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=002&type=X'
    }
    list_4_3 = {
        'name': '기저귀/물티슈',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=004&type=X'
    }
    list_4_4 = {
        'name': '건강식품',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=005&type=X'
    }
    list_4_5 = {
        'name': '유아스킨케어',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=006&type=X'
    }
    list_4_6 = {
        'name': '유아의류',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=007&type=X'
    }
    list_4_7 = {
        'name': '외출용품',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=008&type=X'
    }
    list_4_8 = {
        'name': '손수건/타올',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=009&type=X'
    }
    list_4_9 = {
        'name': '침구/기타',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=010&type=X'
    }
    list_4_10 = {
        'name': '태교DIY',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=011&type=X'
    }
    list_4_11 = {
        'name': '출산패키지',
        'url': 'http://www.soim.co.kr/shop/shopbrand.html?xcode=493&mcode=012&type=X'
    }

    # Interactive entry point: ask the user what to do, then dispatch to the
    # matching SOMI scraper method.
    beg_name = input("1.下载基础数据 2.下载对比数据 3.下载图片 4.检查库存:")
    somi = SOMI(beg_name)

    if beg_name == "1" or beg_name == "2":
        # Dispatch table replacing the former 40-branch if/elif chain:
        # category key -> (label passed to get_contents, sub-category prompt,
        #                  sub-category dicts in menu order).
        catalogs = {
            "1": ("孕妇装",
                  "1.连衣裙 2.T恤 3.衬衫 4.上衣 5.裤子 6.裙子 7.打底裤 8.丝袜 9.鞋 10.搭配饰品",
                  [list_1_1, list_1_2, list_1_3, list_1_4, list_1_5,
                   list_1_6, list_1_7, list_1_8, list_1_9, list_1_10]),
            "2": ("哺乳装",
                  "1.外出哺乳装 2.上衣 3.连衣裙 4.上下套装 5.吊带 6.哺乳用品 7.产后打底裤",
                  [list_2_1, list_2_2, list_2_3, list_2_4, list_2_5,
                   list_2_6, list_2_7]),
            "3": ("文胸内衣",
                  "1.哺乳文胸 2.孕妇内裤 3.束缚用品 4.内衣&运动衣 5.内衣套装 6.内衣杂货 7.泳衣",
                  [list_3_1, list_3_2, list_3_3, list_3_4, list_3_5,
                   list_3_6, list_3_7]),
            "4": ("产后用品",
                  "1.产后用品 2.수유/이유 3.기저귀/물티슈 4.건강식품 5.유아스킨케어 6.유아의류 7.외출용품 "
                  "8.손수건/타올 9.침구/기타 10.태교DIY 11.출산패키지",
                  [list_4_1, list_4_2, list_4_3, list_4_4, list_4_5, list_4_6,
                   list_4_7, list_4_8, list_4_9, list_4_10, list_4_11]),
        }
        beg_type = input("输入类别：1.孕妇装 2.哺乳装 3.文胸内衣 4.产后用品")
        if beg_type in catalogs:
            label, prompt, entries = catalogs[beg_type]
            class_type = input(prompt)
            # BUGFIX: the original code raised a NameError when beg_type was
            # not "1"-"4" (class_type was never assigned before being tested);
            # both inputs are now validated before dispatching.
            if class_type.isdigit() and 1 <= int(class_type) <= len(entries):
                entry = entries[int(class_type) - 1]
                somi.get_contents(1, label, entry['name'], entry['url'])
            else:
                print("==============请输入正确选项==============")
        else:
            print("==============请输入正确选项==============")
    elif beg_name == "3":
        somi.down_pic_tabel()
    elif beg_name == "4":
        somi.check()
    else:
        print("==========请重新输入选项===========")

    print("==============爬取完成==============")
    input()  # keep the console window open until the user presses Enter

    # beg_name = input("1.下载基础数据。2.下载对比数据。3.下载图片。4.检查库存:")
    # if beg_name == "1" or beg_name == "2":
    #     beg_type = input("输入类别：1.孕妇装 2.哺乳装 3.文胸内衣 4.产后用品")


    # tap1_list = []
    # tap2_list = []
    # tap3_list = []
    # tap4_list = []

    # somi = SOMI(beg_name)
    # # 提取top中分类信息
    # if beg_name == "1" or beg_name == "2":
    #     tap1_list, tap2_list, tap3_list, tap4_list = somi.tap()
    #     if beg_type == "1":
    #         for t1 in tap1_list:
    #             type = t1['name'].split("|")[0]
    #             name = t1['name'].split("|")[1]
    #             url = t1['url']
    #             somi.get_contents(1, type, name, url)
    #     elif beg_type == "2":
    #         for t2 in tap2_list:
    #             type = t2['name'].split("|")[0]
    #             name = t2['name'].split("|")[1]
    #             url = t2['url']
    #             somi.get_contents(1, type, name, url)
    #     elif beg_type == "3":
    #         for t3 in tap3_list:
    #             type = t3['name'].split("|")[0]
    #             name = t3['name'].split("|")[1]
    #             url = t3['url']
    #             somi.get_contents(1, type, name, url)
    #     elif beg_type == "4":
    #         for t4 in tap4_list:
    #             type = t4['name'].split("|")[0]
    #             name = t4['name'].split("|")[1]
    #             url = t4['url']
    #             somi.get_contents(1, type, name, url)
    #     else:
    #         print("==============请输入正确选项==============")
    #     print("==============爬取完成==============")
    #     input()
    # elif beg_name == "3":
    #     somi.down_pic_tabel()
    # elif beg_name == "4":
    #     somi.check()
    # else:
    #     print("==========请重新输入选项===========")
