#!/usr/bin/env python3
# @author : walker
# @date : 2019/9/15
from config import *
import re
import sys
import time
from hashlib import md5
from bson import binary
import random
from fake_useragent import UserAgent

class use_url_get_goods_data:
    """Scrape a single figure (手办) product page.

    Workflow: fetch the goods page (``use_url_get_page``), split it into an
    attribute table and image links (``parse_page``), download each image
    (``get_img_info``) and persist the merged record plus image files
    (``save_prototype_info_and_img_data``).

    NOTE(review): relies on ``requests``, ``BeautifulSoup``, ``URL`` and
    ``db_prototype_dataset_info`` coming from ``from config import *`` —
    confirm against config module.
    """

    # Table rows whose value cell holds plain text: Chinese label -> field key.
    _PLAIN_FIELDS = {
        "名称": "name",
        "官方价格": "official_price",
        "发售时间": "launch_date",
        "系列": "series",
        "材质": "material",
        "比例": "proportion",
        "尺寸": "size",
        "分类": "classification",
    }
    # Table rows whose value cell wraps the text in an <a> tag.
    _LINK_FIELDS = {
        "人物": "character",
        "作品": "production",
        "厂家": "factory",
        "原型师": "prototyper",
    }

    def __init__(self):
        pass

    def use_url_get_page(self, goods_url):
        """Request ``URL + goods_url`` and extract the raw page pieces.

        Retries forever on a non-200/304 status (after a short random sleep)
        or on a connection error.  The original implementation retried via
        recursion, which eventually raises ``RecursionError`` on a flaky
        connection; a loop keeps the same retry-forever behaviour safely.

        :param goods_url: relative URL of the goods page
        :return: (thumbnail <a> tags, <tr> attribute rows, main image src)
        """
        while True:
            try:
                res = requests.get(URL + goods_url)
                if res.status_code == 200 or res.status_code == 304:
                    print(111111111111111111111111111111111)
                    soup = BeautifulSoup(res.text, 'lxml')
                    # All attribute rows of the spec table.
                    character_data = soup.findAll("tr")
                    # Thumbnail links and the main (original) image.
                    img_data = soup.findAll(name="a", attrs={"class": "thumbnail"})
                    img_ori_data = soup.findAll(name="img", attrs={"class": "img-thumbnail"})
                    return img_data, character_data, img_ori_data[0].get("src")

                print(2222222222222222222222222222222222222)
                # Random back-off before the next attempt.
                time.sleep(random.uniform(0, 2))
            except requests.ConnectionError:
                print(333333333333333333333333333333)

    def get_img_info(self, img_url, i):
        """Download one image with a random User-Agent.

        :param img_url: absolute image URL
        :param i: retry sentinel; attempts stop once ``i`` exceeds 2
            (stepped by 2, i.e. at most two tries, as in the original)
        :return: raw image bytes on success, ``[]`` (falsy) on give-up

        Bug fix: the original recursed on ``ConnectionError`` without
        incrementing ``i``, so a dead host caused unbounded recursion.
        The sentinel now advances on every failed attempt.
        """
        print("获取第多少次循环---------------------", i)
        while i <= 2:
            try:
                ua = UserAgent()
                # Random User-Agent header to look less like a bot.
                headers = {'User-Agent': ua.random}
                res = requests.get(img_url, headers=headers)
                if res.status_code == 200:
                    print("aaaaaaaaaaaaaaaa")
                    return res.content
                print("bbbbbbbbbbbb")
            except requests.ConnectionError:
                print("cccccccccccccc")
            i += 2
        # Too many failed attempts: hand the caller a falsy result.
        return []

    @staticmethod
    def parse_character_item_data(character_item):
        """Translate one attribute row into ``{english_key: value}``.

        :param character_item: [label_cell, value_cell] pair of <td> tags
        :return: single-entry dict for a known label

        Unknown labels print a notice and terminate the process (unchanged
        from the original) so that new page fields are noticed immediately.
        """
        label = character_item[0].string
        if label in use_url_get_goods_data._PLAIN_FIELDS:
            key = use_url_get_goods_data._PLAIN_FIELDS[label]
            return {key: character_item[1].string.strip()}
        if label in use_url_get_goods_data._LINK_FIELDS:
            key = use_url_get_goods_data._LINK_FIELDS[label]
            return {key: character_item[1].find("a").string.strip()}
        print("还有多的没有找到")
        sys.exit()

    def parse_page(self, goods_url):
        """Fetch a goods page and return its parsed contents.

        :param goods_url: relative URL of the goods page
        :return: (list of single-entry attribute dicts, list of image URLs)
        """
        img_data, character_data, ori_img = self.use_url_get_page(goods_url)
        print("url~~~~~~~~~~~~~", goods_url)

        # One {field: value} dict per attribute row.
        character_data_list = []
        for row in character_data:
            # Label/value <td> pair of this row.
            cells = row.findAll(name="td", attrs={"nowrap": ""})
            character_data_list.append(self.parse_character_item_data(cells))

        # Image URL inside each thumbnail link, plus the main image last.
        img_url_list = [item.find("img").get("src") for item in img_data]
        img_url_list.append(ori_img)
        print(img_url_list)

        return character_data_list, img_url_list

    @staticmethod
    def Merge(dict1, dict2):
        """Return a new dict combining both; ``dict2`` wins on key clashes."""
        return {**dict1, **dict2}

    def save_prototype_info_and_img_data(self, goods_url, j):
        """Download a goods page, store its images and its merged record.

        Images go to ``../imgs/<md5>.jpg``; the merged attribute dict is
        saved to MongoDB.  Only the first 12 image URLs are processed.

        :param goods_url: relative URL of the goods page
        :param j: 1-based index of this goods item (progress messages only)
        :return: (list of image URLs that failed to download,
                  failed_sign — 0 when at least one image was saved, else 1)
        """
        character_data_list, img_url_list = self.parse_page(goods_url)
        img_name_list = []
        failed_img_url_list = []

        # Only keep the first 12 images.
        for img_url in img_url_list[:12]:
            # Deterministic file name derived from the URL.
            img_name = md5(img_url.encode('utf-8')).hexdigest()
            img_content = self.get_img_info(img_url, 0)

            if img_content:
                # 'with' guarantees the handle is closed even if write() fails
                # (original used bare open/close and could leak on error).
                with open('../imgs/{}.jpg'.format(img_name), 'wb') as f:
                    f.write(img_content)
                img_name_list.append(img_name)
            else:
                # Download gave up: report this URL back to the caller.
                failed_img_url_list.append(img_url)

        if img_name_list:
            # failed_sign 0: images and goods info stored successfully.
            failed_sign = 0
            print("第" + str(j) + "个商品信息下载完成~~~~")
            character_data_list.append({"img_name_list": img_name_list})
            character_data_list.append({"goods_url": goods_url})

            # Fold the per-row dicts into one flat record (earlier keys win,
            # matching the original Merge(item, accumulator) order).
            property_data = {}
            for character_data_item in character_data_list:
                property_data = self.Merge(character_data_item, property_data)

            print(property_data)
            # NOTE(review): Collection.save() is deprecated in modern pymongo;
            # insert_one/replace_one is the successor — confirm driver version.
            db_prototype_dataset_info['prototype_data_info'].save(property_data)
        else:
            # failed_sign 1: nothing could be downloaded for this item.
            failed_sign = 1
            print("第" + str(j) + "个商品信息下载失败！！！！！！！！！！")
        return failed_img_url_list, failed_sign
