# !/usr/bin/env python3
# @author : walker
# @data : 2019/9/11

from config import *
import json

class Use_company_data_find_goods:
    """Query the ukelili.com search API for goods belonging to a company.

    Relies on ``requests`` and a ``headers`` dict imported from ``config``.
    """

    def __init__(self):
        # Search endpoint returning the goods listing for a query.
        self.url = 'http://www.ukelili.com/search/search'

    def use_url_get_page(self, company_data, max_retries=3):
        """Fetch the raw search-result payload for a company.

        :param company_data: dict expected to contain at least 'company_name'
                             (presumably also 'company_goods_url'; the old
                             commented-out code referenced it).
        :param max_retries: attempts before giving up (default 3). The old
                            code retried via unbounded recursion, which would
                            hit RecursionError on a persistent failure.
        :return: the decoded JSON response on success, or ``None`` once all
                 retries are exhausted.
        """
        # Build per-request headers from a *copy* of the shared config dict.
        # The old code assigned back into the global ``headers``, so the
        # "Refernce" value grew by one suffix on every call.
        # NOTE(review): "Refernce" looks like a typo for "Referer" — kept
        # as-is because config defines that key; confirm against config.
        request_headers = dict(headers)
        request_headers["Refernce"] = (headers.get("Refernce") or '') \
            + '/search/index/searchStr/Bandai.html'

        # ``requests`` expects a mapping for ``params``; the old code passed
        # json.dumps(...) — a JSON *string* — which is sent as a raw query
        # string and not understood by the server.
        params = {
            "searchType": "全部分类",
            "searchStr": company_data.get('company_name'),
            "searchFactory": "全部厂家",
            "p": 5,  # hard-coded page index, preserved from the original
        }

        for _attempt in range(max_retries):
            try:
                res = requests.get(self.url, params=params,
                                   headers=request_headers)
                if res.status_code == 200:
                    return json.loads(res.content)
                # Non-200: fall through and retry.
            except Exception:
                # Network or JSON decode error: retry up to max_retries.
                # Kept broad deliberately — the original treated any failure
                # as retryable.
                pass
        return None

    def parse_company_page(self, company_data):
        """Parse the search page for a company's goods detail pages.

        Currently only fetches the payload; the BeautifulSoup parsing step
        is still commented out (TODO: implement extraction of the
        "thumbnail" divs as sketched below).
        """
        company_page_content = self.use_url_get_page(company_data)

        # soup = BeautifulSoup(company_page_content,'lxml')
        # company_data = soup.findAll(name="div",attrs={"class" : "thumbnail"})
        return company_page_content
