import requests
from threading import Thread
import random
import lxml.etree
# from user_info.models import *
from bs4 import BeautifulSoup
import re


class Myspider(Thread):
    """Thread that downloads one JD.com search-results page for the query
    "手机" (mobile phone) and extracts name / price / image URL for every
    product listed on it.
    """

    def __init__(self, page):
        """Prepare the request URL, headers, proxy pool and result buffers.

        :param page: page number of the JD search results to fetch.
        """
        super(Myspider, self).__init__()
        # keyword=%E6%89%8B%E6%9C%BA is the URL-encoded query "手机".
        self.url = f'https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&wq=%E6%89%8B%E6%9C%BA&page={page}'
        self.produce_headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.182 Safari/537.36',
            'cookie': '__jdu=1320894287; areaId=3; PCSYCityID=CN_120000_120100_120119; shshshfpa=9b17488b-6a1a-d404-d13d-68b6b1d321d3-1614345152; shshshfpb=qSM8jA%2FhKzU1tpV%2FF%20OQnOA%3D%3D; _pst=jd_4c2038ee8f0c5; unick=jd_4c2038ee8f0c5; pin=jd_4c2038ee8f0c5; _tp=u7tfqnnv%2FgAfYxocZGVP7zriRiJuyo%2ByqkgXuu3yMvo%3D; qrsc=3; ipLoc-djd=3-51041-55695-0; unpl=V2_ZzNtbRcEShAgDkZUchlUA2IBFVQRXksccF9AVH9OCAUzURZUclRCFnUUR1dnGloUZwEZXEBcQx1FCEdkeBBVAWMDE1VGZxBFLV0CFSNGF1wjU00zQwBBQHcJFF0uSgwDYgcaDhFTQEJ2XBVQL0oMDDdRFAhyZ0AVRQhHZHsbXwRuCxdeS1FzJXI4dmRzGlUNZgQiXHJWc1chVEZWeBxcDSoDEF5DXksQdgFAZHopXw%3d%3d; __jdv=76161171|baidu-pinzhuan|t_288551095_baidupinzhuan|cpc|0f3d30c8dba7459bb52f2eb5eba8ac7d_0_db84d71181974368b8984f715fe1ec58|1614402200879; rkv=1.0; __jda=122270672.1320894287.1614345149.1614417997.1614424178.6; __jdc=122270672; __jdb=122270672.8.1320894287|6.1614424178; shshshfp=7de9fb65dcc209433ef13ee503b39d1d; shshshsID=b8fce3e7a274b821d29c05166cde748c_8_1614425651127; 3AB9D23F7A4B3C9B=A7QQPL5X4YKDFTTQX72Y4LGLJY76KMTODAUZT2EKA4DEBWBIJ2AICT4DXG3ZKXKBVF5P2JNU7WMQPD4MIB26M6HWRM'
        }
        # Proxy pool (host:port).  BUG FIX: three entries previously carried a
        # leading space, which would have produced an invalid proxy address.
        self.pro_list = [
            '171.35.168.65:9999',
            '175.43.32.39:9999',
            '123.169.120.154:9999',
            '49.86.177.103:9999',
        ]
        self.response = ''   # raw HTML text of the fetched page
        self.magelist = []   # unused at present; kept for backward compatibility
        self.all_list = []   # one dict per parsed product

    def get_product_list(self):
        """Download the search page and store its HTML in ``self.response``.

        BUG FIX: the proxy was previously passed via ``params=``, which only
        appends it to the URL's query string.  It must go through the
        ``proxies`` mapping to actually route the request through the proxy.
        """
        proxy = random.choice(self.pro_list)
        response = requests.get(
            url=self.url,
            headers=self.produce_headers,
            proxies={'http': f'http://{proxy}', 'https': f'http://{proxy}'},
            timeout=10,  # avoid hanging the thread on a dead proxy
        )
        self.response = response.text

    def parse_data(self):
        """Parse ``self.response`` and append one dict per product to
        ``self.all_list``, then print the accumulated results.
        """
        html = lxml.etree.HTML(self.response)
        phone_list = html.xpath('//*[@id="J_goodsList"]/ul/li')

        # The xpath could not resolve the image address before; note the dict
        # keys 'prize' (sic) and 'img_ur' (sic) are kept unchanged so any
        # downstream consumer of these dicts keeps working.
        for phone in phone_list:
            dic = {}
            name = phone.xpath('./div/div[4]/a/em/text()')
            price = phone.xpath('./div/div[3]/strong/i/text()')
            # BUG FIX: the image xpath was absolute ('//*[@id="J_goodsList"]...'),
            # so every item collected the image URLs of ALL items on the page.
            # A relative path scoped to this <li> returns only its own image.
            img = phone.xpath('./div/div[2]/div/ul/li[1]/a/img[1]/@src')
            # Guard empty xpath results (ads / placeholder <li> entries) so a
            # single malformed item no longer aborts the whole parse.
            dic['name'] = name[0] if name else ''
            dic['prize'] = price[0] if price else ''
            dic['img_ur'] = img  # stays a list, as in the original
            self.all_list.append(dic)
        print(self.all_list)

    def run(self):
        """Thread entry point: fetch the page, then parse it."""
        self.get_product_list()
        self.parse_data()


if __name__ == '__main__':
    # Guard the script entry point so importing this module no longer fires
    # off a network request as a side effect.  Fetch results page 5 and wait
    # for the worker thread to finish before exiting.
    sp = Myspider(5)
    sp.start()
    sp.join()