# -*- coding: utf-8 -*-
import scrapy
from scrapySpider.items import ImgInfo
from selenium import webdriver
from bs4 import BeautifulSoup
import time


class ImagespiderSpider(scrapy.Spider):
    """Spider that renders a Weibo post with Selenium (the page is
    JavaScript-heavy) and scrapes image URLs into ``ImgInfo`` items.

    Scrapy's own downloader is bypassed: ``start_requests`` fetches the
    page through the Chrome driver and yields the parsed items directly.
    """
    name = 'imageSpider'
    # NOTE(review): Chrome starts at import time (class-body evaluation);
    # consider creating the driver in __init__/from_crawler instead.
    driver = webdriver.Chrome()
    allowed_domains = ['weibo.com']
    start_urls = ('https://weibo.com/1822528455/JxW2yjlhP?from=page_1006061822528455_profile&wvr=6&mod=weibotime&type=comment',)
    # Where the rendered page source is dumped for inspection.
    filename=r"E:\python_workspace\scrapySpider\urls.html"
    # Intended JSON dump path for scraped items (currently unused).
    imgfile=r"E:\python_workspace\scrapySpider\urls.json"

    def get_web_page(self,url,call):
        """Load *url* in the Selenium driver, wait for the page to render,
        then invoke *call* with the UTF-8 encoded page source.

        Returns whatever *call* returns, so callers can consume the
        parsed items (previously the result was discarded).
        """
        self.driver.get(url=url)
        time.sleep(5)  # crude fixed wait for JS-rendered content
        return call(self.driver.page_source.encode())

    def start_requests(self):
        # BUGFIX: previously this yielded the (None) return value of
        # get_web_page, which both loses the parsed items and makes scrapy
        # choke on a non-Request/None yield. Yield the items themselves.
        for url in self.start_urls:
            for item in self.get_web_page(url=url, call=self.parse) or ():
                yield item

    def parse(self, response):
        """Extract image URLs from raw page bytes.

        *response* is the encoded page source produced by get_web_page
        (NOT a scrapy Response object). The page is dumped to
        ``self.filename`` for debugging, then ``img`` tags with class
        ``S_line2`` are collected into ``ImgInfo`` items.
        """
        imginfos=[]
        with open(self.filename, 'wb') as f:
            f.write(response)
        soup = BeautifulSoup(response, 'html.parser')
        img_urls = soup.select("img[class='S_line2']")
        # BUGFIX: the counter was re-initialized to 1 inside the loop, so
        # every item was named "ming1"; enumerate gives distinct indices.
        for i, url in enumerate(img_urls, start=1):
            info=ImgInfo()
            info['name']="ming"+str(i)
            # src attributes are protocol-relative ("//..."); prepend scheme.
            info["url"]="https:"+url["src"]
            imginfos.append(info)
        time.sleep(3)
        self.driver.close()
        return imginfos