import re
import scrapy #导入scrapy包
from bs4 import BeautifulSoup
from scrapy.http import Request ##一个单独的request的模块，需要跟进URL的时候，需要用它
from Downmaterial.items import DownmaterialItem #这是我定义的需要保存的字段，（导入dingdian项目中，items文件中的DingdianItem类）
import requests
import os

class Myspider(scrapy.Spider):
    """Scrapy spider that crawls a pp.163.com photo gallery.

    It walks the gallery front page, creates one local folder per album,
    then scrapes each album page into a DownmaterialItem (title, image
    URLs, shooting-equipment line, and the author's description text).
    """
    name = 'Downmaterial'
    # Bare domain only: Scrapy's OffsiteMiddleware matches hostnames, so the
    # original 'pp.163.com/' (trailing slash) would never match and every
    # followed request would be filtered as off-site.
    allowed_domains = ['pp.163.com']
    # Alternative gallery: http://deer-vision.pp.163.com/
    bash_url = 'http://tangyicherry.pp.163.com/'
    bashurl = '.html'

    def start_requests(self):
        """Seed the crawl with the gallery front page."""
        yield Request(self.bash_url, self.parse)

    def parse(self, response):
        """Parse the gallery index: one <li class="w-cover"> per album.

        For each album, create its local folder and follow the album link,
        carrying the album title through request meta.
        """
        tds = BeautifulSoup(response.text, 'lxml').find_all('li', class_='w-cover')
        print(tds)
        for td in tds:
            link = td.find('a')
            if link is None:
                # Malformed cover without an anchor -- skip instead of crashing.
                continue
            subtitle = link['title']
            url = link['href']
            self.mkdir(subtitle)
            # dont_filter: album pages live on the per-author subdomain, which
            # the dupe/offsite filters might otherwise drop.
            yield Request(url, callback=self.infoma, dont_filter=True,
                          meta={'subtitle': subtitle})

    def infoma(self, response):
        """Parse a single album page into a DownmaterialItem.

        Extracts the lazy-loaded image URLs, the equipment line (the text
        after the first <br/> in the info paragraph) and the author's
        description. All lookups are guarded so a page with a different
        layout yields empty fields instead of raising (the original code
        crashed with IndexError/AttributeError on such pages).
        """
        item = DownmaterialItem()
        item['title'] = response.meta['subtitle']
        soup = BeautifulSoup(response.text, 'lxml')
        # Lazy-loaded <img> tags keep the real URL in data-lazyload-src.
        imgurls = [td['data-lazyload-src']
                   for td in soup.find_all('img', class_='z-tag data-lazyload-src')]
        item['url'] = imgurls
        item['equipment'] = ''
        item['hearttext'] = ''
        wrapper = soup.find('div', class_='g-mainwraper g-mainwraper-picsetinfo')
        if wrapper is not None:
            content = wrapper.find('p')
            if content is not None:
                # Equipment info sits after the first <br/>; guard against
                # paragraphs that contain no <br/> at all.
                parts = str(content).split('<br/>')
                if len(parts) > 1:
                    item['equipment'] = parts[1]
            article = wrapper.find('article')
            if article is not None:
                item['hearttext'] = article.get_text()
        print(item)
        return item

    def save(self, img_url):
        """Download img_url and write it to <name>.jpg in the CWD.

        NOTE(review): the name is sliced as img_url[-23:-4], which assumes a
        fixed-length URL tail -- confirm against real image URLs.
        """
        name = img_url[-23:-4]
        img = self.request(img_url)
        # 'wb', not 'ab': appending to an existing file on a re-download
        # would corrupt the image.
        with open(name + '.jpg', 'wb') as f:
            f.write(img.content)

    def mkdir(self, path):
        """Create G:\\摄影素材\\<path> if missing.

        Returns True when the folder was created, False when it already
        existed.
        """
        path = path.strip()
        bashdir = "G:\摄影素材" + '\\'
        target = os.path.join(bashdir, path)  # hoisted: was computed three times
        if not os.path.exists(target):
            print(u'建了一个名字叫做', path, u'的文件夹！')
            print(target)
            os.makedirs(target)
            return True
        else:
            print(u'名字叫做', path, u'的文件夹已经存在了！')
            return False

    def request(self, url):
        """GET url with a desktop User-Agent; return the requests.Response."""
        headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
        # Explicit timeout so a stalled server cannot hang the spider forever.
        return requests.get(url, headers=headers, timeout=30)