# -*- coding: utf-8 -*-
'''
A simple crawler example.

Create a crawler project with: scrapy startproject [project name]
  e.g. scrapy startproject scrapySpider
Run a crawler with: scrapy crawl [spider name] -o [output file]
  e.g. scrapy crawl teacherSpider -o teacher.csv
'''
import scrapy
from selenium import webdriver
from scrapySpider.items import PersonInfoItem


class PersonInfoSpider(scrapy.Spider):
    """Spider that scrapes teacher name, title and bio from itcast.cn.

    Yields one ``PersonInfoItem`` per teacher card found on the
    teacher listing page.
    """

    name = 'teacherSpider'
    allowed_domains = ['itcast.cn']
    start_urls = ("http://www.itcast.cn/channel/teacher.shtml",)

    def start_requests(self):
        """Build the initial Request objects for every start URL.

        A browser-like User-Agent header is set so the site does not
        reject the crawler, and ``dont_filter=True`` bypasses the
        duplicate-request filter so the start URL is always fetched.
        """
        for url in self.start_urls:
            yield scrapy.Request(
                url,
                headers={'User-Agent': 'Mozilla/5.0'},
                callback=self.parse,
                dont_filter=True,
            )

    def parse(self, response):
        """Extract teacher records from the listing page.

        Args:
            response: the downloaded page for a start URL.

        Returns:
            A list of ``PersonInfoItem`` objects (Scrapy accepts any
            iterable of items from a callback).
        """
        teachers = []
        # Each teacher card is a <div class="li_txt"> containing
        # <h3> (name), <h4> (title) and <p> (bio).  XPath is used here;
        # BeautifulSoup or CSS selectors would work equally well.
        for info in response.xpath("//div[@class='li_txt']"):
            teacher = PersonInfoItem()
            # extract_first(default='') returns '' instead of raising
            # IndexError when a node is missing — the original
            # extract()[0] crashed the whole page parse on any
            # malformed card.
            teacher['name'] = info.xpath("h3/text()").extract_first(default='')
            teacher['title'] = info.xpath("h4/text()").extract_first(default='')
            teacher['info'] = info.xpath("p/text()").extract_first(default='')
            teachers.append(teacher)
        return teachers