# -*- coding: utf-8 -*-
import scrapy

# import sys
#
# print("\n")
# print(sys.path)
# print("\n")
# """
# ['/usr/local/bin',
# '/usr/lib/python35.zip',
# '/usr/lib/python3.5',
# '/usr/lib/python3.5/plat-x86_64-linux-gnu',
# '/usr/lib/python3.5/lib-dynload',
# '/usr/local/lib/python3.5/dist-packages',
# '/usr/lib/python3/dist-packages',
# '/home/python/Desktop/Spider/mySpider']
# """

from mySpider.items import ItcastItem
import os


class ItcastSpider(scrapy.Spider):
    """Spider that scrapes teacher name/title/info entries from itcast.cn."""

    name = 'itcast'
    allowed_domains = ['itcast.cn']
    start_urls = ['http://itcast.cn/channel/teacher.shtml']

    def parse(self, response):
        """Parse the teacher listing page.

        Saves the raw HTML snapshot to ./datasets/teacher.html, then yields
        one ItcastItem per teacher entry found on the page.

        :param response: the downloaded page (scrapy Response).
        :yields: ItcastItem with 'name', 'title', and 'info' fields.
        """
        # Make sure the output directory exists before writing the snapshot;
        # otherwise open() raises FileNotFoundError on a fresh checkout.
        os.makedirs("./datasets", exist_ok=True)

        with open("./datasets/teacher.html", "w", encoding="utf-8") as f:
            f.write(response.text)

        for each in response.xpath("//div[@class='li_txt']"):
            item = ItcastItem()
            # extract_first() returns the default instead of raising
            # IndexError when a node is missing (the old [0] would crash
            # on any entry lacking an h3/h4/p child).
            item['name'] = each.xpath("h3/text()").extract_first(default="")
            item['title'] = each.xpath("h4/text()").extract_first(default="")
            item['info'] = each.xpath("p/text()").extract_first(default="")

            # Using yield (a generator) rather than return so every item
            # passes through the item pipeline; a plain return of a list
            # would bypass per-item pipeline processing.
            yield item
