# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request,FormRequest
import urllib.request
from csdncourse.items import CsdncourseItem

class CourseSpider(scrapy.Spider):
    """Spider that logs into CSDN (handling an optional captcha) and then
    scrapes course listing pages and per-course detail pages into
    CsdncourseItem objects.
    """
    name = 'course'
    # Browser-like User-Agent so the login endpoint treats us as a real browser.
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4620.400 QQBrowser/9.7.13014.400'}
    # NOTE(review): totalCount appears unused anywhere in this class — confirm
    # no external code reads it before removing.
    totalCount = 20
    # Running 1-based index assigned to each scraped item.
    saveIndex = 1

    def start_requests(self):
        """Start the crawl at the CSDN login/verify page with a cookie jar
        so the session survives across subsequent requests."""
        return [Request(
            "https://passport.csdn.net/account/verify;jsessionid=D3E616180A118324EC88118B70AAF786.tomcat1",
            meta={"cookiejar": 1},
            callback=self.parse,
        )]

    def parse(self, response):
        """Submit the login form, prompting the operator for a captcha when
        the page presents one."""
        # Check whether the login page contains a captcha image.
        yzm = response.xpath("//img[@id='yanzheng']/@src").extract()
        # SECURITY: credentials are hard-coded in source; move them to
        # project settings or environment variables.
        data = {
            "username": "zyhzzyyhh",
            "password": "zYh1992.",
            "_eventId": "submit",
            "rememberMe": "true",
        }
        if yzm:
            print("验证码url：" + yzm[0])
            # Download the captcha image so the operator can read it locally.
            urllib.request.urlretrieve(yzm[0], "C:/Users/admin/Desktop/yzm.png")
            print("请输入验证码")
            code = input("请到'C:/Users/admin/Desktop/yzm.png'查看验证码")
            data["validateCode"] = code
        print("正在登陆")
        # Post the form back through the same cookie jar established above.
        return [FormRequest.from_response(
            response,
            meta={"cookiejar": response.meta["cookiejar"]},
            headers=self.header,
            formdata=data,
            callback=self.next,
        )]

    def next(self, response):
        """After login: dump the landing page for manual inspection, then
        queue the course listing pages."""
        print("登录成功")
        # Context manager guarantees the file is closed even if write() raises
        # (the original open/close pair leaked the handle on error).
        with open("C:/Users/admin/Desktop/login.html", "wb") as file:
            file.write(response.body)
        for i in range(1, 10):  # listing pages p1 .. p9
            print("开始爬第" + str(i) + "个页面")
            url = "https://edu.csdn.net/courses/p" + str(i)
            yield Request(url, callback=self.next2, meta={"cookiejar": True})

    def next2(self, response):
        """Listing page: follow every course detail link found on it."""
        urlList = response.xpath("//div[@class='course_dl_list']/a/@href").extract()
        for url in urlList:
            print("详情页链接" + url)
            yield Request(url, callback=self.next3, meta={"cookiejar": True})

    def next3(self, response):
        """Detail page: populate one CsdncourseItem from the course page.

        NOTE(review): extract()[0] raises IndexError when an XPath matches
        nothing on a nonstandard page; consider extract_first() with a
        default if that proves to be a problem in practice.
        """
        item = CsdncourseItem()
        item["name"] = response.xpath("/html/head/title/text()").extract()[0]
        item["studentNum"] = response.xpath("//span[@class='num']/text()").extract()[0]
        item["price"] = response.xpath("//span[@class='money']/text()").extract()[0]
        item["courseNum"] = response.xpath("//span[@class='pinfo']/text()").extract()[0]
        item["index"] = self.saveIndex
        self.saveIndex += 1
        yield item


