# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request,FormRequest
import urllib.request
from csdn.items import  CsdnItem

class CsdnSpiderSpider(scrapy.Spider):
    """Spider that logs in to CSDN and scrapes per-course metadata.

    Flow: start_requests -> login (prompt for credentials / captcha and
    submit the form) -> next (hop to the personal home page) -> next2
    (confirm login, fan out over the course listing pages) -> ParseCourse
    (follow each course link) -> parse (extract details, write one .txt
    file per course and yield a CsdnItem).
    """
    name = 'csdn_spider'
    allowed_domains = ['csdn.net']
    start_urls = ['https://passport.csdn.net/account/login']
    # The real HTTP header name is "User-Agent"; the original "UserAgent"
    # key was silently ignored by servers, so every request went out with
    # Scrapy's default UA instead of this one.
    header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.4399.400 QQBrowser/9.7.12828.400"}

    def start_requests(self):
        """Fetch the login page first so `login` can read the CSRF fields."""
        return [Request("https://passport.csdn.net/account/verify",
                        meta={"cookiejar": 1}, callback=self.login)]

    def login(self, response):
        """Prompt for credentials (and captcha if shown) and submit the form.

        :param response: login-page response carrying the hidden CSRF fields.
        :return: a one-element list with the FormRequest for the login POST.
        """
        # extract_first() yields a plain string (or "" when absent); the old
        # .extract() returned a *list*, which is not a valid formdata value.
        lt = response.xpath('//input[@name="lt"]/@value').extract_first(default="")
        print(lt)
        execution = response.xpath('//input[@name="execution"]/@value').extract_first(default="")
        print(execution)
        captcha = response.xpath('//img[@class="code-img"]/@src').extract()
        username = input("输入CSDN登录账号：")
        password = input("输入CSDN登录密码：")
        # Build the common fields once; the captcha answer is merged in only
        # when needed (the original duplicated this whole dict in two branches).
        data = {
            "gps": "",
            "username": username,
            "password": password,
            "rememberMe": "true",
            "lt": lt,
            "execution": execution,
            "_eventId": "submit"
        }
        if captcha:
            print("此时有验证码!")
            print(captcha[0])
            # 将验证码存储到本地
            localpath = "D:/tmp/captcha.png"
            # urljoin tolerates a relative captcha src (the page does not
            # necessarily emit an absolute URL).
            urllib.request.urlretrieve(response.urljoin(captcha[0]),
                                       filename=localpath)
            data["validateCode"] = input("请到D:/tmp/中，输入验证码captcha.png的信息：")
        else:
            print("我在parse，此时没有验证码!")

        return [FormRequest.from_response(response,
                                          meta={"cookiejar": response.meta["cookiejar"]},
                                          headers=self.header,
                                          formdata=data,
                                          callback=self.next,
                                          )]

    def parse(self, response):
        """Extract one course's details, write them to a text file, yield item.

        :param response: a single course detail page.
        :return: a populated CsdnItem, or None when the page layout differs.
        """
        Cname = response.xpath("//div[@class='info_right no_combo no_market_price']/h1/a/text()").extract()
        if not Cname:
            # Layout mismatch (e.g. session expired) — the original raised
            # IndexError here; skip the page instead of killing the spider.
            self.logger.warning("course name not found: %s", response.url)
            return None
        print("开始分析" + Cname[0] + "课程......")
        item = CsdnItem()
        item["Coursename"] = Cname  # reuse the already-extracted list
        item["Student"] = response.xpath("//span[@class='num']/text()").extract()
        item["Price"] = response.xpath("//span[@class='money']/text()").extract()
        item["Time"] = response.xpath("//span[@class='pinfo']/text()").extract()
        price = item["Price"][0].replace(" ", "").replace("\n", "")
        # `with` guarantees the file handle is closed even if a write raises
        # (the original leaked the handle on any exception before fh.close()).
        with open("C:/胡恺健/Python/Course/" + Cname[0] + ".txt", "w",
                  encoding='utf-8') as fh:
            fh.write("课程名称：" + Cname[0] + "\n")
            fh.write("学生人数：" + item["Student"][0] + "\n")
            fh.write("课时数：" + item["Time"][0] + "\n")
            fh.write("课程价格：" + str(price) + "\n")
        return item

    def next(self, response):
        """Post-login hop to the user's home page to confirm the session."""
        yield Request("https://my.csdn.net/", callback=self.next2,
                      meta={"cookiejar": True})

    def next2(self, response):
        """Report the personal home page URL, then fan out over list pages."""
        # The HTML attribute is `rel`, not `ref`: with @ref the xpath always
        # matched nothing and `title` was invariably an empty list.
        title = response.xpath("//link[@rel='canonical']/@href").extract()
        print("您的个人主页路径：" + str(title))
        print("登录成功！开始分析课程......")
        # NOTE(review): page count 242 is hard-coded — presumably the site's
        # listing depth at the time of writing; verify it is still current.
        for i in range(1, 243):
            yield Request("https://edu.csdn.net/courses/p" + str(i),
                          callback=self.ParseCourse, meta={"cookiejar": True})

    def ParseCourse(self, response):
        """Follow every course link found on one listing page."""
        print("跳转成功，开始分析 本页 课程数据......")
        urlList = response.xpath("//div[@class='course_dl_list']/a/@href").extract()
        for url in urlList:
            # urljoin tolerates relative hrefs in the listing markup.
            yield Request(response.urljoin(url),
                          meta={"cookiejar": response.meta["cookiejar"]},
                          headers=self.header,
                          callback=self.parse
                          )

