# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request,FormRequest
from csdnlg.items import CsdnlgItem
import re,os
import urllib.request as ur

class LgSpider(scrapy.Spider):
    """Spider that logs in to CSDN via the passport form, verifies the login
    by visiting the message center, then crawls course listing pages and
    scrapes per-course info (enrollment, price, title) into CsdnlgItem."""
    name = 'lg'
    allowed_domains = ['csdn.net']
    start_urls = ['http://csdn.net/']
    # Fixed request headers; proxy IPs were unstable, so none is used here.
    header={'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'}

    # Compiled once at class creation instead of per-response; raw strings
    # avoid invalid-escape warnings on '\d' (and '\n' compiles to the same
    # regex whether it is a literal newline or the \n escape).
    _course_url_pat = re.compile(r'<div class="course_dl_list">\n.*?<a href="(.*?)" target="_blank">')
    _price_pat = re.compile(r'\d+\.\d+')

    def start_requests(self):
        """Fetch the real login page; meta 'cookiejar' turns on cookie tracking."""
        url='https://passport.csdn.net/'
        return [Request(url,headers=self.header,meta={'cookiejar':1},callback=self.parse)]

    def parse(self, response):
        """Build the login form data (credentials plus hidden CSRF-style
        fields scraped from the page) and POST it via FormRequest.

        USERNAME/PASSWORD must be filled in by the tester; lt/execution/
        _eventId are read from hidden inputs on the login page and will
        raise IndexError if the page layout changed.
        """
        print(response.xpath('/html/head/title/text()').extract())
        print(response.xpath('//input[@name="lt"]/@value').extract())
        print(response.xpath('//input[@name="execution"]/@value').extract())
        print(response.xpath('//input[@name="_eventId"]/@value').extract())
        data={'username':'USERNAME',
              'password':'PASSWORD',
              'rememberMe':'true',
              'lt':response.xpath('//input[@name="lt"]/@value').extract()[0],
              'execution':response.xpath('//input[@name="execution"]/@value').extract()[0],
              '_eventId':response.xpath('//input[@name="_eventId"]/@value').extract()[0]
              }
        print(data)
        print('登陆中')
        # Login success was verified by comparing next2's result with and
        # without formdata attached (see the commented variant below).
        #return [FormRequest.from_response(response, headers=self.header, meta={"cookiejar": response.meta["cookiejar"]}, callback=self.next,)]
        return [FormRequest.from_response(response, headers=self.header, meta={"cookiejar": response.meta["cookiejar"]},formdata=data, callback=self.next,)]

    def next(self,response):
        """Redirect to the personal message center with the stored cookies
        to confirm the login worked (True hashes equal to 1, so this reuses
        the same cookie jar as start_requests)."""
        url='http://msg.csdn.net/?ref=toolbar'
        return [Request(url,headers=self.header,meta={'cookiejar':True},callback=self.next2)]

    def next2(self,response):
        """Check the page title to confirm login, save the HTML locally,
        then fan out requests for all 240 course listing pages."""
        print('验证是否登陆成功')
        if response.xpath('/html/head/title/text()').extract()[0]=='消息中心':
            print('登陆成功')
        else:
            print('登陆失败')
        # makedirs with exist_ok avoids the exists()/mkdir() race and also
        # creates missing parent directories.
        path = 'D:/csdn/'
        os.makedirs(path, exist_ok=True)
        path1=path+'messagecenter.html'
        # Context manager guarantees the file is closed even if write fails.
        with open(path1,'wb') as f:
            f.write(response.body)
        # Enqueue every course listing page (pages 1..240).
        for i in range(1,241):
            url='https://edu.csdn.net/courses/k/p'+str(i)
            yield Request(url,headers=self.header,callback=self.next3)
        '''
        #测试用爬取单页
        url = 'http://edu.csdn.net/courses/k/p1'
        print('2.5')
        return Request(url, headers=self.header, callback=self.next3)'''

    def next3(self,response):
        """Extract each course's detail-page URL from a listing page and
        request it."""
        data=response.body.decode('utf-8','ignore')
        urls=self._course_url_pat.findall(data)
        for url in urls:
            yield Request(url,headers=self.header,callback=self.next4)

    def next4(self,response):
        """Scrape one course detail page into a CsdnlgItem.

        Price: if a decimal number is present use it, otherwise keep the
        raw text (e.g. a "free" label) stripped of surrounding whitespace.
        """
        print('4')
        item=CsdnlgItem()
        item['courseNum']=response.xpath('//span[@class="pinfo"]/text()').extract()[0]
        item['stuNum']=response.xpath('//span[@class="num"]/text()').extract()[0]
        price=response.xpath('//span[@class="money"]/text()').extract()[0]
        k=self._price_pat.findall(price)
        if k:
            item['price']=k[0]
        else:
            # The original utf-8 encode/decode round-trip was a no-op for
            # any text xpath yields; a plain strip() is equivalent.
            item['price']=price.strip()
        item['courseTitle']=response.xpath('//h1/a/text()').extract()[0]
        yield item
