# -*- coding:utf-8 -*-
import scrapy
from scrapy.selector import Selector
import logging
from scrapy.http import FormRequest
from scrapy.spiders import Spider
import urlparse
import sys
class loginSpider(Spider):
    """Log in to douban.com (solving a captcha interactively when one is
    shown), then recursively crawl on-site links while carrying the login
    session's cookie jar. Python 2 / Scrapy spider."""

    name = 'loginspider1'
    # allowed_domains = ['accounts.douban.com','www.douban.com']
    # start_urls=['https://www.douban.com']

    # Browser-like headers so the site serves the normal login page.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip,deflate,br',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
        'Connection': 'keep-alive',
        # 'Host':'accounts.douban.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
    }

    # Login form payload; replace the placeholder credentials before running.
    formdata = {
        'form_email': 'your username',
        'form_password': 'your password',
        'login': '登录',
        'redir': 'https://www.douban.com',
        'source': 'None'
    }

    def start_requests(self):
        """Fetch the login page so the form (and a possible captcha) can be parsed."""
        print("get the gate...")
        return [scrapy.Request(
            url='https://www.douban.com/accounts/login',
            headers=self.headers,
            meta={'cookiejar': 1},  # open a dedicated cookie jar for this session
            callback=self.parse_login,
        )]

    def parse_login(self, response):
        """Submit the login form. If a captcha image is present, ask the user
        to solve it on the console and add the solution to the form data."""
        print("start logining...")
        print(response.url)
        captcha = response.xpath('//img[@class="captcha_image"]/@src').extract()
        if captcha:
            print("need captcha...")
            print('Copy the link:')
            link = captcha[0]
            print(link)
            captcha_solution = raw_input('captcha_solution:')
            # BUG FIX: parse_qs returns {'id': [value]} — take the first element
            # so a string (not a one-element list) is posted as the captcha id.
            captcha_id = urlparse.parse_qs(urlparse.urlparse(link).query, True)['id'][0]
            self.formdata['captcha-solution'] = captcha_solution
            self.formdata['captcha-id'] = captcha_id
            # Dump the page for offline debugging; `with` closes the handle
            # promptly (the original leaked the file object).
            with open('e:\hanhan\\douban.txt', 'w+') as f:
                f.write(response.body)
            print(self.formdata)
        print("logining...")
        return [scrapy.FormRequest.from_response(
            response,
            meta={'cookiejar': response.meta['cookiejar']},
            headers=self.headers,
            formdata=self.formdata,
            callback=self.after_login,
        )]

    def after_login(self, response):
        """Save the post-login page, then request the home page with the session."""
        print("after login ...")
        with open("e:\hanhan\\afterlogin.txt", "w+") as f:
            f.write(response.body)
        return scrapy.Request(
            url='https://www.douban.com/',
            headers=self.headers,
            meta={'cookiejar': response.meta['cookiejar']},
            callback=self.parse_result,
            dont_filter=True)

    def parse_result(self, response):
        """Recursively follow links containing '//', skipping logout links so
        the session is not terminated."""
        print(response.url)
        with open("e:\hanhan\\ssss.txt", "w+") as f:
            f.write(response.body)
        sel = Selector(response)  # was constructed twice in the original
        links = sel.xpath('//a[contains(@href,"//")]/@href').extract()
        for link in links:
            print(link)
            if 'logout' in link:
                print('logout..')
                continue
            if link.startswith('//'):
                # BUG FIX: protocol-relative URLs only need a scheme; the
                # original prepended the full domain, producing broken URLs.
                link = 'https:' + link
            elif not (link.startswith('https://') or link.startswith('http://')):
                link = "https://www.douban.com" + link
            yield scrapy.Request(
                url=link,
                meta={'cookiejar': response.meta['cookiejar']},
                callback=self.parse_result,
                dont_filter=True)


