#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import json

import re
import scrapy as scrapy
import time
from scrapy import Selector, FormRequest


class ZhifuLogin(scrapy.Spider):
    """Log in to zhihu.com with a phone number and the "upside-down
    character" captcha, then fetch the logged-in topstory feed.

    Flow:
        start_requests -> parse_login   (scrape _xsrf, request captcha image)
                       -> after_login   (operator clicks captcha, POST login form)
                       -> vercode_login (revisit homepage with session cookies)
                       -> parse         (extract session_token, call feed API)
                       -> parse_get_items (dump API response)

    Requires Python 3 (uses ``input()`` and ``print()``).
    """

    name = "zhihuLogin"

    # Base request headers shared by every request; X-Xsrftoken is
    # overwritten with the live token scraped in parse_login.
    headers = {
        "Accept": "*/*",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q = 0.8,zh-TW;q=0.6,zh;q=0.4",
        "Connection": "keep-alive",
        "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3192.0 Safari/537.36",
        "Host": "www.zhihu.com",
        "Origin": "https://www.zhihu.com",
        "Referer": "https://www.zhihu.com/",
        "X-Requested-With": "XMLHttpRequest",
        "X-Xsrftoken": "2d4753f9f749ce149ac44fc9f035ff50"
    }

    # Reference captcha payload: first entry is the image size, the rest
    # are sample click coordinates for the seven characters.
    point = [[200, 44], [16.475009999999997, 23.7], [35.475, 24.7], [62.475, 20.7], [87.475, 25.7], [108.475, 20.7],
             [128.475, 22.7], [159.475, 22.7]]

    @staticmethod
    def _save_body(filename, response):
        """Dump the raw response body to *filename* for offline inspection."""
        # 'wb' + response.body keeps the bytes exactly as received;
        # the context manager closes the file, no explicit close() needed.
        with open(filename, 'wb') as f:
            f.write(response.body)

    def start_requests(self):
        """Kick off the login flow from the signin page using cookiejar 1."""
        yield scrapy.Request("https://www.zhihu.com/#signin",
                             callback=self.parse_login,
                             meta={'cookiejar': 1},
                             headers=self.headers
                             )

    def parse_login(self, response):
        """Scrape the hidden ``_xsrf`` token and request a Chinese captcha.

        The token must be echoed back both as the X-Xsrftoken header and
        as the ``_xsrf`` form field, or the login POST is rejected.
        """
        self._save_body('quotes-zhihu1.html', response)

        tokens = Selector(response).xpath('//input[@name="_xsrf"]/@value').extract()
        if not tokens:
            # Page layout changed or we were blocked — abort instead of
            # crashing with IndexError.
            self.logger.error('No _xsrf token found on the signin page; aborting login.')
            return
        xsrf = tokens[0]
        print(xsrf)
        self.headers['X-Xsrftoken'] = xsrf

        # The timestamp query param defeats caching; type=login&lang=cn
        # selects the click-the-inverted-character captcha variant.
        captcha_url = "https://www.zhihu.com/captcha.gif?r=%s&type=login&lang=cn" % (int(time.time() * 1000))
        # Scrapy forwards the cookiejar's cookies automatically.
        return scrapy.Request(captcha_url,
                              callback=self.after_login,
                              meta={'cookiejar': response.meta['cookiejar'], "xsrf": xsrf},
                              headers=self.headers
                              )

    def after_login(self, response):
        """Save the captcha image, ask the operator which characters are
        upside-down, and submit the login form."""
        self._save_body('captcha.gif', response)

        captcha = {
            'img_size': [200, 44],
            'input_points': [],
        }
        # Pre-measured x/y centres of the seven captcha characters.
        points = [[22.796875, 22], [42.796875, 22], [63.796875, 21], [84.796875, 20], [107.796875, 20],
                  [129.796875, 22],
                  [150.796875, 22]]
        seq = input('请输入倒立字的位置\n>')
        for ch in seq:
            # Ignore anything that is not a valid 1-based position so a
            # stray keystroke does not crash the whole crawl.
            if ch.isdigit() and 1 <= int(ch) <= len(points):
                captcha['input_points'].append(points[int(ch) - 1])
        verification_code = json.dumps(captcha)

        # NOTE(security): credentials are hard-coded below; move them to
        # Scrapy settings or environment variables before sharing this code.
        return scrapy.FormRequest("https://www.zhihu.com/login/phone_num",
                                  meta={'cookiejar': response.meta['cookiejar']},  # reuse the session cookies
                                  headers=self.headers,
                                  dont_filter=True,
                                  formdata={
                                      '_xsrf': response.meta['xsrf'],
                                      'phone_num': '15223782694',
                                      'password': 'xujiang1994323',
                                      'captcha_type': 'cn',
                                      'captcha': verification_code,
                                  },
                                  callback=self.vercode_login
                                  )

    def vercode_login(self, response):
        """Persist the login response, then revisit the homepage with the
        authenticated cookie jar to verify the session works."""
        self._save_body('login.html', response)

        return scrapy.Request("https://www.zhihu.com/",
                              meta={'cookiejar': response.meta['cookiejar']},
                              callback=self.parse,
                              headers=self.headers,
                              dont_filter=True
                              )

    def parse(self, response):
        """Extract ``session_token`` from the homepage state blob and query
        the topstory feed API with the API-specific headers."""
        self._save_body('login-home.html', response)

        # Persist the request cookies so a later run can reuse the session.
        cookies = response.request.headers.getlist('Cookie')
        with open('cookie_file.txt', 'w') as f:
            for cookie in cookies:
                # Header values arrive as bytes under Python 3 Scrapy.
                text = cookie.decode('utf-8') if isinstance(cookie, bytes) else str(cookie)
                f.write(text + '\n')

        # The session token is embedded in the #data element's data-state
        # attribute as a query-string fragment.
        token = response.css('#data::attr(data-state)').extract()
        obj = re.search('session_token=(.*?)&', str(token), flags=0)
        if obj is None:
            self.logger.error('session_token not found in page state; login probably failed.')
            return
        session_token = obj.group(1)

        query_string = {
            "action_feed": "True",
            "limit": "10",
            "session_token": session_token,
            "action": "down",
            "after_id": "9",
            "desktop": "true"
        }
        # Values are already URL-safe literals, so a plain join suffices.
        url = "https://www.zhihu.com/api/v3/feed/topstory?" + \
              "&".join("%s=%s" % (k, v) for k, v in query_string.items())

        # The feed API wants JSON negotiation plus auth/udid headers on top
        # of the shared browser headers; deepcopy so self.headers stays clean.
        header = copy.deepcopy(self.headers)
        header['accept'] = 'application/json, text/plain, */*'
        header['authorization'] = 'Bearer Mi4xZ3lRYkFnQUFBQUFBZ0FJSVZVUjlEQmNBQUFCaEFsVk42c19fV1FBdWpoeFVWTGh0cGJJSGNFclZTMGNHMTBISUtn|1507345130|7ffa7681e9edbe14c427f8e1f95957c1a7697e8f'
        header['X-API-VERSION'] = '3.0.53'
        header['X-UDID'] = 'AIACCFVEfQyPTsUw26j9zsjGec2o3CNs2fc='
        print(url)
        return scrapy.Request(url,
                              meta={'cookiejar': response.meta['cookiejar']},
                              callback=self.parse_get_items,
                              headers=header,
                              dont_filter=True
                              )

    def parse_get_items(self, response):
        """Dump the raw feed API response for offline inspection."""
        self._save_body('login-home-items.html', response)