import scrapy

import ddddocr


class DlSpider(scrapy.Spider):
    """Log in to gushiwen.cn by fetching the captcha image, recognizing it
    with ddddocr, and POSTing the login form.

    Flow: start_urls fetches the captcha -> parse() OCRs it and submits the
    form -> do_login() dumps the post-login page for inspection.
    """

    name = "dl"
    # allowed_domains = ["17k.com"]
    start_urls = ["https://so.gushiwen.cn/RandCode.ashx"]

    def parse(self, response):
        """Handle the captcha response: save a debug copy, OCR the code,
        and yield the login POST request."""
        # Keep a copy of the captcha on disk for debugging. The OCR below
        # works on the in-memory bytes, so this file is never read back.
        # NOTE(review): the "../../" path depends on the current working
        # directory -- consider an absolute or project-relative path.
        code_path = '../../yzm.jpg'
        with open(code_path, 'wb') as f:
            f.write(response.body)
        # Recognize the captcha directly from the response bytes instead of
        # re-reading the file we just wrote.
        result = self.captcha_image(response.body)
        # Login endpoint.
        login_url = 'https://so.gushiwen.cn/user/login.aspx?from=http%3a%2f%2fso.gushiwen.cn%2fuser%2fcollect.aspx'
        # Form payload for the login POST.
        # NOTE(review): __VIEWSTATE is a session-dependent ASP.NET token;
        # hard-coding it is likely to break. It should be scraped from the
        # login page. Credentials should come from settings/env, not source.
        data = {
            '__VIEWSTATE': 'MiUl3Ve2AtHCtz6eXidIGyOGdMTCJ+7uITSC8o/TPNGPxC1epkTziGrWCsm9EvaK2mUhYdemzJ/kgDZ5nnGAGls3jO4hYfuR3hxSZU3iK5djdm8anCCuUf0k+G2vbp6njRnuYbl17cXkYEqKkV6VVLW9d2E=',
            '__VIEWSTATEGENERATOR': 'C93BE1AE',
            'from': 'http://so.gushiwen.cn/user/collect.aspx',
            'email': '793390457@qq.com',
            'pwd': 'xlg17346570232',
            'code': result,
            'denglu': '登录'
        }
        # GET requests use scrapy.Request; POST form submissions use FormRequest.
        yield scrapy.FormRequest(login_url, formdata=data, callback=self.do_login)

    def do_login(self, response, **kwargs):
        """Post-login callback: write the resulting page to disk so the
        login result can be inspected manually."""
        with open('我的.html', 'w', encoding='UTF-8') as f:
            f.write(response.text)

    def captcha_image(self, path):
        """Recognize a captcha image with ddddocr.

        Args:
            path: filesystem path to the image, or (backward-compatible
                generalization) the raw image bytes themselves.

        Returns:
            The recognized captcha text.
        """
        if isinstance(path, (bytes, bytearray)):
            img_bytes = bytes(path)
        else:
            with open(path, 'rb') as f:
                img_bytes = f.read()
        # Reuse one OCR engine across calls: constructing DdddOcr() loads the
        # model, which is expensive to repeat per captcha.
        if not hasattr(self, '_ocr'):
            self._ocr = ddddocr.DdddOcr()
        return self._ocr.classification(img_bytes)
