# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request, FormRequest
from csdn.items import CsdnItem
import urllib.request
import re
import time
import sys
import os
from ctypes import *


class C1Spider(scrapy.Spider):
    """Spider that logs in to CSDN (solving the captcha via the YunDaMa
    OCR service when one is shown), verifies the login, then walks the
    course catalogue collecting name / teacher / price / lesson count /
    student count for every course.
    """

    name = 'c1'
    allowed_domains = ['csdn.net']
    # start_urls = ['http://csdn.net/']

    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}

    def start_requests(self):
        """Entry point: fetch the login page first so `parse` can read the
        hidden form fields needed for the login POST."""
        return [Request("https://passport.csdn.net/account/verify",
                        meta={"cookiejar": 1},
                        headers=self.headers,
                        callback=self.parse
                        )]

    # Crawl the login page first; the hidden form values it returns decide
    # the next step (plain login vs. captcha-assisted login).
    def parse(self, response):
        """Extract the hidden login-form fields, solve the captcha if one
        is present, and submit the login form.

        Returns a list with one FormRequest, or None when a mandatory
        field is missing or the captcha cannot be recognized.
        """
        print("页面返回，开始取数据:")
        gps = response.xpath("//input[@name='gps']/@value").extract()
        rememberMe = response.xpath("//input[@name='rememberMe']/@value").extract()
        lt = response.xpath("//input[@name='lt']/@value").extract()
        execution = response.xpath("//input[@name='execution']/@value").extract()
        _eventId = response.xpath("//input[@name='_eventId']/@value").extract()
        validateCode_url = response.xpath("//img[@id='yanzheng']/@src").extract()

        # Every hidden field is mandatory for the login POST.
        if not (gps and rememberMe and lt and execution and _eventId):
            print("处理出错")
            return

        # NOTE(review): credentials are hard-coded; move them to Scrapy
        # settings or environment variables before publishing this code.
        data = {
            "gps": gps[0],
            "username": "18575599277",
            "password": "domyself2013",
            "rememberMe": rememberMe[0],
            "lt": lt[0],
            "execution": execution[0],
            "_eventId": _eventId[0],
        }

        # A captcha image on the page means an extra form field is needed.
        if not validateCode_url:
            print("没有验证码")
        else:
            print("有验证码，正在保存至本地 ......")

            # Save the captcha image locally, then OCR it.
            local_path = "yzm\\captcha.png"
            urllib.request.urlretrieve(validateCode_url[0], local_path)
            print("开始识别验证码 ......")
            try:
                validateCode = self.get_capval_from_3rdAPI(local_path)
                print("识别结果为："+str(validateCode))
            except Exception as err:
                print("出现错误：" + str(err))
                return

            if validateCode == "":
                print("未能识别")
                return

            # BUG FIX: the old code sent validateCode[0] — only the FIRST
            # character of the recognized text. The server expects the
            # whole captcha string.
            data["validateCode"] = validateCode

        print("正在登录，请稍等 ......")
        return [FormRequest.from_response(response,
                                          meta={"cookiejar": response.meta["cookiejar"]},
                                          headers=self.headers,
                                          formdata=data,
                                          callback=self.jump_to_my_homepage,
                                          )]

    # After login, jump to the personal home page.
    def jump_to_my_homepage(self, response):
        """Request the personal home page to verify the login session."""
        yield Request("http://my.csdn.net/",
                      headers=self.headers,
                      callback=self.parse_my_homepage,
                      meta={"cookiejar": True})

    # Parse the personal home page.
    def parse_my_homepage(self, response):
        """Check the page title to confirm login, save the page for
        inspection, then continue to the course catalogue."""
        title = response.xpath("/html/head/title/text()").extract()
        # print("调试：title="+str(title))
        # BUG FIX: guard against an empty title list before indexing.
        if title and title[0] == "我的CSDN":
            print("登录成功，保存页面")
            # "with" already closes the file; the old explicit close()
            # inside the with-block was redundant.
            with open("login_success.html", "wb") as fh:
                fh.write(response.body)
            print("保存完毕，文件名为：login_success.html")
        else:
            print("登录失败，当前页面是："+str(title)+",请检查(包括验证码)")

        # Continue to the course catalogue and parse it.
        yield Request("http://edu.csdn.net/courses",
                      headers=self.headers,
                      callback=self.parse_courses_homepage,
                      meta={"cookiejar": True})

    # Parse the catalogue front page: total course count -> page count.
    def parse_courses_homepage(self, response):
        """Read the total number of courses, derive the number of pages,
        and schedule one request per catalogue page."""
        data = response.body.decode("utf-8", "ignore")
        rst = re.compile("共(.*?)条数据").findall(data)
        if not rst:
            print("获取课程总数出错，请检查")
            return

        all_cnt = int(rst[0])
        cnt_per_page = 20
        # BUG FIX: the old test used floor division ("//") instead of
        # modulo ("%"), so page_cnt came out wrong for every exact
        # multiple of the page size (and 0 when all_cnt < 20).
        if all_cnt % cnt_per_page == 0:
            page_cnt = all_cnt // cnt_per_page
        else:
            page_cnt = all_cnt // cnt_per_page + 1

        print("共有 "+str(all_cnt)+" 项课程，共有 "+str(page_cnt)+" 页")

        for page in range(1, page_cnt+1):
            per_course_page_url = "https://edu.csdn.net/courses/p"+str(page)
            # Parse each catalogue page.
            print("开始解析每一页课程信息 ......")
            yield Request(per_course_page_url,
                          headers=self.headers,
                          callback=self.parse_per_course_page,
                          meta={"cookiejar": True})
            # Throttle: open a new page every 2 seconds.
            # NOTE(review): time.sleep blocks Scrapy's reactor; prefer the
            # DOWNLOAD_DELAY setting.
            time.sleep(2)

    # Parse one catalogue page: per-course details.
    def parse_per_course_page(self, response):
        """Extract title/teacher/price/lessons/link for every course on
        this page, validate the columns line up, then schedule a detail
        request per course to fetch the student count."""
        page = re.compile(r'https://edu.csdn.net/courses/p(\d{1,})').findall(str(response.url))
        print("正在解析第【"+str(page[0])+"】页的课程信息 ......")
        data = response.body.decode("utf-8", "ignore")
        title = response.xpath("//div[@class='course_dl_list']/a/dl/dt/div/span[@class='title']/text()").extract()
        # Regex is used here because the XPath would also match the lesson
        # counts; see the commented-out variant below.
        # teacher = re.compile('<div class="titleInfor">.*?</div>.*?<p>(.*?)</p>', re.S).findall(data)
        teacher = re.compile('<p>讲师：(.*?)</p>', re.S).findall(data)
        # stu_cnt = response.xpath("")
        price = response.xpath("//div[@class='course_dl_list']/a/dl/dt/p[@class='clearfix']/i/text()").extract()
        # Lesson counts: regex for the same multi-match reason.
        times = re.compile('<div class="titleInfor">.*?</div>.*?<p>.*?</p>.*?<p><em>(.*?)</em>', re.S).findall(data)
        link = response.xpath("//div[@class='course_dl_list']/a/@href").extract()

        # Normalize prices to the "￥x.y" form, or "未知" when absent.
        # BUG FIX: the decimal point must be escaped (the old "." matched
        # any character), and raw strings avoid invalid-escape warnings.
        price_pat = re.compile(r"￥\d{1,}\.\d{1,}")
        for i in range(len(price)):
            rst = price_pat.findall(price[i])
            price[i] = rst[0] if rst else "未知"

        if not (title and teacher and price and times and link):
            print("处理出错，部分数据为 0，请检查")
            return

        cnt = len(title)
        # Every extracted column must line up 1:1 with the titles.
        for label, column in (("授课老师", teacher), ("课程价格", price),
                              ("课程课时", times), ("课程链接", link)):
            if len(column) != cnt:
                print(label+"数量为: "+str(len(column))+" , 与 课程数量: "+str(cnt)+" 不匹配，请检查")
                return
        print("\t当前页面共获取到 "+str(cnt)+" 项课程")

        # Fetch the student count for each course.
        for i in range(cnt):
            print("\t\t开始爬取第【"+str(page[0])+"】页，第【"+str(i+1)+"】项课程的信息 ......")
            # Default-argument lambda binds this row's values at definition
            # time (avoids the late-binding closure pitfall).
            yield Request(link[i],
                          headers=self.headers,
                          callback=lambda response, name=title[i], teacher=teacher[i], price=price[i], times=times[i]: self.get_stu_cnt(response, name, teacher, price, times),
                          meta={"cookiejar": True})
            time.sleep(2)

    # Per course: open its detail page and read the student count.
    def get_stu_cnt(self, response, name, teacher, price, times):
        """Scrape the student count from a course detail page and yield a
        fully-populated CsdnItem."""
        print("\t\t\t正在爬取学员数量 ......")
        cnt = response.xpath("//span[@class='num']/text()").extract()
        if not cnt:
            print("\t\t\t\t爬取学员数量出错")
        else:
            item = CsdnItem()
            item["name"] = name
            item["teacher"] = teacher
            item["stu_cnt"] = cnt[0]
            item["price"] = price
            item["times"] = times
            yield item

    def get_capval_from_3rdAPI(self, local_path):
        """Send the captcha image at *local_path* to the YunDaMa OCR
        service (Windows-only vendor DLL) and return the recognized text,
        or "" on failure. Raises OSError if the DLL cannot be loaded.
        """
        # Path of the third-party OCR vendor DLL.
        dlllib_path = "yzm\\yundamaAPI-x64.dll"
        # The C API wants the image path as bytes.
        cap_pic_path = str.encode(local_path)
        # 1. http://www.yundama.com/index/reg/developer — register a developer account
        # 2. http://www.yundama.com/developer/myapp — add a new application
        # 3. Develop with the application ID and key below.
        appId = 4380
        appKey = str.encode('730641d0e2e578ef51abe9e49c02cd90')
        # NOTE: this is an ordinary member account, not the developer one.
        # Register at http://www.yundama.com/index/reg/user
        username = str.encode('mahonesun')
        password = str.encode('www558668')
        # Captcha type, e.g. 1004 = 4 alphanumeric chars; pricing differs
        # per type — see http://www.yundama.com/price.html. Fill this in
        # accurately or the recognition rate suffers.
        codetype = 1005

        YDMApi = windll.LoadLibrary(dlllib_path)
        print('\r\n>>>正在登陆打码平台...')
        # Step 1: initialize the SDK (once per process).
        YDMApi.YDM_SetAppInfo(appId, appKey)
        # Step 2: log in (once per process).
        uid = YDMApi.YDM_Login(username, password)

        if uid <= 0:
            print('登陆失败，错误代码：%d' % uid)
            print('\r\n>>>错误代码请查询 http://www.yundama.com/apidoc/YDM_ErrorCode.html')
            return ""

        print('>>>正在获取余额...')
        # Query the remaining credit (optional).
        balance = YDMApi.YDM_GetBalance(username, password)
        print('登陆成功，用户名：%s，剩余题分：%d' % (username, balance))
        print('\r\n>>>正在普通识别...')
        # Step 3: recognize. 30-byte writable out-buffer for the result.
        # BUG FIX: the ctypes docs forbid passing a c_char_p built from
        # immutable bytes to a function that writes into it; use
        # create_string_buffer for out-parameters.
        result = create_string_buffer(30)
        captchaId = YDMApi.YDM_DecodeByPath(cap_pic_path, codetype, result)
        print("普通识别：验证码ID：%d，识别结果：%s" % (captchaId, result.value))
        return bytes.decode(result.value)

