import scrapy
from selenium import webdriver
from scrapy.http.request import Request
from huayiQDB.controller import huayiCollector as hyc
import huayiQDB.controller.ParseCollection as pc
import logging
import datetime
import huayiQDB.controller.log as log
import huayiQDB.controller.huayiUtils as hu
import pymysql
import huayiQDB.controller.dbtools as db

class QautocollectorSpider(scrapy.Spider):
    """Spider that logs into 91huayi.com (a CME site) through a Selenium-driven
    Chrome browser, then crawls the knowledge-navigation page and schedules one
    course-list request per subject category.

    NOTE(review): every class attribute below is evaluated at *import* time;
    in particular ``webdriver.Chrome()`` launches a real browser and
    ``db.DBSystem()`` opens DB access the moment this module is imported.
    Left in place because external code may rely on these class attributes,
    but they should ideally move into ``__init__``.
    """

    # name = 'QAutoCollector'
    name = 'QAC'
    db_sys = db.DBSystem()  # shared DB handle, forwarded to callbacks via request meta
    allowed_domains = ['91huayi.com']
    logger = log.logger
    driver = webdriver.Chrome()  # Selenium session used for the interactive login flow

    # Regexes that carve up the subject-category menu.  Raw strings keep the
    # backslashes intact for the regex engine and avoid the invalid-escape
    # SyntaxWarning that non-raw '\(' etc. raise on Python 3.12+.
    pattern = [
        r'<li class="menubox_01">(.*?)</ul></li>',  # menubox_01 element list
        r'<a dept_id="(.*?)" class="menubox_03"',  # dept_id of a menubox_03 entry
        r'class="menubox_03"(.*?)</a>',  # raw span from which the menubox_03 title is extracted
        '[\u4e00-\u9fa5]',  # menubox_03 title (CJK range; kept non-raw so the \u escapes expand)
        r'onclick="reloadCourse\(this,\'(.*?)\'\);" >',  # reloadCourse id of a menubox_03 entry
        r'<li class="listbox_01"><a dept_id="(.*?)"',  # dept_id of a listbox_01 entry
        r'<li class="listbox_01">(.*?)</li>',  # raw span from which the listbox_01 title is extracted
        r'class="menubox_02"(.*)'
    ]

    # Regexes for the full course list of one subject.
    # NOTE(review): the attribute name keeps the original "lsit" typo —
    # renaming a class attribute could break external callers.
    pattern_lsit = [
        '<div class="item_r">(.*?)</dl>                        <br clear="all" />                    </div>                    <br clear="all" />',
        # group holding all the fields we need
        r'<img src="../images/(.*?)\.gif',  # popularity / attention level
        'javascript:window.top.location.href=\'(.*?)\'" title=',  # course URL
        'title="(.*?)"><strong>',  # course name
        '<div class="txts">(.*?)</div>',  # four <div class="txts"> groups: 1st credits, 2nd project no., 3rd project lead, 4th organisation
        '<span class="f12blue">(.*?)</span>',  # project no. (from 2nd group) / project lead (from 3rd group)
        'title=\'(.*?)\'>'  # organisation (from 4th group)
    ]

    # Regexes for the course list under the "study details" page.
    pattern_course_list = [
        '<div class="course">(.*?)</div>                            </div>',  # one study entry per group
        '<span>{1}<a href=\'(.*?)\' target=',  # url
        '<strong>(.*?)</strong>',  # study name
        '<div class="txts"(.*?)</div>',  # teacher / organisation group
        '<span class="f12blue">(.*?)</span>',  # teacher and organisation, filtered separately
    ]

    # Regexes for exam questions.
    pattern_Question = [
        '<tr align="left">(.*?)</td>		</tr>',  # 0: one question block
        '<span id="gvQuestion_question_[0-9]">(.*?)</span>',  # 1: question text
        'id="gvQuestion_question_id_[0-9]" value="(.*?)"',  # 2: question id
        'id="gvQuestion_result_[0-9]" value="(.*?)"',  # 3: answer value
        r'name="gvQuestion\$ctl0[0-9]\$rbl" value="(.*?)"',  # 4: values of the candidate answers
        '<label for="gvQuestion_rbl_[0-9]_[0-9]_[0-9]">(.*?)</label>'  # 5: texts of the candidate answers
    ]
    pattern_Nav = r'id="site_search1_hdnValue" value="\{\[(.*?)\]\}"'  # course list embedded in the page
    study_url_list = []  # URLs queued for visiting
    # NOTE(review): the relation/user ids hard-coded below look account-specific — confirm.
    url_ware = "http://cme2.91huayi.com/ashx/add_course_ware_play_record.ashx?relation_id=cf878dfb-7e67-4b30-ae8e-ab3401193005&user_id=ee68b7ab-fa51-4122-b4cb-ac4501035ec2"  # GET to skip the video
    url_quest = "http://cme2.91huayi.com/pages/exam.aspx?cwid=cf878dfb-7e67-4b30-ae8e-ab3401193005"  # exam page  nav5

    def start_requests(self):
        """Log in through the Selenium browser (asking the operator to type the
        captcha on stdin), then yield the first request for the
        knowledge-navigation page.

        NOTE(review): ``find_element_by_*`` was removed in Selenium 4 — this
        file targets Selenium 3; confirm the pinned version before upgrading.
        """
        self.logger.info("webdriver.Chrome开始")
        self.driver.get('http://cme2.91huayi.com/cme/index.html')
        cookie_view = self.driver.get_cookies()
        self.logger.info("访问首页时的cookeie:" + str(cookie_view))
        self.logger.info("页面填充账号密码开始")
        # SECURITY(review): credentials are hard-coded — move them to a config
        # file or environment variables.
        self.driver.find_element_by_id('loginAccessName').send_keys('kkkabc')
        self.driver.find_element_by_id('loginAccessPwd').send_keys('kkk123456')
        print("输入验证码：")
        code = input()
        self.driver.find_element_by_id('loginAccessPwd1').send_keys(code)
        try:
            self.driver.find_element_by_xpath("//div[@class='lg_box1']/a[@class='login_btn']").click()
        except Exception:
            # First click failed (presumably a bad captcha) — log the cause
            # and ask the operator for a fresh captcha once more.
            self.logger.exception("login click failed, asking for a new captcha")
            print("输入验证码2：")
            self.logger.info("请求外部输入验证码")
            code = input()
            self.driver.find_element_by_id('loginAccessPwd1').send_keys(code)
            self.driver.find_element_by_xpath("//div[@class='lg_box1']/a[@class='login_btn']").click()

        self.logger.info("登陆后的cookeie:" + str(self.driver.get_cookies()))
        self.logger.info("开始访问：knowledge_navigation.aspx")
        yield Request('http://cme2.91huayi.com/pages/knowledge_navigation.aspx', meta={'tag': 'navigation'})

    def cookie_to_dict(self, cookie_list):
        """Flatten a list of cookie dicts into one dict.

        NOTE(review): this merges the *fields* of every cookie dict (e.g.
        'name', 'value', 'domain'), so later cookies overwrite earlier ones.
        The code paths below use ``hu.cookie_to_dict`` instead — confirm this
        helper is still needed.
        """
        cookie_dict = {}
        for one in cookie_list:
            for name in one:
                cookie_dict[name] = one[name]
        return cookie_dict

    def parse(self, response):
        """Handle the knowledge-navigation response: extract every subject
        category and schedule one course-list request per category, carrying
        the Selenium session cookies along.
        """
        self.logger.info("------------------------------parse-------start----------------------------")
        self.logger.info("响应页面：knowledgea_nvigation.aspx")
        self.logger.info("knowledgea_nvigation.aspx  HEADER:" + str(response.headers))
        course_dict = hyc.selectAllCourseFromNav(self.pattern_Nav, response.text)
        # Reuse the cookies of the logged-in Selenium session for Scrapy requests.
        cookie = hu.cookie_to_dict(self.driver.get_cookies())
        header = {
            'Referer': 'http://cme2.91huayi.com/pages/knowledge_navigation.aspx'
        }
        for Oneclass in course_dict:
            # [4:-4] strips the surrounding markup from the extracted values —
            # TODO(review): confirm against hyc.selectAllCourseFromNav output.
            request_type = Oneclass['id'][4:-4]  # category id
            request_name = Oneclass['name'][4:-4]  # category name
            NS_Info = {}  # per-study bookkeeping info passed to the callback
            url = "http://cme2.91huayi.com/pages/course_list_v2.aspx?dept_id=" + request_type
            NS_Info['Item_Value'] = request_type  # category id
            NS_Info['Item_Name'] = request_name  # category name
            yield scrapy.Request(
                url,
                meta={'url_type': 'item', 'url': url, 'NS_Info': NS_Info, 'cookie': cookie,
                      'db_sys': self.db_sys, 'item_value': request_type, 'id': 'item'},
                callback=pc.Exam_navigation, cookies=cookie, headers=header, dont_filter=True)



