# -*- coding: utf-8 -*-
import json
import os
import re
import time

import scrapy
from scrapy import signals
from scrapy.utils.project import get_project_settings
from scrapy.xlib.pydispatch import dispatcher
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait

from quinnSpider.tool.BrowserOption import BrowserOption
from quinnSpider.tool.XunleiRemote import XunleiRemote, RemoteManage

class XunleiSpider(scrapy.Spider):
    """Spider that drives a Selenium-controlled browser to log in to the
    Xunlei remote-download console (yuancheng.xunlei.com), persist the
    session cookies to disk, enumerate the registered remote downloaders,
    and open a new download task to check free space on the selected device.
    """

    name = 'xunlei'
    allowed_domains = ['xunlei.com']
    start_url = 'http://yuancheng.xunlei.com'  # /login.html
    custom_settings = {
        'LOG_LEVEL': 'WARNING',  # limit console output to warnings and above
        'BROWSER_FILE': 'browers',
        'DOWNLOADER_MIDDLEWARES': {
            'quinnSpider.middlewares.MyCustomDownloaderMiddleware': None,
            'quinnSpider.middlewares.PhanttomJsMiddleware': 560,
        },
    }

    def __init__(self):
        self.settings = get_project_settings()
        # Project helper picks and configures the WebDriver instance.
        self.browser = BrowserOption().selectBrower()
        self.browser.set_page_load_timeout(30)
        self.remoteManage = RemoteManage()
        # Run spider_closed (first argument) when the spider_closed signal
        # (second argument) fires, so the browser is torn down with the spider.
        dispatcher.connect(self.spider_closed, signals.spider_closed)

        # Path where the Selenium session cookies are saved/restored.
        self.cookie_file = self.settings.get('COOKIES_FILE')[0]

    def start_requests(self):
        """Kick off the crawl at the login page; the downloader middleware
        renders it through the Selenium browser (meta['usedBrowser'])."""
        yield scrapy.Request(
            self.start_url,
            meta={'usedBrowser': True, 'isLogin': True},
            callback=self.login,
            dont_filter=True)

    def login(self, response):
        """Fill in the login form via Selenium, persist cookies on success,
        then continue to the downloader overview page."""
        # SECURITY: credentials are hard-coded in source; move them to
        # project settings or environment variables.
        self.browser.find_element_by_id("al_u").send_keys('554287986')
        self.browser.find_element_by_id("al_p").send_keys('cqh1995118')
        self.browser.find_element_by_id("al_submit").click()
        # Crude wait for the post-login redirect; a WebDriverWait on a
        # post-login element would be more robust.
        time.sleep(5)
        loged_url = self.browser.current_url
        if loged_url != self.start_url:
            # URL changed -> assume the login succeeded; save the session.
            print("login success")
            print(self.browser.get_cookies())
            with open(self.cookie_file, 'w') as f:
                json.dump(self.browser.get_cookies(), f)
        else:
            print("login fail")

        # Rebuild a "name=value;..." cookie header string from the saved file.
        # Guard against a missing file when login failed and nothing was
        # ever saved (the original crashed with IOError here).
        if not os.path.exists(self.cookie_file):
            return
        with open(self.cookie_file, 'r') as f:
            saved_cookie = json.load(f)
        cookie_pairs = [str(item["name"]) + "=" + str(item["value"])
                        for item in saved_cookie]
        self.cookiestr = ';'.join(cookie_pairs)
        print(self.cookiestr)

        self.headers = {
            #'cookie' : self.cookiestr
        }
        yield scrapy.Request(
            loged_url,
            meta={'usedBrowser': True},
            headers=self.headers,
            callback=self.getRemoteInfo,
            dont_filter=True)

    def getRemoteInfo(self, response):
        """Scrape the remote-downloader sidebar, open the new-task dialog,
        paste a thunder:// link and compare the task's total size against
        the selected downloader's free space."""
        # Dump the rendered page for offline debugging; close the handle
        # properly (the original leaked the file object).
        with open('out.html', 'w') as dump:
            dump.write(self.browser.page_source.encode('utf-8'))

        # Each downloader is a <div> under #sidebar .subnav_con carrying
        # data-name / data-pid / data-online attributes.
        sidebar = self.browser.find_element_by_id("sidebar") \
                              .find_element_by_class_name("subnav_con")
        for downloader in sidebar.find_elements_by_xpath("div"):
            info = {
                "name": downloader.get_attribute("data-name"),
                "uid": downloader.get_attribute("data-pid"),
                "status": downloader.get_attribute("data-online"),
            }
            self.remoteManage.pushRemote(info["uid"], XunleiRemote(info))

        # Open the "new task" dialog.
        self.browser.find_element_by_class_name("site_new").click()
        time.sleep(1)

        self.browser.find_element_by_id("pop-newtask-multi-url") \
                    .send_keys("thunder://QUFmdHA6Ly95Z2R5ODp5Z2R5OEB5ZzkwLmR5ZHl0dC5uZXQ6ODM4Ni8lRTklOTglQjMlRTUlODUlODklRTclOTQlQjUlRTUlQkQlQjF3d3cueWdkeTguY29tLiVFNiU5QyVBQSVFNiU5RCVBNSVFNiU5QyVCQSVFNSU5OSVBOCVFNSU5RiU4RS5CRC43MjBwLiVFNCVCOCVBRCVFOCU4QiVCMSVFNSU4RiU4QyVFNSVBRCU5NyVFNSVCOSU5NS5ta3ZaWg==")
        time.sleep(1)
        WebDriverWait(self.browser, 3, 0.5).until(
            ec.presence_of_all_elements_located((By.ID, "pop-newtask-multi-list")))

        # Sum the per-file sizes (GB) shown in the task list.
        size_elements = self.browser.find_element_by_id("pop-newtask-multi-list") \
                                    .find_elements_by_class_name("t-pop_sp_size")
        total_size_gb = 0.0
        for size_ele in size_elements:
            total_size_gb += float(size_ele.text.encode('utf-8').replace("GB", ""))
        print(total_size_gb)

        # Free space reported by the dialog, e.g. "123.45GB".
        free_gb = float(re.search(
            r"(\w+\.\w+)GB",
            self.browser.find_element_by_id("pop-newtask-available-space")
                .text.encode("utf-8")).group(1))
        print(free_gb)

        now_select_download = self.browser.find_element_by_id(
            "pop-newtask-downloaders-selected").text
        if total_size_gb < free_gb:
            print(now_select_download + "space is enough")
        else:
            print(now_select_download + "space is lock")

    def spider_closed(self, spider):
        """Signal handler: tear down the browser when the spider stops."""
        print(self.name + " spider closed")
        # quit() (not close()) ends the whole WebDriver session so no
        # orphaned browser/driver process is left behind.
        self.browser.quit()