# -*- coding:utf8 -*-

import importlib, re, sys, json, datetime, random, time
from scrapy.selector import Selector
from scrapy.exceptions import CloseSpider
import urllib
import lxml.html
import json
# Import Spider from its modern location, falling back to the pre-1.0
# Scrapy name (BaseSpider) for old installations.
try:
    from scrapy.spiders import Spider
except ImportError:  # was a bare `except:`, which would mask unrelated errors
    from scrapy.spider import BaseSpider as Spider

from scrapy.http import Request, FormRequest
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from gaokaopai.items import *
from gaokaopai.dao import *

importlib.reload(sys)


class ScoreListSpider(Spider):
    """Fetch university pages listed in the database and scrape each school's
    wmzy id and diploma level, yielding rows for `scrapy_university_update`.
    """

    name = 'sch_id'
    # NOTE(review): Scrapy's offsite middleware reads `allowed_domains`;
    # `allow` is not a recognized Spider attribute, so this list currently
    # has no effect. Left unchanged to avoid suddenly enabling offsite
    # filtering — confirm the intent and rename if filtering is desired.
    allow = ['wmzy.com']

    def start_requests(self):
        """Yield one Request per university row from the database.

        Each row is expected to carry 'url' (page to fetch) and 'id'
        (primary key, forwarded to parse_list so the scraped values can be
        written back to the right row).
        """
        for row in selectScrapyUniversity():
            print('url==========================', row['url'])
            # Bind row['id'] as a default argument so every callback keeps
            # its own id (avoids the late-binding closure pitfall); the
            # parameter no longer shadows the builtin `id`.
            yield Request(
                row['url'],
                callback=lambda response, row_id=row['id']: self.parse_list(response, row_id),
            )

    def parse_list(self, response, row_id):
        """Extract the school id and diploma level from a university page.

        :param response: page response; the data sits on an <li> whose id
            attribute contains 'add-collect', in its data-schid /
            data-diploma attributes.
        :param row_id: primary key of the source row being updated.
        :yields: a ScrapyUniversityItem targeting scrapy_university_update.
        """
        li_dom = response.xpath(u"//li[contains(@id, 'add-collect')]")
        diploma = ''.join(li_dom.xpath(u"./@data-diploma").extract()).strip()
        sch_id = ''.join(li_dom.xpath(u"./@data-schid").extract()).strip()

        print('id==========================', row_id)
        print('sch_id==========================', sch_id)
        print('diploma==========================', diploma)

        su = ScrapyUniversityItem()
        su['table'] = 'scrapy_university_update'
        su['id'] = row_id
        su['sch_id'] = sch_id
        su['diploma'] = diploma
        yield su

