# -*- coding: utf-8 -*-
import re
import json
import jsonpath
import time
from lxml import etree
import scrapy
from scrapy.http import Request
from bs4 import BeautifulSoup as bs
# from scrapy.selector import Selector
import sys
import pymysql
import codecs
import os

#from dingdian.init_mysql import session

#from dingdian.models import VulNsfocus
from dingdian.init_mysql import session
from dingdian.items import MicroItem
from dingdian.models import VulMicro
from dingdian.schedule import schedule2

reload(sys)
sys.setdefaultencoding( "utf-8" )

class Myspider(scrapy.Spider):
    """Scrape Microsoft security bulletins (2010 onward) from
    docs.microsoft.com/en-us/security-updates and yield one MicroItem
    per bulletin, skipping bulletins already persisted via SQLAlchemy.
    """

    name = 'micro'
    # NOTE(review): Scrapy's off-site filter reads ``allowed_domains`` (a list
    # of bare domain names); this misnamed attribute is silently ignored, so
    # no off-site filtering happens. Kept as-is to avoid changing crawl
    # behavior -- confirm before renaming to ``allowed_domains``.
    allowedDomains = 'https://docs.microsoft.com'
    bash_url = 'https://docs.microsoft.com/en-us/security-updates'

    # Browser-like headers (including a captured session cookie) used for the
    # initial TOC request only; follow-up requests go out with Scrapy defaults.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Cookie': 'fyre-livecount=879056181274; MC1=GUID=573c9e48cdd44568bedc8333510a6e55&HASH=573c&LV=201805&V=4&LU=1526614082229; MSCC=1526633730; _ga=GA1.2.828544548.1526633737; MUID=3B5733F80065639436B7380B0465659A; MSFPC=GUID=573c9e48cdd44568bedc8333510a6e55&HASH=573c&LV=201805&V=4&LU=1526614082229; msdn=L=zh-cn; _gid=GA1.2.192319618.1526862667; graceIncr=0; smcflighting=100; _CT_RS_=Recording; WRUIDCD=1749857634206019; ARRAffinity=1536c5c4c83e388cdde16921513bf5be7bbf558d63350cef060c9447096d4f71; MS-CV=ku8GNh+2AEC0jvAF.1; MarketplaceSelectedLocale=zh-cn; MS0=0a2b6fe3aea94f9d815dff583ce17402; __CT_Data=gpv=19&ckp=cd&dm=docs.microsoft.com&apv_1022_www32=19&cpv_1022_www32=19&rpv_1022_www32=16',
        'Host': 'docs.microsoft.com',
        'Referer': 'https://docs.microsoft.com/en-us/security-updates/',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'
    }

    # Progress counters fed to schedule2() (progress display).
    totalVuls = 0      # bulletin rows discovered so far
    currentVuls = 0.0  # bulletin rows fully processed (float for ratio math)

    # Compiled once at class level instead of on every save_vul() call.
    _PATTERN_PUBLISHED = re.compile(r'Published:\s+([a-zA-Z]+\s+\d{1,}(,)\s\d{4})')
    _PATTERN_UPDATED = re.compile(r'Updated:\s+([a-zA-Z]+\s+\d{1,}(,)\s\d{4})')
    _PATTERN_CVE = re.compile(r'CVE-\d{4}-\d+')

    def start_requests(self):
        """Entry point: fetch the security-updates table of contents (JSON)."""
        url = self.bash_url + '/TOC.json'
        yield Request(url, headers=self.headers, callback=self.parse)

    def parse(self, response):
        """Parse TOC.json and schedule one yearly bulletin-index page per year.

        Yields one Request per year starting at 2010, carrying the year in
        ``meta`` for save_vuls().
        """
        jsonresponse = json.loads(response.body)
        # Count of year nodes under the bulletins branch of the TOC tree.
        length = len(jsonpath.jsonpath(jsonresponse, '$..items[3].children..children'))
        base_year = 2010
        # The trailing 12 children are not yearly bulletin indexes, so they
        # are excluded. TODO(review): this magic offset depends on the live
        # TOC layout -- verify it still holds.
        for offset in range(length - 12):
            year = base_year + offset
            url = 'https://docs.microsoft.com/en-us/security-updates/securitybulletins/%d/securitybulletins%d' % (year, year)
            yield Request(url, callback=self.save_vuls, meta={'year': year})

    def save_vuls(self, response):
        """Parse one yearly index page and schedule a detail request per
        bulletin that is not already stored in the database.

        Non-bulletin rows (spacers, the header row) are dropped from the
        running total so the progress percentage stays accurate.
        """
        year = response.meta['year']
        rows = bs(response.text, 'lxml').find('table').find_all('tr')
        self.totalVuls += len(rows)
        for row in rows:
            tds = row.find_all('td', style="", colspan="")
            if not tds:
                # Spacer / merged-cell row: no bulletin here.
                self.totalVuls -= 1
                continue
            # Avoid shadowing the ``id`` builtin (original used ``id``).
            bulletin_id = tds[1].get_text().strip()
            if bulletin_id == "Bulletin number":
                # Table header row, not a bulletin.
                self.totalVuls -= 1
                continue
            title = tds[2].get_text().strip()
            effect_sys = tds[3].get_text().strip()

            url = 'https://docs.microsoft.com/en-us/security-updates/securitybulletins/%d/%s' % (year, bulletin_id)
            # Already persisted: skip the fetch but still advance progress.
            # .first() avoids loading every matching row as .all() did.
            if session.query(VulMicro).filter(VulMicro.url == url).first() is not None:
                print("已经解析过该网页")
                self.currentVuls += 1.0
                schedule2(self.totalVuls, self.currentVuls)
                continue

            yield Request(url, callback=self.save_vul,
                          meta={'id': bulletin_id, 'title': title,
                                'effectSys': effect_sys})

    def save_vul(self, response):
        """Scrape a single bulletin page into a MicroItem and yield it."""
        self.currentVuls += 1.0
        schedule2(self.totalVuls, self.currentVuls)

        item = MicroItem()
        vulbar = bs(response.text, 'lxml').find('div', class_='primary-holder')
        item['url'] = response.url
        item['title'] = response.meta['title']
        item['msID'] = response.meta['id']
        item['effectSys'] = response.meta['effectSys']

        item['publishTime'] = self._extract_date(response, 'Published',
                                                 self._PATTERN_PUBLISHED)
        item['updateTime'] = self._extract_date(response, 'Updated',
                                                self._PATTERN_UPDATED)

        # First CVE id mentioned anywhere in the main content, if any
        # (single search instead of the original duplicated call).
        cve_match = self._PATTERN_CVE.search(vulbar.get_text())
        item['cveID'] = cve_match.group() if cve_match else None

        try:
            # Paragraph following the "Executive Summary" heading.
            item['message'] = bs(response.text, 'lxml').find(
                attrs={'id': 'executive-summary'}
            ).next_sibling.next_sibling.get_text()
        except AttributeError:
            # Heading absent on some bulletin layouts. The original stored
            # the *string* 'None' (not the None object) -- preserved.
            item['message'] = 'None'
        item['chinese'] = False
        item['total'] = self.totalVuls
        item['current'] = int(self.currentVuls)
        yield item

    def _extract_date(self, response, label, pattern):
        """Return the '<label>: Month D, YYYY' date on the page reformatted
        as 'YYYY-MM-DD', or None when the line is absent or unparseable.
        """
        try:
            raw = response.xpath(
                "//p[contains(text(),'%s')]/text()" % label)[0].extract()
            date_text = pattern.search(raw).group(1)
            return time.strftime("%Y-%m-%d",
                                 time.strptime(date_text, "%B %d, %Y"))
        except Exception:
            # Best-effort by design: many bulletins omit or reformat these
            # lines; narrowed from the original bare ``except:`` so it no
            # longer swallows KeyboardInterrupt/SystemExit.
            return None







