# -*- coding: utf-8 -*-  
import requests
from lxml import etree
import json
import re


def getpage_spider(url):
	"""Scrape a GitHub issue-list page and collect issue numbers.

	Fetches *url*, then for each issue row extracts the "opened by"
	span text and strips every non-digit character from it, keeping
	only the numeric part (the issue number).

	:param url: GitHub issues/labels page URL to scrape.
	:return: list of digit strings; may contain '' when a span held
	         no digits (callers filter those out with skipspace()).
	"""
	# timeout prevents the request from hanging forever on a dead host
	html = requests.get(url, timeout=10)
	select = etree.HTML(html.text)
	# One div per issue row in the GitHub issue list.
	content_field = select.xpath('//div[@class="d-table table-fixed width-full Box-row--drag-hide"]')

	pagenum = []
	for each in content_field:
		commend = each.xpath('div[2]/div[@class="mt-1 text-small text-gray"]/span[@class="opened-by"]/text()')
		for i in commend:
			# r"\D" (raw string): drop everything that is not a digit.
			pagenum.append(re.sub(r"\D", "", i))

	return pagenum


def skipspace(list):
	"""Return a new list with empty-string entries removed.

	Only exact '' entries are dropped; every other value (including
	other falsy ones such as 0 or None) is kept, matching the
	original ``!= ''`` test.

	NOTE(review): the parameter name shadows the builtin ``list``;
	kept as-is for backward compatibility with keyword callers.

	:param list: iterable of values (typically digit strings).
	:return: new list without '' entries.
	"""
	return [item for item in list if item != '']


#for i in range(1,10):
	
#url1="https://github.com/composer/satis/labels/bug"
#url2="https://github.com/composer/satis/issues?q=label%3Abug+is%3Aclosed"
#open_bugnum= skipspace(getpage_spider(url1))
#close_bugnum= skipspace(getpage_spider(url2))
#print(close_bugnum)
