# -*- coding: UTF-8 -*-
import socket
import urllib
import urllib2
from lxml import etree

import lxml
from bs4 import BeautifulSoup
from scrapy import cmdline
# Entry point: launch the "loldy" spider via scrapy's CLI runner.
# Guarded so that merely importing this module does not kick off a crawl
# as a side effect — the crawl only starts when the file is run directly.
if __name__ == "__main__":
    cmdline.execute("scrapy crawl loldy".split())
# str=" var GvodUrls1 = \"盗墓笔记HD1280超清国语中英双字$thunder://QUFlZDJrOi8vfGZpbGV8ob5sb2y159OwzOzMw3d3dy5sb2xkeXR0LmNvbaG/tcHEubHKvMcuSEQxMjgws6zH5bn60+/W0NOiy6vX1i5tcDR8MzA3NDc3Njk5NXxCOEE2OEY5RDQ2RkZGMzlEQzAzNUYzMEJCRkUzMDA4NHxoPVRXWkJWQkdIMlNRRURCTkFCM0M3WEdZQVRTSDY0RUoyfC9aWg==$###盗墓笔记HD1280高清国语中英双字$thunder://QUFlZDJrOi8vfGZpbGV8ob5sb2y159OwzOzMw3d3dy5sb2xkeXR0LmNvbaG/tcHEubHKvMcuSEQxMjgwuN/H5bn60+/W0NOiy6vX1i5tcDR8MTcxMDAzOTYzMXxERjgyMUUzN0QxMDU3MDc3RERGMkE2NDdGNzA3RTlDNXxoPU5RTkRBSEZVNFhMUTY0U1FCT1dRVldKS0MyQjNWQ042fC9aWg==$###\";echoDown(GvodUrls1,1); "
# start = str.find('\"')
# end = str.rfind('\"')
# print start
# print end
# print str[start+1:end]



# socket.setdefaulttimeout(5)
# User_Agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'
# header = {}
# header['User-Agent'] = User_Agent
#
# url = 'http://www.xicidaili.com/nn/'
# url_text = "http://ip.chinaz.com/getip.aspx"
# req = urllib2.Request(url,headers=header)
# res = urllib2.urlopen(req).read().decode('utf-8')
# selector = lxml.html.fromstring(res)
# ips = selector.xpath('//div/table[@id="ip_list"]/tr/td[2]/text()')
# kou = selector.xpath('//div/table[@id="ip_list"]/tr/td[3]/text()')
#
# print "\n---->"
# print ips
#
# f = open("./proxy","w")
#
# for x in range(0,len(ips)):
#     ip_temp = "http://"+ips[x]+':'+kou[x]
#     propy = {"http": ip_temp}
#     print ip_temp
#     try:
#         res = urllib.urlopen(url_text, proxies=propy).read()
#         f.write(ip_temp+'\n')
#         print res
#     except Exception, e:
#         print propy
#         print e
#         continue
# print len(ips)

# User_Agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0'
# header = {}
# header['User-Agent'] = User_Agent
#
#
# socket.setdefaulttimeout(5)
# f = open("./proxy")
# lines = f.readlines()
# url = "http://ip.chinaz.com/getip.aspx"
# for i in range(0,len(lines)):
#     ip = lines[i].strip("\n")
#     print ip
#     propy  = {"http":ip}
#     try:
#         res = urllib.urlopen(url, proxies=propy).read()
#         print res
#     except Exception, e:
#         print propy
#         print e
#         continue