package binary

import (
	"bytes"
	"log"

	"github.com/gogf/gf/text/gregex"
)

// fishdb_kePy registers the embedded "fishdb_ke.py" crawler script into the
// package-level Binary map. The script template below has workDir spliced
// into its download paths; every tab character in the template (used both as
// token separator and indentation) is replaced with three spaces before the
// script is stored. A completion signal is sent on ch when done.
//
// NOTE(review): the template's final try/except was previously mis-indented
// (the try body was at the same level as `try:` and the except body was
// dedented below the loop), so the generated Python failed with an
// IndentationError before running. Fixed in the template below; all other
// bytes of the script are unchanged.
func fishdb_kePy(workDir string, ch chan bool) {

	fileName := `fishdb_ke.py`
	Binary[fileName] = new(bytes.Buffer)
	// Replace every tab in the template with three spaces. Tabs are used
	// uniformly for indentation in the template, so the emitted Python has
	// consistent 3-space indentation. (Tabs inside Python string literals,
	// e.g. the User-Agent header, are replaced too — intentional/legacy.)
	inf, err := gregex.ReplaceStringFunc("\t",
		//	`#coding=gbk
		//	import	requests
		`import	requests
import	sys
from	bs4	import	BeautifulSoup
import	os
import	re
import	time
import	eventlet	#导入eventlet这个模块


#ke	=	['F096','F097','F094','F095','F084','F079','F074','F081','F156','F151','F506','F505','F187','F048','F313','F304','F245','F366','F351','F474',
#			'F370','F378','F355','F393','F392',	'F331',	'F466',	'F376',	'F362',	'F399',	'F467',	'F412',	'F377',	'F380',	'F371',	'F338',	'F475',	'F356',
#			'F364',	'F374',	'F381',	'F480',	'F481',	'F354',	'F453',	'F365',	'F382',	'F373',	'F414',	'F472']
ke	=	[]
ke.append(sys.argv[1])


def	timeout(path,root,url):
	eventlet.monkey_patch()	
	with	eventlet.Timeout(120,False):		#设置超时时间为2秒
		judge	=	False
		try:
			if	not	os.path.exists(root):	#判断是否存在文件并下载img
				os.makedirs(root)
			if	not	os.path.exists(path):
				read	=	requests.get(url)
				with	open(path,	"wb")as	f:
					f.write(read.content)
					f.close()
					print("文件保存成功！")
					#	告诉后端有文件更新
					print("{\"sign\":\"1\"}")
					judge	=	True
			else:
				print("文件已存在！")
				#	告诉后端有文件没有更新
				#	print("{\"sign\":\"0\"}")
				judge	=	True
		except:
			#	告诉后端爬取发生了错误
			print("{\"sign\":\"40163\"}")
			print("文件爬取失败！")
	if	judge	==	False:
		#	告诉后端爬取发生了错误
		print("{\"sign\":\"40163\"}")
		print("时间过长!")
		
def	get_data(url):
	headers	=	{'User-Agent':'Mozilla/5.0	(Windows	NT	10.0;	Win64;	x64)	AppleWebKit/537.36	(KHTML,	like	Gecko)	Chrome/80.0.3987.149	Safari/537.36'}
	r	=	requests.get(url,headers=headers,timeout	=	60)
	soup	=	BeautifulSoup(r.text,	'html.parser')
	a	=	soup.find_all('td',class_='tdHead2')
	Ke	=	a[1].text	+	a[5].text
	for	i	in	range(13,len(a),3):
		xueming.append(a[i].text)
	for	i	in	range(14,len(a),3):
		chinese.append(a[i].text)
	return	Ke

'''
思路：获取网址
		获取图片地址
		爬取图片并保存
'''
#	获取网址
def	getUrl(url):
	try:
		headers	=	{'User-Agent':'Mozilla/5.0	(Windows	NT	10.0;	Win64;	x64)	AppleWebKit/537.36	(KHTML,	like	Gecko)	Chrome/80.0.3987.149	Safari/537.36'}
		read	=	requests.get(url,headers=headers,timeout	=	60)	#获取url
		read.raise_for_status()		#状态响应	返回200连接成功
		read.encoding	=	read.apparent_encoding		#从内容中分析出响应内容编码方式
		return	read.text	#Http响应内容的字符串，即url对应的页面内容
	except:
		return	"连接失败！"

def	getPic(html,M,F,S,K):
	soup1	=	BeautifulSoup(html,	"html.parser")
	#通过分析网页内容，查找img的统一父类及属性
	all_a	=	soup1.find('div',style	=	'width:100%').find_all('a',href	=	re.compile('showpic'))	#img为图片的标识
	for	a	in	all_a:
		href	=	a['href']	#获取img标签里的src内容
		website	=	"http://fishdb.sinica.edu.tw/chi/"	+	href
		r	=	requests.get(website,timeout	=	60)
		soup2	=	BeautifulSoup(r.text,	"html.parser")
		img	=	soup2.find('td',colspan	=	'2').find('img')	#img为图片的标识
		src	=	img['src']
		img_url	="http://fishdb.sinica.edu.tw/chi/"	+	src
		print(img_url)
		root	=	"`+workDir+`tmp_files/fishdb/"+	k	+	'/'	+	K	+	'/'	+	M	+	'('	+	F	+	'	'	+	S	+	')'	+	'/'	#保存的路径
		path	=	root	+	img_url.split('/')[-1]	#获取img的文件名
		print(path)
		timeout(path,root,img_url)
def	getPic2(html,M,F,S,K):
	soup	=	BeautifulSoup(html,	"html.parser")
	#通过分析网页内容，查找img的统一父类及属性
	all_a	=	soup.find('div',style	='width:100%').find_all('a',href	=	re.compile('specimenpic'))	#img为图片的标签
	for	a	in	all_a:
		href	=	a['href']	#获取img标签里的src内容
		img_url	=	href
		img_url	=	("http://fishdb.sinica.edu.tw/chi/"	+	href)
		soup2	=	BeautifulSoup(getUrl(img_url),	"html.parser")
		img	=	soup2.find('div',align	=	"center").find('img')
		src	=	img['src']
		print(src)
		root	=	"`+workDir+`tmp_files/fishdb/"+	k	+	'/'	+	K	+	'/'	+	M	+	'('	+	F	+	'	'	+	S	+	')'	+	'/'	#保存的路径
		path	=	root	+	src.split('/')[-1]	#获取img的文件名
		print(path)
		timeout(path,root,src)
#	主函数
if	__name__	==	'__main__':
	print(ke)
	for	k	in	ke:
		chinese	=	[]
		xueming	=	[]
		url	=	"http://fishdb.sinica.edu.tw/chi/family.php?id="	+	k
		K	=	get_data(url)
		for	i	in	range(len(xueming)):
			First	=	xueming[i].split('	')[0]
			Second	=	xueming[i].split('	')[1]
			Mingzi	=	chinese[i]
			print(chinese[i])
			html_url=getUrl("http://fishdb.sinica.edu.tw/chi/showpic.php?science="	+	First	+"%20"	+			Second)
			try:
				getPic(html_url,Mingzi,First,Second,K)
				getPic2(html_url,Mingzi,First,Second,K)
			except:
				print("No	such	of	fish")

#/home/g/photo/`, func(s string) string {
			return "   "
		})

	if err != nil {
		// A bad pattern is a programmer error; abort loudly.
		log.Panicln(err)
	}

	// bytes.Buffer.Write never returns a non-nil error.
	Binary[fileName].Write([]byte(inf))

	// dir := ``
	// writeBinaryFile(workDir, dir, fileName)

	// Signal the caller that this script has been registered.
	ch <- true

}
