package binary

import "bytes"

// fishdb_shuPy registers an embedded Python scraper script under the key
// "fishdb_shu.py" in the package-level Binary map (declared elsewhere in
// this package). The script, stored as a raw string, downloads fish images
// from fishdb.sinica.edu.tw for a species name given as its first CLI
// argument, saving them under <workDir>tmp_files/fishdb/.
//
// workDir is spliced directly into the script text (as the save-path
// prefix), so it must already end with a path separator — the script
// concatenates it with "tmp_files/..." verbatim.
//
// When the buffer is populated, true is sent on ch to signal completion.
// NOTE(review): ch is an unbuffered-looking chan bool; this send blocks
// until a receiver is ready — presumably the caller waits on it. Confirm.
// NOTE(review): if several fishdb_*Py registrars run concurrently, the
// writes to the shared Binary map are a data race — verify callers
// serialize these, or guard the map with a mutex.
func fishdb_shuPy(workDir string, ch chan bool) {

	fileName := `fishdb_shu.py`
	// Fresh buffer for this script; bytes.Buffer.Write never returns a
	// non-nil error, so the unchecked Write below is safe.
	Binary[fileName] = new(bytes.Buffer)
	Binary[fileName].Write([]byte(
		// Commented-out alternative first line of the script (a GBK
		// coding declaration, presumably for Windows consoles):
		// `#coding=gbk
		`import requests
import sys
from bs4 import BeautifulSoup
import os
import re
import time
import eventlet  #导入eventlet这个模块
# print("1")

def timeout(path,root,url):
	eventlet.monkey_patch() 
	with eventlet.Timeout(120,False):	 #设置超时时间为2秒
		judge = False
		try:
			if not os.path.exists(root):  #判断是否存在文件并下载img
				os.makedirs(root)
			if not os.path.exists(path):
				read = requests.get(url)
				with open(path, "wb")as f:
					f.write(read.content)
					f.close()
					print("文件保存成功！")
					# 告诉后端有文件更新
					print("{\"sign\":\"1\"}")
					judge = True
			else:
				print("文件已存在！")
				# 告诉后端有文件没有更新
				print("{\"sign\":\"0\"}")
				judge = True
		except:
			# 告诉后端爬取发生了错误
			print("{\"sign\":\"40163\"}")
			print("文件爬取失败！")
	if judge == False:
		# 告诉后端爬取发生了错误
		print("{\"sign\":\"40163\"}")
		print("时间过长!")
		

		
def getUrl(url):
	try:
		headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'}
		read = requests.get(url,headers=headers,timeout = 60)  #获取url
		read.raise_for_status()   #状态响应 返回200连接成功
		read.encoding = read.apparent_encoding	  #从内容中分析出响应内容编码方式
		return read.text	#Http响应内容的字符串，即url对应的页面内容
	except:
		return "连接失败！"
	
def getPic(html):
	soup1 = BeautifulSoup(html, "html.parser")
	#通过分析网页内容，查找img的统一父类及属性
	all_a = soup1.find('div',style = 'width:100%').find_all('a',href = re.compile('showpic')) #img为图片的标识
	name = soup1.find('font',class_='font20').text
	for a in all_a:
		href = a['href']  #获取img标签里的src内容
		website = "http://fishdb.sinica.edu.tw/chi/" + href
		r = requests.get(website,timeout = 60)
		soup2 = BeautifulSoup(r.text, "html.parser")
		img = soup2.find('td',colspan = '2').find('img') #img为图片的标识
		src = img['src']
		img_url ="http://fishdb.sinica.edu.tw/chi/" + src
		# print(img_url)
		root = "` + workDir + `tmp_files/fishdb/" + sys.argv[1] + '/' + name  + '/'    #保存的路径
		path = root +  img_url.split('/')[-1]  #获取img的文件名
		# print(path)
		timeout(path,root,img_url)
def getPic2(html):
	soup = BeautifulSoup(html, "html.parser")
	#通过分析网页内容，查找img的统一父类及属性
	all_a = soup.find('div',style ='width:100%').find_all('a',href = re.compile('specimenpic')) #img为图片的标签
	name = soup.find('font',class_='font20').text
	for a in all_a:
		href = a['href']  #获取img标签里的src内容
		img_url = href
		img_url = ("http://fishdb.sinica.edu.tw/chi/" + href)
		soup2 = BeautifulSoup(getUrl(img_url), "html.parser")
		img = soup2.find('div',align = "center").find('img')
		src = img['src']
		# print(src)
		root = "` + workDir + `tmp_files/fishdb/"+ sys.argv[1] + '/' + name + '/'	 #保存的路径
		path = root + src.split('/')[-1]  #获取img的文件名
		# print(path)
		timeout(path,root,src)
		
if __name__ == '__main__':
	url = "http://fishdb.sinica.edu.tw/chi/showpic.php?science=" + sys.argv[1]
	html = getUrl(url)
	getPic(html)
	getPic2(html)`))

	// Commented-out alternative: write the script straight to disk
	// instead of (or in addition to) holding it in the Binary map.
	// dir := ``
	// writeBinaryFile(workDir, dir, fileName)

	// Signal the caller that this script has been registered.
	ch <- true

}
