package binary

import "bytes"

// fundiving_shuPy registers the embedded "fundiving_shu.py" Python crawler
// script in the package-level Binary map (keyed by the script's file name),
// splicing workDir into the script's image-save path, and then signals
// completion on ch.
//
// Everything inside the backquoted string — including its Chinese comments —
// is runtime payload emitted as Python source, not Go code; do not edit it
// as if it were comments in this file.
//
// NOTE(review): the embedded script saves images with the extension "jpge" —
// this looks like a typo for "jpeg"; confirm with whatever consumes the saved
// files before changing the payload.
// NOTE(review): inside the script, `judge` is assigned only within the
// eventlet.Timeout block; if the timeout fired before that first assignment,
// the trailing `if judge == False` would raise NameError — presumably never
// hit in practice, but worth confirming.
func fundiving_shuPy(workDir string, ch chan bool) {

	fileName := `fundiving_shu.py`
	// Allocate a fresh buffer under the script's name and fill it with the
	// Python source. bytes.Buffer.Write always returns a nil error, so the
	// ignored return value is safe here.
	Binary[fileName] = new(bytes.Buffer)
	Binary[fileName].Write([]byte(
		// Remnant of an earlier revision of the script header, kept for
		// reference (GBK coding declaration that is no longer emitted):
		// `#coding=gbk
		// import requests
		`import requests
from bs4 import BeautifulSoup
import os
import re
import time
import eventlet  #导入eventlet这个模块
import sys

def timeout(path,root,url,ID):
	eventlet.monkey_patch() 
	with eventlet.Timeout(120,False):	  #设置超时时间为120秒
		judge = False
		try:
			if not os.path.exists(root):  #判断是否存在文件并下载img
				os.makedirs(root)
			if not os.path.exists(path):
				headers = {'Referer':'http://sea.fundiving.com/fish_id/' + ID ,
				'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'}
				read = requests.get(url,headers = headers)
				with open(path, "wb")as f:
					f.write(read.content)
					f.close()
					print("文件保存成功！")
					# 告诉后端有文件更新
					print("{\"sign\":\"1\"}")
					judge = True
			else:
				print("文件已存在！")
				# 告诉后端有文件没有更新
				# print("{\"sign\":\"0\"}")
				judge = True
		except:
			# 告诉后端爬取发生了错误
			print("{\"sign\":\"40163\"}")
			print("文件爬取失败！")
	if judge == False:
		# 告诉后端爬取发生了错误
		print("{\"sign\":\"40163\"}")
		print("时间过长!")
		
def getUrl(url):
	try:
		headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36'}
		read = requests.get(url,headers=headers,timeout = 60)  #获取url
		read.encoding = 'utf-8'
		read.raise_for_status()   #状态响应 返回200连接成功
		read.encoding = read.apparent_encoding		#从内容中分析出响应内容编码方式
		return read.text	#Http响应内容的字符串，即url对应的页面内容
	except:
		return "连接失败！"
	
def getID(html):
	soup = BeautifulSoup(html,"html.parser")
	a = soup.find('div',class_ = 'imgbox').find('a')
	href = a['href']
	ID = href.split('/')[-1]
	return ID


def getPic(ID):
	url = 'http://sea.fundiving.com/fish_id/image/' + ID
	link = 'http://sea.fundiving.com/fish_id/' + ID
	soup1 = BeautifulSoup(getUrl(url),"html.parser")
	soup2 = BeautifulSoup(getUrl(link),"html.parser")
	all_img = soup1.find('div',class_ = 'photobox').find_all('img')
	tr = soup2.find('tr')
	td = tr.find_all('td')
	name = td[1].text + td[4].text
	for img in all_img:
		src = img['src']  #获取img标签里的src内容
		img_url = src
		root = "` + workDir + `tmp_files/fundiving/" + sys.argv[1] + '/' + name + '/'	  #保存的路径
		img_name = img_url.split('/')[-1]
		path = root + re.split('jpg',img_name)[0] + 'jpge'		#获取img的文件名
		print(path)
		timeout(path,root,img_url,ID)

if __name__ == '__main__':
	url = "http://sea.fundiving.com/fish_base/index.php?keyword=" + sys.argv[1] + "&act=fish_list&Class_id=0&Order_id=0&Family_id=0"
	html = getUrl(url)
	ID = getID(html)
	getPic(ID)`))

	// Earlier code path that also wrote the script to disk, kept commented
	// out; writeBinaryFile is presumably defined elsewhere in this package.
	// dir := ``
	// writeBinaryFile(workDir, dir, fileName)

	// Signal the caller that the script has been registered in Binary.
	ch <- true

}
