#include <regex>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netdb.h>
#include "CKCrawlerWork.h"
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <sstream>
#include <iostream>
#include <time.h>


// Builds one crawler work unit from a raw URL string.
// The domain is extracted and resolved to an IP first; only when the
// resolution succeeds is the unit marked workable (m_bDomainTurth) and
// the path / query-parameter parts parsed out of the URL.
CKCrawlerWork::CKCrawlerWork(std::string init) : 
	m_db(),
	m_strDomain(""),
	m_strIp(""),
	m_bHasRobots(true),
	m_strDisablePath(""),
	m_bDomainTurth(false),
	m_strPath(""),
	m_strUpdateTime(""),
	m_bPathTurth(false),
	m_strParams(""),
	m_strTitle(""),
	m_strRequestCode("200")
{
	m_strDomain = ParserDomain(init);
	m_strIp = ParserIP(m_strDomain);
	if(m_strIp.empty())
	{//name resolution failed: leave the unit flagged as not workable
		return;
	}
	m_bDomainTurth = true;
	m_strPath = ParserPath(init);
	m_strParams = ParserParams(init);
}


// Runs one crawl cycle: connects the database, fetches and parses the
// page (when allowed), and records everything learned about the URL.
// Returns the harvested URLs joined with '+', "ERROR" when the original
// URL never resolved, or "" when nothing was produced.
std::string CKCrawlerWork::DoingWork(std::string strOtherParams)
{
	if(!InitDatabase(strOtherParams))
	{//no database connection: nothing can be done
		return "";
	}
	std::string strResult;
	if(!m_bDomainTurth)
	{//the URL failed to resolve during construction
		strResult = "ERROR";
	}
	else if(CheckDBValueIsExistsOrAllowWork())
	{//no duplicate record found, crawling is allowed
		//keep the page in one string to avoid copying a large body
		std::string strPage;
		GetAndFormatHttptoHtml(strPage);
		if(!strPage.empty())
		{//a non-empty response was received
			ParserHTML(strPage);
			//join every harvested URL with a '+' separator
			for(size_t i = 0; i < m_vecResultURL.size(); ++i)
			{
				if(0 < i)
				{
					strResult += "+";
				}
				strResult += m_vecResultURL[i];
			}
		}
	}
	//persist whatever was learned about this URL
	DealMessageForDatabase();
	return strResult;
}


// Downloads m_strPath from the resolved host (m_strIp, port 80) with a
// plain HTTP/1.0 GET and stores the response body -- headers stripped --
// into strHtml.
// Side effects: clears m_bDomainTurth when the TCP connection cannot be
// made; sets m_bPathTurth when a non-empty response arrived.
// The robots.txt branch is intentionally disabled (else if(false)).
// Fixes over the original: the socket is closed on every exit path (it
// leaked on connect/recv failure), requests no longer send a trailing
// '\0' (size() + 1), and received data is appended by byte count instead
// of relying on a NUL terminator recv() never writes.
void CKCrawlerWork::GetAndFormatHttptoHtml(std::string &strHtml)
{
	//prepare the server address: IPv4, resolved IP, port 80
	struct sockaddr_in server_addr;
	bzero(&server_addr, sizeof(server_addr));
	server_addr.sin_family = AF_INET;
	server_addr.sin_addr.s_addr = inet_addr(m_strIp.c_str());
	server_addr.sin_port = htons(80);
	//create the socket and validate the descriptor before using it
	int socketserver = socket(AF_INET, SOCK_STREAM, 0);
	if(-1 == socketserver)
	{
		m_bDomainTurth = false;
		return;
	}
	if(-1 == connect(socketserver, 
		(struct sockaddr *)(&server_addr), 
		sizeof(struct sockaddr)))
	{
		//the original leaked the descriptor on this path
		close(socketserver);
		m_bDomainTurth = false;
		return;
	}
	else if(false)
	{//robots.txt retrieval -- deliberately disabled, kept for reference
		std::string strRobots = "GET /robots.txt HTTP/1.0\nUser-Agent: K_Crawler\n";
		strRobots += "Host: " + m_strDomain + "\nAccept: */*\n\n";
		send(socketserver, strRobots.c_str(), strRobots.size(), 0);
		strRobots.clear();
		while(1)
		{
			char buff[2048];
			int get = recv(socketserver, buff, sizeof(buff), 0);
			if(-1 == get)
			{
				close(socketserver);
				m_bHasRobots = false;
				return;
			}
			if(0 == get)
			{
				break;
			}
			//append exactly what was received -- no NUL dependence
			strRobots.append(buff, get);
		}
		//the first line must carry a 200 status for robots.txt to exist
		std::string::size_type nTemp = strRobots.find("\r\n");
		if(std::string::npos == nTemp)
		{
			close(socketserver);
			m_bHasRobots = false;
			return;
		}
		std::string strTemp = strRobots.substr(0, nTemp);
		if(std::string::npos == strTemp.find("200"))
		{//any other status: assume there is no robots file
			m_bHasRobots = false;
		}
		else
		{
			//strip every space so records collapse to "User-Agent:*" form
			std::string::size_type nE;
			while(std::string::npos != (nE = strRobots.find(" ")))
			{
				strRobots.erase(nE, 1);
			}
			//this small crawler only honours the wildcard record; match
			//the compacted form (the original searched "User-Agent: *"
			//AFTER removing all spaces, so it could never match)
			std::string::size_type nPos = strRobots.find("User-Agent:*");
			if(std::string::npos == nPos)
			{//no wildcard record: treat as no robots rules
				m_bHasRobots = false;
			}
			else
			{
				//isolate the wildcard record from any following record
				std::string::size_type nEnd = strRobots.find("User-Agent", nPos + 1);
				if(std::string::npos != nEnd)
				{
					strRobots.erase(nEnd);
				}
				strRobots.erase(0, nPos);
				//collect the value part of every "Disallow:..." line
				while(1)
				{
					std::string::size_type line = strRobots.find("\r\n");
					if(std::string::npos == line)
					{//all lines consumed
						break;
					}
					std::string::size_type split = strRobots.substr(0, line).find(":");
					if(std::string::npos != split)
					{
						m_strDisablePath += strRobots.substr(split + 1, line - split - 1) + ";";
					}
					//drop the line INCLUDING its "\r\n" terminator (the
					//original left it in place and looped forever)
					strRobots.erase(0, line + 2);
				}
				m_bHasRobots = true;
			}
		}
	}
	//send the GET request; size() only -- never the trailing '\0'
	std::string strSend;
	strSend += "GET " + m_strPath + " HTTP/1.0\nUser-Agent: K_Crawler\nHost: ";
	strSend += m_strDomain + "\nAccept: */*\n\n";
	send(socketserver, strSend.c_str(), strSend.size(), 0);
	//collect the full response
	strHtml.clear();
	while(1)
	{
		char buff[2048];
		int get = recv(socketserver, buff, sizeof(buff), 0);
		if(-1 == get)
		{
			close(socketserver);
			m_bPathTurth = false;
			return;
		}
		if(0 == get)
		{//orderly shutdown: response complete
			break;
		}
		//append by received byte count: recv() does not NUL-terminate
		//and the payload may contain embedded '\0' bytes
		strHtml.append(buff, get);
	}
	close(socketserver);
	if(!strHtml.empty())
	{
		m_bPathTurth = true;
	}
	//strip the response headers; a response without the header/body
	//separator is malformed and yields an empty result (as before)
	std::string::size_type nBody = strHtml.find("\r\n\r\n");
	if(std::string::npos == nBody)
	{
		strHtml.clear();
	}
	else
	{//also drop the 4-byte separator the original left at the front
		strHtml.erase(0, nBody + 4);
	}
}


// Splits the page at its </head> tag and routes each part to the right
// parser: the head section may carry the title, everything after it is
// scanned for links.  Pages without </head> go to the body parser whole.
void CKCrawlerWork::ParserHTML(const std::string &strHtml)
{
	static const std::string kHeadEnd = "</head>";
	const std::string::size_type nHeadEnd = strHtml.find(kHeadEnd);
	if(std::string::npos == nHeadEnd)
	{//no head terminator: treat the whole page as body and parse it
		ParserBody(strHtml);
		return;
	}
	//only search the (small) head section for the title
	ParserTitle(strHtml.substr(0, nHeadEnd));
	//skip the 7-character "</head>" tag itself
	ParserBody(strHtml.substr(nHeadEnd + kHeadEnd.size(), strHtml.size()));
}


// Extracts the content of the first <title>...</title> pair in the page
// head into m_strTitle (m_strTitle is left untouched when none exists).
// Uses a lazy capture group: the original greedy ".+" matched up to the
// LAST "</title>" in the input, dragging extra text into the title when
// a second closing tag appeared anywhere later.
void CKCrawlerWork::ParserTitle(const std::string &strHead)
{
	std::regex pattern("<title>(.+?)</title>", std::regex::icase);
	std::smatch match;
	if(std::regex_search(strHead, match, pattern))
	{//keep only the first title; the capture group already excludes
	 //the surrounding tags, so no manual erase() trimming is needed
		m_strTitle = match[1].str();
	}
}


// Scans the page body for anchor tags, validates each href target's
// domain and appends the normalised URL of every valid one to
// m_vecResultURL.
void CKCrawlerWork::ParserBody(const std::string &strBody)
{
	//capture group 1 holds the quoted href value of each <a ...> tag
	static const std::regex anchorPattern(
		"<\\s*[Aa]\\s+[^>]*href\\s*=\\s*\"([^\"]*)\"", std::regex::icase);
	const std::sregex_token_iterator itEnd;
	std::sregex_token_iterator itUrl(strBody.begin(),
		strBody.end(), anchorPattern, 1);
	for(; itUrl != itEnd; ++itUrl)
	{
		const std::string strCandidate = *itUrl;
		//only keep link targets whose domain actually resolves
		if(CheckDomain(strCandidate))
		{
			m_vecResultURL.push_back(ParserURL(strCandidate));
		}
	}
}


// Normalises a raw link into "domain/path" form:
//   "//x", "http://x", "https://x"  ->  "x" (scheme prefix stripped)
//   "www.x"                         ->  kept as-is (bare host)
//   "/p"                            ->  current domain + "/p"
//   anything containing a known TLD ->  kept as-is (treated as a host)
//   everything else                 ->  current domain + "/" + link
std::string CKCrawlerWork::ParserURL(const std::string &strURL)
{
	if("//" == strURL.substr(0, 2))
	{//protocol-relative link
		return strURL.substr(2);
	}
	if("http://" == strURL.substr(0, 7))
	{
		return strURL.substr(7);
	}
	if("https://" == strURL.substr(0, 8))
	{
		return strURL.substr(8);
	}
	if("www." == strURL.substr(0, 4))
	{//already a bare host name
		return strURL;
	}
	if("/" == strURL.substr(0, 1))
	{//absolute path on the current domain
		return m_strDomain + strURL;
	}
	//a link containing a well-known top-level domain anywhere in its
	//text is taken to be a host of its own
	static const char *const kTlds[] = {
		".com", ".top", ".win", ".red", ".org", ".net", ".gov",
		".wang", ".edu", ".mil", ".blz", ".name", ".info", ".mobi",
		".pro", ".travel", ".asia", ".rec", ".cn", ".int", ".post"
	};
	for(const char *const tld : kTlds)
	{
		if(std::string::npos != strURL.find(tld))
		{
			return strURL;
		}
	}
	//everything else is a path relative to the current domain
	return m_strDomain + "/" + strURL;
}


// Returns the domain part of a URL: everything before the first '/' of
// the normalised "domain/path" form (the whole string when no '/').
std::string CKCrawlerWork::ParserDomain(const std::string &strURL)
{
	const std::string strNormalized = ParserURL(strURL);
	const std::string::size_type nSlash = strNormalized.find("/");
	return (std::string::npos == nSlash)
		? strNormalized
		: strNormalized.substr(0, nSlash);
}


bool CKCrawlerWork::CheckDomain(const std::string &strURL)
{
	std::string strDomain = ParserDomain(strURL);
	return !(ParserIP(strDomain).empty());
}


std::string CKCrawlerWork::ParserIP(const std::string &strURL)
{
	std::string strDomain = ParserDomain(strURL);
	//判断域名是否合法
	//若返回值不为空则说明解析到对应的ip地址，存放在返回值中
	return GetHostByName(strDomain);
}


// Returns the path component of a URL on the current domain, without
// the query string.  A bare domain (nothing, or just "/", after the
// domain) maps to "/".
// NOTE(review): assumes the normalised URL starts with m_strDomain --
// true for the constructor's URL, the only caller; confirm if reused.
std::string CKCrawlerWork::ParserPath(const std::string &strURL)
{
	//drop the leading domain from the normalised URL
	std::string strPath = ParserURL(strURL).substr(m_strDomain.size());
	if(strPath.size() <= 1)
	{//only "/" (or nothing) remains
		return "/";
	}
	//cut away the "?params" tail when one is present
	const std::string::size_type nQuery = strPath.find("?");
	return (std::string::npos == nQuery) ? strPath : strPath.substr(0, nQuery);
}


// Returns everything behind the first '?' of the URL, or the literal
// "?" placeholder when the URL carries no query string.
std::string CKCrawlerWork::ParserParams(const std::string &strURL)
{
	const std::string::size_type nQuery = strURL.find("?");
	return (std::string::npos == nQuery) ? "?" : strURL.substr(nQuery + 1);
}


// Connects m_db to the "k_crawler" schema and reports success.
// strParams optionally carries the credentials as "host|user|password";
// when empty, local root defaults are used.
bool CKCrawlerWork::InitDatabase(std::string strParams)
{
	std::string host = "127.0.0.1";
	std::string user = "root";
	std::string password = "root";
	if(!strParams.empty())
	{//split "host|user|password" at the two '|' separators
		const std::string::size_type nFirst = strParams.find("|");
		host = strParams.substr(0, nFirst);
		strParams.erase(0, nFirst + 1);
		const std::string::size_type nSecond = strParams.find("|");
		user = strParams.substr(0, nSecond);
		password = strParams.erase(0, nSecond + 1);
	}
	m_db.SetHost(host);
	m_db.SetUserPasswork(user, password);
	m_db.SetDBname("k_crawler");
	//the connection result is the method's result
	return m_db.connectDB();
}


// Decides whether this URL should be crawled by consulting the database.
// Returns true when crawling is allowed (no domain/path record yet, or
// no page with the same parameter string); returns false on a duplicate,
// on any query failure, or when the path already holds too many
// parameterised pages.
// NOTE(review): the queries are built by concatenating page-derived
// strings (m_strDomain / m_strPath) -- SQL injection risk; parameterise
// if the DB layer supports it.
bool CKCrawlerWork::CheckDBValueIsExistsOrAllowWork()
{
	std::string domain_id = "";
	std::string path_id = "";
	std::string sql = "";
	sql += "select * from k_url_root where domain=\"" + m_strDomain + "\"";
	K_Engine::CKDBSelectResult *selectresult = m_db.Select(sql);
	if(NULL == selectresult)
	{//query failed
		return false;
	}
	else
	{
		if(0 == selectresult->nGetResultLength())
		{//no record for this domain yet: crawling allowed
			m_db.FreeCKDBSR(selectresult);
			return true;
		}
		else
		{//a domain record exists: remember its id for the next query
			domain_id = selectresult->GetValue(0, "domain_id");
			m_db.FreeCKDBSR(selectresult);
		}		
	}
	sql = "";
	sql += "select * from k_url_path where id_domain=" + domain_id + 
		" and path=\"" + m_strPath + "\"";
	selectresult = m_db.Select(sql);
	if(NULL == selectresult)
	{//query failed
		return false;
	}
	else
	{
		if(0 == selectresult->nGetResultLength())
		{//no record for this path yet: crawling allowed
			m_db.FreeCKDBSR(selectresult);
			return true;
		}
		else
		{//a path record exists: remember its id for the page query
			path_id = selectresult->GetValue(0, "id_path");
			m_db.FreeCKDBSR(selectresult);
		}
	}
	sql = "";
	sql += "select * from k_page where id_domain=" + domain_id + 
		" and id_path=" + path_id;
	selectresult = m_db.Select(sql);
	if(NULL == selectresult)
	{//query failed
		return false;
	}
	else
	{
		int nPath_Length = selectresult->nGetResultLength();
		if(0 == nPath_Length)
		{//no page record yet: crawling allowed
			m_db.FreeCKDBSR(selectresult);
			return true;
		}
		//pages already exist for this path
		if( 200 <= nPath_Length)
		{
			//too many parameterised pages for this path: stop traversing
			//NOTE(review): the original comment said 30 but the code
			//caps at 200 -- confirm which limit was intended
			m_db.FreeCKDBSR(selectresult);
			return false;
		}
		else
		{
			//refuse exact duplicates of the same parameter string
			for(int i =0; i < nPath_Length; i++)
			{
				if( m_strParams == selectresult->GetValue(i, "params"))
				{
					m_db.FreeCKDBSR(selectresult);
					return false;
				}
			}
		}
		m_db.FreeCKDBSR(selectresult);
		return true;
	}
}


// Persists the crawl result: ensures a k_url_root row for the domain and
// a k_url_path row for the path exist (inserting them, flagged with the
// domain/path validity, when missing), then records the page visit in
// k_page.  Bails out silently on any database failure.
// Fix over the original: the two re-select results after an insert were
// dereferenced without NULL / result-length checks and crashed when the
// query failed; both are now guarded.
// NOTE(review): the SQL is built by concatenating page-derived strings
// (domain/path/params/title) -- SQL injection risk; parameterise if the
// DB layer allows it.
void CKCrawlerWork::DealMessageForDatabase()
{
	std::string strSQL = "";
	std::string id_domain = "";
	std::string id_path = "";
	//look the domain up; insert it when missing
	strSQL += "select id_domain from k_url_root where domain=\"" + m_strDomain + "\"";
	K_Engine::CKDBSelectResult *selectresult = m_db.Select(strSQL);
	if(NULL == selectresult)
	{
		return;
	}
	else if(0 == selectresult->nGetResultLength())
	{
		m_db.FreeCKDBSR(selectresult);
		strSQL = "";
		strSQL += "insert into k_url_root(domain,ip,has_robots,disable_path,turth) values(\"" +  
			m_strDomain + "\",\"" + m_strIp + "\",false,\"\",";
		if(m_bDomainTurth)
		{//domain resolved: insert it, then re-read its generated id
			strSQL += "true)";
			if(!m_db.Query(strSQL))
			{
				return;
			}
			strSQL = "";
			strSQL += "select id_domain from k_url_root where domain=\"" + m_strDomain + "\"";
			selectresult = m_db.Select(strSQL);
			//guard the re-select: the original dereferenced it unchecked
			if(NULL == selectresult)
			{
				return;
			}
			if(0 == selectresult->nGetResultLength())
			{
				m_db.FreeCKDBSR(selectresult);
				return;
			}
			id_domain = selectresult->GetValue(0, "id_domain");
			m_db.FreeCKDBSR(selectresult);
		}
		else
		{//broken domain: record it and stop regardless of insert outcome
			strSQL += "false)";
			m_db.Query(strSQL);
			return;
		}
	}
	else
	{
		id_domain = selectresult->GetValue(0, "id_domain");
		m_db.FreeCKDBSR(selectresult);
	}
	//look the path up; insert it when missing
	if(!m_strPath.empty())
	{
		strSQL = "";
		strSQL += "select id_path from k_url_path where id_domain=" +  
			id_domain + " and path=\"" + m_strPath + "\"";
		selectresult = m_db.Select(strSQL);
		if(NULL == selectresult)
		{
			return;
		}
		else if(0 == selectresult->nGetResultLength())
		{
			m_db.FreeCKDBSR(selectresult);
			//encode time(NULL) as a decimal string, most significant
			//digit first, at most 15 digits
			time_t timep;
			time(&timep);
			std::string strTime = "";
			int i = 15;
			for(; timep && i ; --i, timep /= 10)  
			{
				strTime = "0123456789"[timep % 10] + strTime;
			} 
			
			strSQL = "";
			strSQL += "insert into k_url_path(path,id_domain,updatetime,turth) values(\"" + m_strPath + 
				"\"," + id_domain + "," + strTime + ",";
			if(m_bPathTurth)
			{//path answered: insert it, then re-read its generated id
				strSQL += "true)";
				if(!m_db.Query(strSQL))
				{
					return;
				}
				strSQL = "";
				strSQL += "select id_path from k_url_path where id_domain=" +  
					id_domain + " and path=\"" + m_strPath + "\"";
				selectresult = m_db.Select(strSQL);
				//guard the re-select (same unchecked dereference as above)
				if(NULL == selectresult)
				{
					return;
				}
				if(0 == selectresult->nGetResultLength())
				{
					m_db.FreeCKDBSR(selectresult);
					return;
				}
				id_path = selectresult->GetValue(0, "id_path");
				m_db.FreeCKDBSR(selectresult);
			}
			else
			{//dead path: record it and stop regardless of insert outcome
				strSQL += "false)";
				m_db.Query(strSQL);
				return;
			}
		}
		else
		{
			id_path = selectresult->GetValue(0, "id_path");
			m_db.FreeCKDBSR(selectresult);
		}
	}
	//record the page itself (parameters, title and request code);
	//the select result is intentionally discarded (original behaviour)
	strSQL = "";
	strSQL += "select id_page from k_page where id_domain=" + 
		id_domain + " and id_path=" + id_path + "";
	selectresult = m_db.Select(strSQL);
	if(NULL == selectresult)
	{
		return;
	}
	m_db.FreeCKDBSR(selectresult);
	strSQL = "";
	strSQL += "insert into k_page(id_domain,id_path,params,title,request) values(" + id_domain +
		"," + id_path + ",\"" + m_strParams + "\",\"" + m_strTitle + 
		"\"," + m_strRequestCode + ")";
	m_db.Query(strSQL);
}


// Resolves a host name to its first IPv4 address via getaddrinfo and
// returns it as a dotted-decimal string.  An empty return value means
// the name could not be resolved.
// Fixes over the original: the local variable named `errno` collided
// with the C library errno macro (a compile hazard whenever <cerrno> is
// pulled in) and `ipstr` was used without checking inet_ntop's result.
std::string CKCrawlerWork::GetHostByName(const std::string name)
{
	std::string ret = "";
	struct addrinfo hints = { 0 };
	struct addrinfo *result = NULL;
	//restrict the lookup to IPv4 stream sockets
	hints.ai_family = AF_INET;
	hints.ai_socktype = SOCK_STREAM;
	
	int status = getaddrinfo(name.c_str(), NULL, &hints, &result);
	if(0 != status || NULL == result)
	{//resolution failed: report with an empty string
		return ret;
	}
	//convert only the FIRST address of the list, as before
	char ipstr[INET_ADDRSTRLEN] = { 0 };
	if(NULL != inet_ntop(
		AF_INET, 
		&(((struct sockaddr_in *)(result->ai_addr))->sin_addr),
		ipstr,
		sizeof(ipstr)))
	{//use the buffer only when the conversion actually succeeded
		ret += ipstr;
	}
	freeaddrinfo(result);
	return ret;
}
