﻿/*******************************************************************
Copyright (c) 2011 이재범

*  Permission is hereby granted, free of charge, to any person
*  obtaining a copy of this software and associated documentation
*  files (the "Software"), to deal in the Software without
*  restriction, including without limitation the rights to use,
*  copy, modify, merge, publish, distribute, sublicense, and/or sell
*  copies of the Software, and to permit persons to whom the
*  Software is furnished to do so, subject to the following
*  conditions:
* 
*  The above copyright notice and this permission notice shall be
*  included in all copies or substantial portions of the Software.
* 
*  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
*  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
*  OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
*  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
*  HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
*  WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
*  FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
*  OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************/

#pragma once  // placed before any other content so the include guard covers the whole header

#include "hash.h"
#include "Hash_set.h"

#include <string>   // std::basic_string, std::string, std::wstring
#include <vector>   // std::vector

#include <boost/thread.hpp>

// Web crawler: downloads a page, extracts equations and linked urls,
// and spawns one crawler thread per discovered url.
class Crawler
{
public:
	static int total_crawler_number;
	static int total_created_crawler_number; 
	static Dictionary<string> keyword_left;
	static Dictionary<string> keyword_right; 

	// urls whose processing has fully completed
	static Hash_set url_finished_list;

protected:
	int crawler_id; 
	int webpage_type;
	basic_string<TCHAR> url_to_process;  // url address this crawler processes

	Dictionary<basic_string<TCHAR>> url_process_list; // urls currently being processed; one crawler is launched per url

	vector<Hash*> hash_data; // hash data of each processed page
	InternetData* download_data; // holds the downloaded data

public:
	Crawler() {}
	Crawler(basic_string<TCHAR> _url_to_process);

	void restart(); // restart this Crawler

	// Build a file-system-safe file name from a url by replacing every
	// character that Windows forbids in file names with '_'.
	// BUG FIX: the original table listed L"<" twice (10 entries for only
	// 9 distinct forbidden characters); a single find_first_of pass over
	// the 9 characters is equivalent and scans the url only once instead
	// of restarting the search from the beginning after each replacement.
	string convert_to_filename(wstring url)
	{
		// Characters invalid in Windows file names: \ / : * ? " < > |
		static const wchar_t* const forbidden = L"\\/:*?\"<>|";
		for (wstring::size_type loc = url.find_first_of(forbidden);
			loc != wstring::npos;
			loc = url.find_first_of(forbidden, loc + 1))
		{
			url[loc] = L'_';
		}

		return wstring_to_string_conv(url);
	}

	void url_extraction(); // split out the linked urls
	void equation_extraction(); // extract equations according to the webpage type
	int find_webpage_type(basic_string<TCHAR>& url); // determine what kind of webpage the url points to

	void tex_equation_extraction(string &list, string left, string right, vector<string> &result); // extract equations from a latex file
	bool url_restriction(basic_string<TCHAR> &url);  // returns true when the url is not allowed
	void adjust_relative_address(basic_string<TCHAR>&url); // fix urls written as relative addresses
	void adjust_url(basic_string<TCHAR>& url); // handle special characters inside the url

	vector<boost::thread*> crawlers; // crawler threads to launch (one per linked url)
	void operator()(); // crawling is performed here

	// BUG FIX: declared virtual because this class is used as a
	// polymorphic base (see CrawlerArxiv); deleting a derived object
	// through a Crawler* with a non-virtual destructor is undefined
	// behavior.
	virtual ~Crawler(); 
};

// Crawler specialised for arxiv.org pages.
// NOTE(review): Crawler declares restart(), equation_extraction() and
// operator()() without the virtual keyword, so the declarations below
// hide the base-class versions rather than override them; calls made
// through a Crawler* will not dispatch to this class — confirm this is
// intentional.
class CrawlerArxiv : public Crawler
{
public:
	CrawlerArxiv() : Crawler() {}
	CrawlerArxiv(basic_string<TCHAR> page_url) : Crawler(page_url) {}

	void start();
	void restart();

	void equation_extraction();
	void operator()();
};