# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np


class ExtractData:
    """Build (X, Y) training data for website classification.

    There are four data sources in this project:
      Alexa:   top popular URLs from Alexa
      Search:  filtered URLs from Baidu Search using keywords
      World68: URLs from World68
      Hao123:  URLs from Hao123
    """

    # Labelled spreadsheets, one per source (paths relative to 'Data/').
    NAME_SEARCH = 'Label/Data_search.xlsx'
    NAME_ALEXA = 'Label/Data_alexa.xlsx'
    NAME_WORLD68 = 'Label/Data_world68.xlsx'
    NAME_HAO123 = 'Label/Data_hao123.xlsx'

    # Column names assigned to each spreadsheet after loading.
    COLUMNS_SEARCH = ['Website','Title','Class2','Class3','Key_Word']
    COLUMNS_ALEXA= ['Website','Title','Class2','Class2_Other','Class3','Title_Crawl']
    COLUMNS_WORLD68= ['Website','Title','Class2','Class3','Class_Original','Description']
    COLUMNS_HAO123 = ['Website','Title','Class2','Class3']

    # Crawled titles for the non-Alexa sources, one 'website;title' per line.
    CRAWL_TITLE = 'Data/Data_Crawled_Title.txt'

    def __init__(self, source, title=True):
        """Load the requested source(s) and build self.X / self.Y.

        source: one of (or a list of) ['Alexa', 'Search', 'World68', 'Hao123'].
        title:  if False, do not include the page title in the training set.
        """
        datasets = self.read_data(source)
        self.X, self.Y = self.generate_website_title_category(datasets, title)

    # =================================================================================================
    # Read data

    def _read_excel(self, name, columns):
        # Load one labelled spreadsheet and normalise its column names.
        data = pd.read_excel('Data/' + name)
        data.columns = columns
        return data

    def read_data_Alexa(self):
        return self._read_excel(self.NAME_ALEXA, self.COLUMNS_ALEXA)

    def read_data_Search(self):
        return self._read_excel(self.NAME_SEARCH, self.COLUMNS_SEARCH)

    def read_data_World68(self):
        return self._read_excel(self.NAME_WORLD68, self.COLUMNS_WORLD68)

    def read_data_Hao123(self):
        return self._read_excel(self.NAME_HAO123, self.COLUMNS_HAO123)

    def read_one_data(self, source):
        """Read the dataset of a single named source.

        Raises ValueError on an unknown source name.
        (BUG FIX: the original printed a warning and then crashed with
        UnboundLocalError on 'return data'; an explicit exception is clearer.)
        """
        readers = {
            'Alexa': self.read_data_Alexa,
            'Search': self.read_data_Search,
            'World68': self.read_data_World68,
            'Hao123': self.read_data_Hao123,
        }
        if source not in readers:
            raise ValueError(
                'Data source is wrong ! They should be one or some of '
                '[Alexa,Search,World68,Hao123]')
        return readers[source]()

    def read_data(self, source):
        """Read one or several sources; return a dict source-name -> DataFrame."""
        if not isinstance(source, list):  # data from one source
            source = [source]
        return {s: self.read_one_data(s) for s in source}

    def read_crawled_title(self):
        """Read crawled titles from CRAWL_TITLE into a dict website -> title.

        For data except Alexa, titles were crawled together and stored in
        CRAWL_TITLE as 'website;title' lines.  When a website appears more
        than once, the longest title wins.

        BUG FIX: the original wrapped both fields in literal "u'...'" text
        (a Python 2 repr leftover), so the keys could never match the plain
        website strings looked up later; plain strings are stored instead.
        """
        dict_url_title = {}
        with open(self.CRAWL_TITLE, 'r') as file_open:
            for line in file_open:
                content = line.split(';')
                if len(content) == 2:
                    website = content[0]
                    title = content[1].strip()
                    if (website not in dict_url_title
                            or len(title) > len(dict_url_title[website])):
                        dict_url_title[website] = title
        return dict_url_title

    # ===================================================================================================
    # Combine data from different sources
    # Return X, Y

    @staticmethod
    def generate_website_title_category_one_source(data, dict_url_title):
        """Extract parallel (websites, titles, categories) lists from one source.

        data:           DataFrame with at least Website/Title/Class2 columns
                        (only Alexa additionally carries Title_Crawl).
        dict_url_title: website -> crawled title fallback for the other sources.
        """
        websites = []
        categories = []
        titles = []
        for _, row in data.iterrows():
            website = row['Website'].strip()
            title = row['Title']
            category = row['Class2'].strip()

            # Prefer the source's own Title_Crawl column; otherwise fall back
            # to the shared crawl file.
            title_crawl = np.nan
            if 'Title_Crawl' in row:
                title_crawl = row['Title_Crawl']
            elif website in dict_url_title:
                title_crawl = dict_url_title[website]

            # A float here means a missing Excel cell (NaN).  Take the crawled
            # title when the spreadsheet title is missing, or when the crawled
            # one is longer.
            # BUG FIX: the original re-read row['Title_Crawl'] here, which
            # raised KeyError for sources without that column; use the
            # already-selected title_crawl value.
            if not isinstance(title_crawl, float):
                if isinstance(title, float) or len(title_crawl) > len(title):
                    title = title_crawl

            if isinstance(title, (float, int)):
                title = str(title)  # numeric / missing titles become strings
            else:
                title = title.strip()

            websites.append(website)
            titles.append(title)
            categories.append(category)

        return websites, titles, categories

    def generate_website_title_category(self, datasets, title):
        """Combine all sources into X (samples) and Y (Class2 labels).

        datasets: dict source-name -> DataFrame.
        title:    if falsy, X holds bare websites; otherwise [website, title].
        """
        seen = set()  # deduplicate websites across sources in O(1) per lookup
        Xs = []
        Ys = []
        dict_url_title = self.read_crawled_title()
        for key in datasets:
            websites, titles, categories = \
                ExtractData.generate_website_title_category_one_source(
                    datasets[key], dict_url_title)
            for website, one_title, category in zip(websites, titles, categories):
                website = website.split('://')[-1]  # strip the URL scheme
                if website in seen:
                    continue
                seen.add(website)
                Ys.append(category)
                Xs.append([website, one_title] if title else website)
        return Xs, Ys

    def variant_label(self):
        """Coarsen each label to 'Interest.<first word>'; returns self for chaining."""
        self.Y = ["Interest." + label.split(" ")[0] for label in self.Y]
        return self
        
    
if __name__ == '__main__':
    import os

    # Run from the project root so the relative Data/Label paths resolve.
    os.chdir("..")

    extractor = ExtractData(['Alexa', 'Search', 'World68', 'Hao123'], title=True)
    print(len(extractor.X), len(extractor.Y))
            
