# -*- coding: utf-8 -*-

import logging, os
import codecs, re, copy
from bs4 import BeautifulSoup
from configobj import ConfigObj

from django.conf import settings
from django.db import models, connections
from financial_daily.utils import WebUtils

logger = logging.getLogger(__name__)

class StocksCategoryInFoCrawler:
    """Crawl stock category information (industry / location / concept and
    CSRC official classification) from configured web sources and persist
    the resulting (category, stock_code) pairs to delimited text files.

    Configuration is read from ConfigObj files; crawling goes through
    ``WebUtils.open_url`` and parsing through regexes or BeautifulSoup,
    depending on the analytic template selected by the config's flag.
    """

    def parse_config_file(self, single_file):
        """Parse one crawler config file into argument dicts.

        Three types of config file can be parsed, corresponding to three
        analytic templates.  The keys of the dicts are the names defined
        in the analytic templates.

        Returns:
            For the 'csrc' template: a single ``general_args`` dict.
            Otherwise: ``(flag, industry_args, location_args, concept_args)``
            where ``flag`` is True for the tag-based template and False for
            the regex-based template.
        """
        general_args = {}
        config = ConfigObj(single_file)
        general_args['html_encoding'] = config['html_encoding']['charset']
        general_args['source_url'] = config['source_url']['url']
        general_args['target_url'] = config['target_url']['url']
        general_args['codes_args'] = config['codes_args']['codes_re']
        tags_paras_flag = config['flag']['flag']

        if tags_paras_flag == 'csrc':
            general_args['category_args'] = dict(config['category_args'].items())
            return general_args

        flag = tags_paras_flag == 'True'
        sections = ('industry', 'location', 'concept')
        section_args = {}

        if flag:
            # Tag-based template: each section shares category_args but
            # overrides 'first_attrs_re' with its own pattern.
            general_args['category_args'] = dict(config['category_args'].items())
            addings = dict(config['category_args_addings'].items())
            for name in sections:
                args = copy.deepcopy(general_args)
                args['category_args']['first_attrs_re'] = addings[name]
                section_args[name] = args
        else:
            # Regex-based template: each section supplies its own url/category
            # regex pair.
            for name in sections:
                args = copy.deepcopy(general_args)
                args['url_re'] = config[name]['url_re']
                args['category_re'] = config[name]['category_re']
                section_args[name] = args

        return (flag, section_args['industry'],
                section_args['location'], section_args['concept'])

    def build_url_by_regex(self, url_regex, category_regex, target_url, html_contents):
        """Extract (short_url, category) pairs from raw HTML via regexes.

        The two regexes must produce the same number of matches, pairwise
        aligned; otherwise the page is considered unparseable and no urls
        are produced.

        Returns:
            A set of (category, full_url) tuples (see ``__complete_url``).
        """
        short_urls = {}
        matched_urls = re.findall(url_regex, html_contents)
        matched_categories = re.findall(category_regex, html_contents)

        if matched_urls and len(matched_urls) == len(matched_categories):
            for raw_url, raw_category in zip(matched_urls, matched_categories):
                # findall yields tuples when a regex has multiple groups;
                # join the pieces back into a single string either way.
                short_urls[''.join(raw_url).strip()] = ''.join(raw_category).strip()

        return self.__complete_url(short_urls, target_url)

    def build_url_by_tags(self, tags_paras, target_url, html_contents):
        """Extract (short_url, category) pairs by navigating HTML tags.

        First narrows the document with ``first_tag``/``first_attrs_re``,
        then collects every ``second_tag`` link inside it.

        Returns:
            A set of (category, full_url) tuples (see ``__complete_url``).
        """
        short_urls = {}
        soup = BeautifulSoup(html_contents)
        sub_html = soup.findAll(tags_paras['first_tag'], tags_paras['first_attrs_re'])
        if not sub_html:
            # Fall back to matching the attribute value with a regex.
            # Bug fix: the result used to be assigned to a misspelled
            # variable ('sum_html') and was silently discarded.
            sub_html = soup.findAll(
                attrs={tags_paras['first_tag']: re.compile(tags_paras['first_attrs_re'])})

        sub_html = BeautifulSoup(str(sub_html))
        for link in sub_html.findAll(tags_paras['second_tag']):
            attr_value = link.get(tags_paras['second_attrs'])
            if attr_value is None:
                # Tag lacks the expected attribute; re.findall(None) would crash.
                continue
            temp_url = ''.join(re.findall(tags_paras['second_re'], attr_value)).strip()
            # When the regex extracts nothing, keep the raw attribute value.
            short_urls[temp_url or attr_value] = link.get_text()

        return self.__complete_url(short_urls, target_url)

    def build_csrc_url(self, tags_paras, target_url, html_contents):
        """Build crawl urls from CSRC's official classification page.

        Pairs each top-level industry name with its sub-industry
        (short_url, name) entries; the category stored per url is
        ``industry<CRAWL_COLUMNS_DELIMITER>sub_industry``.

        Raises:
            Exception: when the industry / sub-industry counts disagree.
        """
        short_urls = {}
        industry_classify = []
        sub_industry_classify = []

        soup = BeautifulSoup(html_contents)
        industry_html = soup.findAll(tags_paras['first_tag'], tags_paras['first_attrs'])
        sub_industry_html = soup.findAll(tags_paras['sub_first_tag'], tags_paras['sub_first_attrs'])

        # Get CSRC's official industry classification names.
        industry_html = BeautifulSoup(str(industry_html))
        for item in industry_html.findAll(tags_paras['second_tag']):
            industry_classify.append(item.get_text().strip())

        # Get CSRC's official sub-industry (short_url, name) pairs, grouped
        # per top-level industry.
        for item in sub_industry_html:
            item = BeautifulSoup(str(item))
            pairs = []
            for temp in item.findAll(tags_paras['second_tag']):
                short_url = ''.join(
                    re.findall(tags_paras['second_re'],
                               temp.get(tags_paras['second_attrs']))).strip()
                pairs.append((short_url, temp.get_text().strip()))
            sub_industry_classify.append(pairs)

        # Each top-level industry must have exactly one sub-industry group.
        if len(industry_classify) != len(sub_industry_classify):
            logger.error("the csrc classify is wrong! Please checkout.")
            raise Exception("the csrc classify is wrong!")

        for industry, pairs in zip(industry_classify, sub_industry_classify):
            for short_url, sub_name in pairs:
                short_urls[short_url] = (
                    industry + settings.CRAWL_COLUMNS_DELIMITER + sub_name)

        return self.__complete_url(short_urls, target_url)

    def __complete_url(self, cate_urls, target_url):
        """Expand the target-url template for every short url.

        ``target_url`` is a template defined in the analytic templates that
        contains the placeholder 'url' and, optionally, 'PageNumber'.

        Args:
            cate_urls: dict mapping short_url -> category string.
            target_url: url template.

        Returns:
            A set of (category, full_url) tuples; when the template is
            paginated, pages 0 .. DEFAULT_PAGE_NUMBER-1 are generated.
        """
        stock_urls = set()
        for short_url, category in cate_urls.items():
            parsed_url = target_url.replace('url', short_url)
            # Bug fix: str.find returns -1 (truthy) when absent and 0
            # (falsy) when the placeholder starts the string, so the old
            # truthiness test mis-detected pagination; use membership.
            if 'PageNumber' in parsed_url:
                for page in range(settings.DEFAULT_PAGE_NUMBER):
                    stock_urls.add((category, parsed_url.replace("PageNumber", str(page))))
            else:
                stock_urls.add((category, parsed_url))

        return stock_urls

    def crawl_stocks_codes(self, urls, analyze_para):
        """Fetch every (category, url) pair and extract stock codes.

        Args:
            urls: iterable of (category, full_url) tuples.
            analyze_para: regex extracting stock codes from a page.

        Returns:
            A set of (category, stock_code) tuples.
        """
        stock_codes = set()
        for category, url in urls:
            html_contents = WebUtils.open_url(url, settings.DEFAULT_ENCODING,
                                              settings.DEFAULT_ENCODING)
            if not html_contents:
                continue
            for match in re.findall(analyze_para, html_contents):
                # Multi-group matches come back as tuples; flatten to a string.
                stock_codes.add((category, ''.join(match).strip()))

        return stock_codes

    def write_file(self, contents, file_path, column_delimiter):
        """Write row tuples to ``file_path``, one row per line, every field
        followed by ``column_delimiter``."""
        with codecs.open(file_path, mode="w", encoding=settings.DEFAULT_ENCODING,
                         errors="ignore") as writer:
            for row in contents:
                for field in row:
                    writer.write(field)
                    writer.write(column_delimiter)
                writer.write('\n')
        logger.info("%d data has been written to %s.", len(contents), file_path)

    def read_file(self, file_path):
        """Read and return all lines of ``file_path`` (with line endings)."""
        with codecs.open(file_path, mode="r", encoding=settings.DEFAULT_ENCODING,
                         errors="ignore") as reader:
            lines = reader.readlines()
        logger.info("%d data has been read from %s.", len(lines), file_path)

        return lines

    def crawl_data(self, crawl_paras, write_file_name, tags_paras = True):
        """Crawl one stock-category source and persist the results.

        Args:
            crawl_paras: argument dict from ``parse_config_file``.
            write_file_name: output file name under CRAWL_STOCKS_INFO_FILE_PATH.
            tags_paras: True for the tag-based template, False for regex-based.
        """
        try:
            html_contents = WebUtils.open_url(crawl_paras['source_url'],
                                              crawl_paras['html_encoding'],
                                              settings.DEFAULT_ENCODING
                                             )
        except Exception:
            logger.error("Source url encountered some problems, maybe do not crawl any data.")
            return

        try:
            if tags_paras:
                category_urls = self.build_url_by_tags(crawl_paras['category_args'],
                                                       crawl_paras['target_url'],
                                                       html_contents
                                                      )
            else:
                category_urls = self.build_url_by_regex(crawl_paras['url_re'],
                                                        crawl_paras['category_re'],
                                                        crawl_paras['target_url'],
                                                        html_contents
                                                       )
            stocks_category = self.crawl_stocks_codes(category_urls, crawl_paras['codes_args'])

            # Only persist when the crawl looks complete (sanity threshold).
            if len(stocks_category) > settings.DEFAULT_STOCKS_TOTAL_NUMBER:
                file_path = os.path.join(settings.CRAWL_STOCKS_INFO_FILE_PATH, write_file_name)
                self.write_file(stocks_category, file_path, settings.CRAWL_COLUMNS_DELIMITER)
        except Exception as e:
            logger.exception(e)

    def crawl_csrc(self, crawl_paras, write_file_name):
        """Crawl CSRC's official stock classification and persist the results."""
        try:
            html_contents = WebUtils.open_url(crawl_paras['source_url'],
                                              crawl_paras['html_encoding'],
                                              settings.DEFAULT_ENCODING
                                             )
        except Exception:
            logger.error("Source url encountered some problems, maybe do not crawl any data.")
            return

        try:
            category_urls = self.build_csrc_url(crawl_paras['category_args'],
                                                crawl_paras['target_url'],
                                                html_contents
                                               )
            stocks_category = self.crawl_stocks_codes(category_urls, crawl_paras['codes_args'])

            if stocks_category:
                file_path = os.path.join(settings.CRAWL_STOCKS_INFO_FILE_PATH, write_file_name)
                self.write_file(stocks_category, file_path, settings.CRAWL_COLUMNS_DELIMITER)
        except Exception as e:
            logger.exception(e)

