#!/usr/bin/python
# -*- coding:utf-8 -*-
import datetime
from bs4 import BeautifulSoup
from ext import logging


class Parser(object):
    def parser(self, html, url):
        """
        HTML document parser for the directory site.

        Dispatches on the shape of *url* / page content:
          * site-root navigation page -> district + street link records (type 1)
          * street listing page ('乡镇导航' present) -> company summaries (type 2)
          * company detail page ('.htm' in url) -> full company record (type 3)

        :param html: raw HTML document (bytes or str)
        :param url: the URL this document was fetched from
        :return: tuple ``(links, data)`` — ``links`` is a list of URLs to crawl
                 next (or None), ``data`` is a dict describing parsed records
                 (or None for unrecognized pages)
        """
        soup = BeautifulSoup(html, 'html.parser', from_encoding='utf-8')

        if len(url[:-1].replace('http://', '').split('/')) == 1:
            # Site-root navigation page: harvest district and street links.
            links = list()
            district_list = list()
            # District entries are <dt><a> nodes; links whose text contains
            # '网' are site-name links, not districts — skip them.
            for node in [i for i in soup.select('dt a') if '网' not in i.text]:
                href = str(node.attrs.get('href'))
                district_list.append({
                    'text': str(node.text),
                    'url': href,
                    # URL layout assumed: http://host/<district>/... — TODO confirm
                    'spelling': href.replace('http://', '').split('/')[1]
                })
            street_list = list()
            # Street/town entries are <dd><a> nodes; filter out city-wide
            # ('南充'), site-name ('网') and informational ('/info/') links.
            for node in [i for i in soup.select('dd a') if
                         '南充' not in i.text and '网' not in i.text and '/info/' not in i.attrs.get('href')]:
                try:
                    href = str(node.attrs.get('href'))
                    street_list.append({
                        'text': str(node.text),
                        'url': href,
                        # URL layout assumed: http://host/<district>/<street>/
                        'district_spelling': href.replace('http://', '').split('/')[1],
                        'spelling': href.replace('http://', '').split('/')[2]
                    })
                    links.append(href)
                except Exception as e:
                    # Best-effort: a malformed link is logged and skipped so the
                    # rest of the page still parses.
                    logging.error(str(e))
            data = {
                'type': 1,
                'district_list': {
                    'table_name': 'district',
                    'data_list': district_list
                },
                'street_list': {
                    'table_name': 'street',
                    'data_list': street_list
                }
            }
            return links, data
        elif any('乡镇导航' in i.text for i in soup.find_all('dt')):
            # Street listing page: collect company summaries plus pagination.
            links = list()
            pages = soup.find(class_='pages')
            if pages:  # collect pagination URLs so every listing page is crawled
                links = [a.attrs.get('href') for a in pages.find_all('a')]

            # Split the URL to recover district/street identifiers for this page.
            url_info = url[:-1].replace('http://', '').split('/')
            district_spelling = url_info[1]
            street_spelling = url_info[2]

            co_list = list()
            for co in soup.select('.companylist li h4 a'):
                href = str(co.attrs.get('href'))
                co_list.append({
                    'page_url': href,
                    'title': str(co.text),
                    # Company id is the detail-page filename minus the '.htm'
                    # extension — TODO confirm against actual URLs.
                    'id': href.replace('http://', '').split('/')[3][:-4],
                    'update_at': str(datetime.datetime.now()),
                    'district_spelling': district_spelling,
                    'street_spelling': street_spelling
                })
                links.append(href)
            data = {
                'type': 2,
                'co_list': {
                    'table_name': 'company',
                    'data_list': co_list
                }
            }
            return links, data
        elif '.htm' in url:
            # Company detail page: map fixed-position <dd> entries to columns.
            # NOTE(review): positional indexing assumes the site's field order
            # never changes; an IndexError here means the layout shifted.
            contact_info = soup.select('#contact dd')
            gongshang_info = soup.select('#gongshang dd')
            data = {
                'type': 3,
                'data': {
                    'table_name': 'company',
                    'column': {
                        'id': url.replace('http://', '').split('/')[3][:-4],
                        'intro': str(soup.find(class_='boxcontent').text),
                        'address': str(contact_info[0].text),
                        'phone': str(contact_info[1].text),
                        'director': str(contact_info[2].text),
                        'chairman_phone': str(contact_info[3].text),
                        'email': str(contact_info[4].text),
                        'postcode': str(contact_info[5].text),
                        'fax': str(contact_info[6].text),
                        'legal_person_name': str(gongshang_info[0].text),
                        'main_products': str(gongshang_info[1].text),
                        'business_scope': str(gongshang_info[2].text),
                        'business_license': str(gongshang_info[3].text),
                        'corporate_representative': str(gongshang_info[4].text),
                        'company_create_at': str(gongshang_info[5].text),
                        'staff_num': str(gongshang_info[6].text),
                        'registered_fund': str(gongshang_info[7].text),
                        # Index 8 is intentionally skipped in the original
                        # mapping — presumably an unused field; verify.
                        'the_category': str(gongshang_info[9].text),
                        'the_area': str(gongshang_info[10].text),
                        'good_faith_degree': str(gongshang_info[11].text),
                        'popular_value': str(gongshang_info[12].text)
                    }
                }
            }
            return None, data
        else:
            # Any other page: nothing to extract, nothing to crawl.
            return None, None
