# -*- coding: utf-8 -*-
# article_info.py
# Created by Hardy on 26th, Jan
# Copyright 2017 杭州网川教育有限公司. All rights reserved.
import re
import zlib
from bs4 import BeautifulSoup
from querier.esquerier import ElasticSearchQuerier
import psycopg2


class StarInfoQuerier(ElasticSearchQuerier):
    """Looks up celebrity ("star") records in the PostgreSQL ``baike_star``
    table of crawled Baike pages and extracts structured profile data from
    the zlib-compressed HTML stored in each row.

    The Elasticsearch parent is initialised with dummies; this querier only
    talks to PostgreSQL.
    """

    # Matches bracketed citation markers such as "[1]" that Baike embeds
    # in article text.
    _CITATION_RE = re.compile(r'\[\d+\]')

    def __init__(self, article_search, config):
        """``config``: dict with 'host', 'port', 'user', 'password', 'database'."""
        super(StarInfoQuerier, self).__init__(None, None, None)
        self.article_search = article_search
        self.host = config['host']
        self.port = config['port']
        self.user = config['user']
        self.password = config['password']
        self.db = config['database']

    def search(self, args):
        """Search stars whose name contains ``args['term']``.

        ``args``: {'term': str, 'arg_dict': {'offset': int, 'limit': int}}
        Returns {'result': [{'name': ..., 'info': {...}}, ...]}. All errors
        are best-effort: printed and skipped, yielding a partial/empty result.
        """
        arg_dict = args.get('arg_dict') or {}
        offset = arg_dict.get('offset', 0)
        limit = arg_dict.get('limit', 10)
        term = args.get('term', '')

        res = []
        for row in self._fetch_rows(term, limit, offset):
            # BUG FIX: the original tested `d[0].strip() is None`, which is
            # always False (str.strip never returns None), so blank names
            # were never actually skipped.
            if not row[0] or not row[0].strip():
                continue
            try:
                # BUG FIX: the original had a trailing comma that turned the
                # decoded HTML into a 1-tuple and then parsed str(tuple),
                # feeding corrupted markup to BeautifulSoup.
                html = zlib.decompress(row[4]).decode('utf-8')
            except Exception as e:
                # One undecodable row no longer aborts the remaining rows.
                print(str(e))
                continue
            info = self._extract_info(BeautifulSoup(html, 'html.parser'))
            info.update({'name': row[0], 'img': row[1],
                         'gender': row[2], 'area': row[3]})
            res.append(info)

        result = [{'name': d['name'], 'info': d} for d in res]
        return {'result': result}

    def _fetch_rows(self, term, limit, offset):
        """Fetch matching rows from baike_star; returns [] on any DB error."""
        conn = None
        try:
            conn = psycopg2.connect(database=self.db, user=self.user,
                                    password=self.password,
                                    host=self.host, port=self.port)
            cur = conn.cursor()
            try:
                # SECURITY FIX: parameterized query instead of the original
                # `sql % (...)` string interpolation (SQL-injection risk).
                cur.execute(
                    """select name, img, gender, area, html, crawl_ts
                       from baike_star where name like %s
                       limit %s offset %s""",
                    ('%' + term + '%', limit, offset))
                return cur.fetchall()
            finally:
                cur.close()
        except Exception as e:
            print(str(e))
            return []
        finally:
            if conn:
                conn.close()

    @classmethod
    def _clean(cls, text):
        # Drop non-breaking spaces and "[n]" citation markers.
        # BUG FIX: the original replaced the literal two-character sequence
        # '\\xa0', which never matches the real NBSP character emitted by
        # get_text().
        return cls._CITATION_RE.sub('', text.replace('\xa0', '').strip())

    @staticmethod
    def _slider_items(soup, block_class, text_key, url_key):
        """Extract [{text_key: ..., url_key: ...}, ...] from one slider
        carousel block; returns [] if the block is absent or malformed."""
        try:
            items = soup.find(attrs={'class': block_class}) \
                .find(attrs={'class': 'slider maqueeCanvas'}).select('li')
            return [{text_key: li.find(attrs={'class': 'name'}).get_text(),
                     url_key: li.find('img').get('src')}
                    for li in items]
        except Exception as e:
            print(str(e))
            return []

    def _join_section(self, divs, tail):
        """Concatenate cleaned text of ``divs[1:tail]``, skipping short
        scaffolding divs (<= 15 chars)."""
        parts = []
        for div in divs[1:tail]:
            content = div.get_text()
            if len(content) > 15:
                parts.append('  ' + self._clean(content))
        return ''.join(parts)

    def _extract_sections(self, soup):
        """Split the main tab into level-2 sections and return the three
        biography text blobs (young process, performing experience,
        personal life); empty strings when the layout is missing."""
        young_divs, perform_divs, personal_divs = [], [], []
        try:
            div_list = soup.find(
                attrs={'class': 'main_tab main_tab-defaultTab curTab'}).select('div')
            level2 = [i for i, div in enumerate(div_list)
                      if 'class="para-title level-2' in str(div)]
            # BUG FIX: the original required `len(level2_list) > 4` (five
            # markers) although only level2[0]..level2[3] are used — an
            # off-by-one that dropped pages with exactly four headings.
            if len(level2) >= 4:
                young_divs = div_list[level2[0]:level2[1]]
                perform_divs = div_list[level2[1]:level2[2]]
                personal_divs = div_list[level2[2]:level2[3]]
        except Exception as e:
            # ROBUSTNESS FIX: the original let a missing main-tab node raise
            # and abort the whole result set; treat it as "no sections".
            print(str(e))
        # Slice bounds ([1:-2] vs [1:-1]) preserved from the original.
        return (self._join_section(young_divs, -2),
                self._join_section(perform_divs, -1),
                self._join_section(personal_divs, -1))

    def _extract_info(self, soup):
        """Pull all structured fields out of one parsed Baike page.

        Each sub-extraction is best-effort: failures are printed and the
        field falls back to an empty value.
        """
        try:
            star_info = ''.join(
                x.get_text() for x in soup.findAll(
                    attrs={'class': 'lemmaWgt-lemmaSummary lemmaWgt-lemmaSummary-light'}))
        except Exception as e:
            print(e)
            star_info = ''

        representative_works = self._slider_items(
            soup, 'star-info-block works', 'title', 'image_url')
        star_relationship = self._slider_items(
            soup, 'star-info-block relations', 'name', 'avatar_url')

        try:
            basic_desc = [self._clean(x.get_text()) for x in soup.findAll(
                attrs={'class': 'lemma-summary', 'label-module': 'lemmaSummary'})]
        except Exception as e:
            print(str(e))
            basic_desc = []

        try:
            item_names = [x.get_text().strip().replace('\xa0', '')
                          for x in soup.findAll(
                              attrs={'class': "basicInfo-item name"})]
            # '收起' ("collapse") is UI chrome leaking into the value text.
            item_values = [self._clean(x.get_text().replace('收起', ''))
                           for x in soup.findAll(
                               attrs={'class': "basicInfo-item value"})]
            basic_info = dict(zip(item_names, item_values))
        except Exception as e:
            print(str(e))
            basic_info = {}

        young_process_content, performing_experience, personal_life = \
            self._extract_sections(soup)

        return {
            'star_info': star_info,
            'representative_works': representative_works,
            'star_relationship': star_relationship,
            'basic_info': basic_info,
            'basic_desc': basic_desc,
            'young_process_content': young_process_content,
            'performing_experience': performing_experience,
            'personal_life': personal_life,
        }

    def _build_query(self, args):
        # Not used: this querier bypasses Elasticsearch entirely.
        pass

    def _build_result(self, result, param):
        # Not used: this querier bypasses Elasticsearch entirely.
        pass
