#!/usr/bin/env python
# -*- coding: utf-8 -*-

#!/usr/bin/env python
#-*- coding:utf-8 -*-

#import webkit
from lxml import etree
import os, re, requests
from DataBase import DataBase

class ParseData():
    """Pull rendered HTML out of a WebKit view and parse pagination links.

    The embedded WebKit API used here has no direct "give me the DOM" call,
    so get_html() smuggles the document markup out through document.title.
    """

    def __init__(self, view):
        # view: a webkit WebView-like object (execute_script / get_main_frame).
        self.view = view
        # Re-capture the HTML every time a page finishes loading.
        self.view.connect("load_finished", self.get_html)
        self.html = self.get_html()

    def get_html(self, *argv):
        """Return the current page's HTML.

        Extra positional args are accepted so this method can double as the
        "load_finished" signal callback.
        """
        # Stash the real title, replace it with the full markup, read it back
        # through get_title(), then restore the original title.
        self.view.execute_script(
            'oldtitle=document.title;document.title=document.documentElement.innerHTML;')
        html = self.view.get_main_frame().get_title()
        self.view.execute_script('document.title=oldtitle;')
        return html

    def get_next_page_url(self, tag = '//a[@id="pnnext"]'):
        """Return the absolute URL of the "next page" link, or None.

        tag -- XPath expression locating the next-page anchor element.
        """
        mainframe = self.view.get_main_frame()
        uri = mainframe.get_uri()
        # Rebuild "scheme://host" from the current page URI.
        site = uri.split('/')[0] + '//' + uri.split('/')[2]

        # BUG FIX: the original left the result unbound (UnboundLocalError at
        # the return) whenever self.html was empty.
        next_page_url = None
        if self.html:
            tree = etree.HTML(self.html)
            anchors = tree.xpath(tag)
            if anchors:
                next_page_url = site + anchors[0].get('href')
        return next_page_url

    def get_total_pages(self, tag='//div[@class="pages"]/text()'):
        """Return the raw pagination text node (e.g. "1/5...") or None.

        tag -- XPath expression selecting the text node holding the count.
        """
        total_pages = None
        if self.html:
            tree = etree.HTML(self.html)
            page_num = tree.xpath(tag)
            if page_num:
                total_pages = page_num[0]
        return total_pages

class GetUrl(ParseData):
    """Collect result links from a search page into self.url_dict.

    Supported url_type values:
      'diy'    -- caller supplies the extraction tag: "R:<regex>" or "X:<xpath>"
      'google' -- Google result page
      'baidu'  -- Baidu result page (redirector links are resolved first)
      'order'  -- only sets pagination tags; extraction is handled elsewhere
    """

    def __init__(self, view, url_type, diy_tag=None):
        ParseData.__init__(self, view)
        self.view = view
        self.type = url_type
        self.url_dict = {}
        # Defaults so an unknown url_type no longer leaves these unset.
        self.nextpage_tag = ''
        self.url_tag = None

        if self.type == 'diy':
            self.nextpage_tag = ''
            self.url_tag = diy_tag
        elif self.type == 'google':
            self.nextpage_tag = '//a[@id="pnnext"]'
            self.url_tag = '//h3[@class="r"]'
        elif self.type == 'baidu':
            self.nextpage_tag = '//a[@class="n"]'
            self.url_tag = '//h3[@class="t"]'
        elif self.type == 'order':
            self.nextpage_tag = '//a[@class="nextPage"]'
            self.totalpage_tag = '//div[@class="pages"]/text()'

    def get_page_data(self):
        """Refresh self.html and dispatch to the extractor for self.type."""
        self.html = self.get_html()
        if self.type == 'diy':
            self.get_diy_url()
        elif self.type == 'google':
            print('get google page ...')
            self.get_google_url()
        elif self.type == 'baidu':
            print('get baidu page ...')
            self.get_baidu_url()
        return self.url_dict

    def get_google_url(self):
        """Parse Google result headline anchors into self.url_dict."""
        if self.html:
            tree = etree.HTML(self.html)
            sorter = SortUrl()
            for h in tree.xpath(self.url_tag):
                anchor = h.xpath('a')[0]
                url = anchor.get('href')
                text = anchor.xpath('text()')
                if text:
                    text = text[0]
                sorter.make_url_dict(self.url_dict, [url], text)
            return self.url_dict

    def get_baidu_url(self):
        """Parse Baidu results; each link is a redirector that must be resolved."""
        if self.html:
            tree = etree.HTML(self.html)
            sorter = SortUrl()
            for h in tree.xpath(self.url_tag):
                anchor = h.xpath('a')[0]
                url = anchor.get('href')
                text = anchor.xpath('text()')[0]
                # Ask the redirector for its target without following it.
                # BUG FIX: headers.get() instead of headers['Location'] -- a
                # non-redirect response no longer raises KeyError (which made
                # the `if url:` guard below unreachable).
                url = requests.get(url,  allow_redirects=False).headers.get('Location')
                if url:
                    sorter.make_url_dict(self.url_dict, [url], text)
            return self.url_dict

    def get_diy_url(self):
        """Extract links using the caller-supplied tag.

        The tag must be "R:<regex>" (findall over the raw HTML) or
        "X:<xpath>" (anchor elements providing href and text).
        """
        # Guard against a missing tag (diy_tag defaults to None).
        if not self.url_tag:
            print('Please start with "R:" or "X:"')
            return

        # BUG FIX: split once only, so regexes/xpaths containing ':' (e.g.
        # "R:https?://..." ) are no longer rejected.
        parts = self.url_tag.split(':', 1)
        if len(parts) != 2:
            print('Please start with "R:" or "X:"')
            return

        if not self.html:
            print('Get no html data !!!')
            return

        mode, expr = parts
        sorter = SortUrl()
        if mode == 'R':
            for url in re.findall(expr, self.html):
                sorter.make_url_dict(self.url_dict, [url])
            return self.url_dict

        if mode == 'X':
            tree = etree.HTML(self.html)
            for u in tree.xpath(expr):
                href = u.get('href')
                text = u.xpath('text()')[0]
                sorter.make_url_dict(self.url_dict, [href], text)
            return self.url_dict

        # BUG FIX: the original printed "Get no html data !!!" for an unknown
        # prefix; report the actual problem instead.
        print('Please start with "R:" or "X:"')
        return

class SortUrl():
    """Group URLs by site and count how often each path occurs.

    Dict layout:
        {'http://host': {'path': {'/dir1': 1, '/file1': 2}, 'text': 'label'}}
    """

    def make_url_dict(self, url_dict=None, url=None, text=None):
        """Merge every URL in *url* into *url_dict*.

        BUG FIX: the original used mutable default arguments ({} and []),
        which are shared between calls; None sentinels keep the call
        signature compatible while giving each call fresh containers.
        """
        if url_dict is None:
            url_dict = {}
        if url is None:
            url = []
        for u in url:
            site, uri = self.get_site(u)
            self.add_one(url_dict, site, uri, text)
        return

    def get_site(self, url):
        """Split *url* into ('scheme://host', '/path').

        Returns (None, None) (after printing a warning) when the URL has no
        'scheme://' part. The query string is discarded.
        """
        url = url.split('?')[0]
        if len(url.split('://')) == 2:
            schem = url.split('://')[0] + '://'
            site = url.split('://')[1].split('/')
            if len(site) > 1:
                domain = site[0]
                if site[1]:
                    uri = '/' + '/'.join(site[1:])
                else:
                    uri = '/'
            else:
                domain = site[0]
                uri = '/'
            site = schem + domain
        else:
            print('URL must start with \'http\'!\nThis is: %s' % url)
            site = None
            uri = None
        return site, uri

    def add_one(self, url_dict, site, uri, text=None):
        """Increment the hit counter for (site, uri), creating entries as needed.

        The display *text* is only recorded the first time a site is seen.
        """
        if not site:
            print('Site is None!')
            return
        # `in` instead of the deprecated dict.has_key().
        if site in url_dict:
            paths = url_dict[site]['path']
            paths[uri] = paths.get(uri, 0) + 1
        else:
            url_dict[site] = {'path': {uri: 1}, 'text': text}
        return

    def parse_url_dict(self, dit=None):
        """Yield one (site, paths, text) triple per site in *dit*.

        BUG FIX: mutable default argument replaced by a None sentinel.
        """
        if dit is None:
            dit = {}
        for site in dit.keys():
            yield (site, dit[site]['path'].keys(), dit[site]['text'])

class GetOrder(ParseData):
    """Scrape the cash-back ("fanli") order table from the current page and
    persist each row into a local SQLite table via the project DataBase class.
    """

    # Schema for the order table; one row per scraped order.
    sql_table = """create table 'mm'('cite_name' Text not null, 
                                       'cite_url' text not null,
                                       'order_num' text not null,
                                       'track_time' text,
                                       'amount' text null,
                                       'fanli' text null,
                                       'status' text not null)"""
    sql_save = """ insert into 'mm' values (?, ?, ?, ?, ?, ?, ?)"""

    def __init__(self, view, db='51.db'):
        ParseData.__init__(self, view)
        self.view = view
        self.dbname = db
        # Running totals accumulated across pages.
        self.fanli_already = 0.0   # cash already refunded
        self.fanli_willbe = 0.0    # cash still pending refund
        self.fanli_Fb = 0          # "F coin" points
        self.unuse_num = 0         # count of invalid orders
        self.column_names = ['序号', '购物网站', '订单号', '跟踪时间' , '订单金额', '返利金额', '返利状态']
        self.nextpage_tag = '//a[@class="nextPage"]'
        self.totalpage_tag = '//div[@class="pages"]/text()'
        self.last_url = ''
        self.createDB()
        self.order_list = []

    def login(self):
        # Placeholder: login is performed interactively in the browser view.
        pass

    def _accumulate_fanli(self, cells, done):
        """Fold one fanli table cell into the running totals.

        cells -- xpath result (list of text nodes); may be empty.
        done  -- True for already-refunded orders, False for pending ones.
        Returns the raw cell text, or '' when the cell is missing.
        """
        if not cells:
            return ''
        fanli = cells[0]
        if fanli.find('F') != -1:
            # "F coin" reward, e.g. "12F..." -- count the integer part.
            fb = re.search(r'\d{1,4}', fanli)
            self.fanli_Fb += int(fb.group())
        elif done:
            self.fanli_already += float(fanli.replace('元', ''))
        else:
            self.fanli_willbe += float(fanli.replace('元', ''))
        return fanli

    def get_page_data(self):
        """Parse every order row on the current page, update the totals,
        save the rows to the DB and return the accumulated order list."""
        data_save = []
        self.html = self.get_html()
        print('Get order ...')
        if self.html:
            tree = etree.HTML(self.html)
            orderlist = tree.xpath('//tr[@class="J-order-list"]')
            for order in orderlist:
                site_url = order.xpath('td[@class="td-site"]/a')[0].get('href')
                site_name = order.xpath('td[@class="td-site"]/a/text()')[0]
                or_number = order.xpath('td[@class="td-number"]/text()')[0]
                track_time = order.xpath('td[@class="td-time"]/text()')[0]
                amount = order.xpath('td[@class="td-amount"]/text()')[0]
                status = order.xpath('td[@class="td-status"]/text()')[0]
                # BUG FIX: default value so an unexpected status string can no
                # longer leave `fanli` unbound at the append below.
                fanli = ''
                if status == '等待返利':
                    fanli = self._accumulate_fanli(
                        order.xpath('td[@class="td-fanli"]/text()'), done=False)
                if status == '已返利':
                    fanli = self._accumulate_fanli(
                        order.xpath('td[@class="td-fanli"]/strong/text()'), done=True)
                if status == '无效订单':
                    fanli = ''
                    self.unuse_num += 1

                self.order_list.append([site_name + '\n' + site_url, or_number,
                                        track_time, amount, fanli, status])
                print('%s, %s, %s, %s, %s, %s, %s' % (site_name, site_url, or_number,
                                                      track_time, amount, fanli, status))
                data_save.append((site_name, site_url, or_number, track_time,
                                  amount, fanli, status))

            conn = self.db.Connect(self.dbname)
            self.db.InsertData(conn, self.sql_save, data_save)
            # Summary row appended after the per-order rows.
            self.order_list.append(['已返利'+':'+str(self.fanli_already),
                                    '待返利'+':'+str(self.fanli_willbe),
                                    '返利共计'+':'+str(self.fanli_already+self.fanli_willbe), 
                                    'F币共计'+':'+str(self.fanli_Fb),
                                    None,
                                    '无效订单数'+':'+str(self.unuse_num)
                                    ])
        return self.order_list

    def get_total_page(self):
        """Return the total page count as an int, or None when the pagination
        element is missing.

        BUG FIX: the original raised UnboundLocalError when the element was
        absent because `total_pages` was only bound inside the if-branch.
        """
        page_num = self.get_total_pages(self.totalpage_tag)
        if not page_num:
            return None
        # Cell looks like "1/5<marker>"; the marker is the gbk-mojibake 'Ò³'
        # (presumably the page glyph -- TODO confirm against a live page).
        total_pages = page_num.split('/')[1].replace('Ò³'.decode('gbk').encode('utf-8'), '')
        print(int(total_pages))
        return int(total_pages)

    def createDB(self):
        """Open the DataBase helper and create the 'mm' table on first run."""
        self.db = DataBase()
        if not os.path.exists(self.dbname):
            conn = self.db.Connect(self.dbname)
            self.db.CreateTable(conn, self.sql_table)