#!/usr/bin/env python
# coding:utf-8

#
# 將聚會手記作後製整理
#
# Author: Chun-Yu Lee (Mat) <matlinuxer2@gmail.com>
# Copyright: Chun-Yu Lee (Mat) <matlinuxer2@gmail.com>
# License: MIT
#

from xmlrpclib import ServerProxy
from common import *


#
# Script parameters
#
read_settings_from_file()
api_user = volatile_settings['wikidot_api_user']
api_key = volatile_settings['wikidot_api_key']
site = 'hackingthursday'

#
# Script data structures
#
# XML-RPC endpoint with HTTP basic-auth credentials embedded in the URL.
proxy = ServerProxy('https://%s:%s@www.wikidot.com/xml-rpc-api.php' % (api_user, api_key))

selecte_pages = []  # page names selected for the generated menu, in final order
collection = []
metadatas = []      # (page_name, metadata_dict) pairs fetched from the API

# Static/service pages that must never appear in the generated menu.
mask_pages = [
    'faq',
    'funding',
    'invite',
    'photoandvideo',
    'routine',
    'sandbox',
    'start',
]


#
# End of data definitions
#


#
# Main routine below
#


# Pick the pages that should appear in the menu: skip dated meeting
# notes (named like "2012-03-15-..."), pages in any category prefix
# (e.g. "nav:top"), and the static pages listed in mask_pages.
all_pages = proxy.pages.select({'site' : site})
for page in all_pages:
	prefix = ""
	if len(page.split(':')) > 1:
		prefix = page.split(':')[0]

	# Guard the positional tests with a length check: the original
	# indexing (page[4], page[7]) raised IndexError for names shorter
	# than 8 characters that happen to start with "20".
	is_dated = (len(page) >= 8 and page[0:2] == "20"
	            and page[4] == "-" and page[7] == "-")

	if is_dated:
		pass
	elif prefix != "":
		pass
	elif page not in mask_pages:
		selecte_pages += [ page ]

# 依更新日期作排序，最新的擺前面
for i in range( 0, selecte_pages.__len__(), 10 ):
	if i+10 <= selecte_pages.__len__():
		metadatas += proxy.pages.get_meta( {'site' : site, 'pages': selecte_pages[i:i+10] } ).items()
	else:
		metadatas += proxy.pages.get_meta( {'site' : site, 'pages': selecte_pages[i:selecte_pages.__len__()] } ).items()

metadatas.sort(key=lambda x:x[1].get('updated_at'), reverse=True) 
selecte_pages = []
print metadatas
for item in metadatas:
	selecte_pages.append( item[0] )

# 開始產生選單
page_of_index = ""
for page in selecte_pages:
	print "處理頁面: ", page
	metadata = proxy.pages.get_meta( {'site' : site, 'pages': [page] } )
	fullname = metadata.get(page).get('fullname').encode('utf8')
	title = metadata.get(page).get('title').encode('utf8')
	page_of_index += " * [[[%s|%s]]]\n" % ( fullname, title )


# 將新列表塞到主選單中
nav_page = "nav:top"
keyword_beg = "\n* [# Wiki]\n"
keyword_end = "[!--Wiki--]"

nav_top_page = proxy.pages.get_one({"site":site,"page":nav_page})
nav_top_content = nav_top_page["content"].encode('utf8')
k_start = nav_top_content.find( keyword_beg )
k_end = nav_top_content.find( keyword_end )

if k_start >= 0 and k_end >= 0 and k_end > k_start :
    k_end = k_end + keyword_end.__len__()

    new_nav_top_content=nav_top_content[0:k_start]+ keyword_beg +page_of_index + keyword_end +nav_top_content[k_end:]

    print new_nav_top_content
    proxy.pages.save_one({'site' : site, 'page' : nav_page, 'content' : new_nav_top_content })       

