#!/usr/bin/python
# -*- coding:utf-8 -*-

import urllib
import re
from .. import Util
import KeywordSearch


#Web search
class WebSearchEngine(KeywordSearch.SearchEngine):
    '''
    Yahoo! Japan web search (WebSearchService V1 REST API).

    #Example: fetch the top 100 results and keep only those whose
    #title contains "京都" (Kyoto)
    from yi01lib.SearchEngine import YahooJapan
    
    yahoo=YahooJapan.WebSearchEngine()
    yahoo.JapaneseOnly=True
    yahoo.ResultCount=100
    searchResults=yahoo.Search(u"KUCC")
    
    for result in (res for res in searchResults if u"京都" in res.Title):
        print result    
    '''
    def __init__(self):
        self._apiKey=Util.API_Key["YahooJapan"]
        self.ResultCount=50 #number of results to fetch
        self.JapaneseOnly=False #when True, restrict results to country=jp
    
    def Search(self,unicodedQuery):
        """Search for *unicodedQuery* (a unicode string) and return a
        KeywordSearch.SearchResultCollection of SearchResult objects."""
        query=unicodedQuery.encode("utf-8")
        return self._search(query,0,self.ResultCount)

    def _search(self,query,start,resultCount):
        #The Yahoo API serves at most 50 items per request, so larger
        #requests are split into 50-item chunks and merged (else branch).
        if resultCount<=50:
            url="http://search.yahooapis.jp/WebSearchService/V1/webSearch"
            #Yahoo's "start" parameter is 1-based.
            q={"appid":self._apiKey,"query":query,"results":resultCount,"start":start+1}
            if self.JapaneseOnly:
                q["country"]="jp"
            
            url+="?"+urllib.urlencode(q)
            
            #Download the result XML
            xml=Util.Download(url)
            
            mTotal=re.compile('totalResultsAvailable="([0-9]+)"').search(xml)
            totalCount=-1 if mTotal is None else int(mTotal.group(1))
            
            searchResults=KeywordSearch.SearchResultCollection(totalCount)
            
            for mResult in re.compile('<Result>(.*?)</Result>',re.M|re.S).finditer(xml):
                result=mResult.group(1)
                
                m=re.compile('<Title>(.*?)</Title>').search(result)
                title=m.group(1)
                #Summary may be absent; fall back to an empty description.
                m=re.compile('<Summary>(.*?)</Summary>',re.M|re.S).search(result)
                description="" if m is None else m.group(1)
                m=re.compile('<Url>(.*?)</Url>').search(result)
                url=m.group(1)
                m=re.compile('<ModificationDate>(.*?)</ModificationDate>').search(result)
                timestamp=int(m.group(1))
                
                searchResults.append(KeywordSearch.SearchResult(title,description,url,timestamp))
            return searchResults
            
        #More than 50 requested: download in chunks and merge.
        else:
            #Fix: cap the request at 1000 results, the same clamp the
            #sibling ImageSearchEngine applies — the API does not page
            #beyond that, so asking for more only wastes requests.
            if resultCount>1000: resultCount=1000
            res1=self._search(query,start,50)
            #Only fetch the next chunk if the API reports more results.
            if res1.TotalCount > start+50:
                res2=self._search(query,start+50,resultCount-50)
                res1.extend(res2)
                del res2
            return res1

#Image search
class ImageSearchEngine(KeywordSearch.SearchEngine):
    '''
    Yahoo! Japan image search (ImageSearchService V1 REST API).

    #Example: fetch the top 100 results and keep only those whose
    #title contains "京都" (Kyoto)
    from yi01lib.SearchEngine import YahooJapan
    
    yahoo=YahooJapan.ImageSearchEngine()
    yahoo.ResultCount=100
    searchResults=yahoo.Search(u"KUCC")
    
    for result in (res for res in searchResults if u"京都" in res.Title):
        print result    
    '''
    def __init__(self):
        self._apiKey=Util.API_Key["YahooJapan"]
        #Number of results to fetch.
        self.ResultCount=50
        #When True, ask the API for color (non-monochrome) images only.
        self.ColorImageOnly=False

    def Search(self,unicodedQuery):
        """Run an image search for *unicodedQuery* (unicode) and return
        a KeywordSearch.SearchResultCollection."""
        return self._search(unicodedQuery.encode("utf-8"),0,self.ResultCount)

    def _search(self,query,start,resultCount):
        #The Yahoo API serves at most 50 items per call; bigger requests
        #are split into 50-item chunks and merged.
        if resultCount>50:
            #The API refuses to page past the 1000th result.
            if resultCount>1000:
                resultCount=1000
            merged=self._search(query,start,50)
            #Fetch the rest only if the API reports more results.
            if merged.TotalCount>start+50:
                merged.extend(self._search(query,start+50,resultCount-50))
            return merged

        #Single request of at most 50 items. "start" is 1-based.
        params={"appid":self._apiKey,"query":query,"results":resultCount,"start":start+1}
        if self.ColorImageOnly:
            params["coloration"]="color"
        requestUrl="http://search.yahooapis.jp/ImageSearchService/V1/imageSearch"+"?"+urllib.urlencode(params)

        #Fetch the XML response.
        xml=Util.Download(requestUrl)

        totalMatch=re.compile('totalResultsAvailable="([0-9]+)"').search(xml)
        total=int(totalMatch.group(1)) if totalMatch is not None else -1

        collection=KeywordSearch.SearchResultCollection(total)

        #Compile the per-item patterns once, outside the loop.
        resultPattern=re.compile('<Result>(.*?)</Result>',re.M|re.S)
        titlePattern=re.compile('<Title>(.*?)</Title>')
        summaryPattern=re.compile('<Summary>(.*?)</Summary>',re.M|re.S)
        clickUrlPattern=re.compile('<ClickUrl>(.*?)</ClickUrl>')

        for hit in resultPattern.finditer(xml):
            body=hit.group(1)

            title=titlePattern.search(body).group(1)
            #Summary may be absent; fall back to an empty description.
            summaryMatch=summaryPattern.search(body)
            description=summaryMatch.group(1) if summaryMatch is not None else ""
            clickUrl=clickUrlPattern.search(body).group(1)

            #Image results carry no timestamp, hence None.
            collection.append(KeywordSearch.SearchResult(title,description,clickUrl,None))
        return collection
        
#Directory (category) search
class DirectorySearchEngine(KeywordSearch.SearchEngine):
    '''
    Yahoo! Japan directory (category) search.  Returns a
    DirectorySearchResult holding two lists: matching categories and
    matching registered sites.

    #Example: fetch the top category-search results
    from yi01lib.SearchEngine import YahooJapan
    
    dyahoo=YahooJapan.DirectorySearchEngine()
    dyahoo.ResultCount=20
    searchResults=dyahoo.Search(u"Yahoo")
    
    print "="*10 + "Directiries" + "="*10
    for result in searchResults.DirSearchResults:
        print result

    print "="*10 + "Sites" + "="*10
    for result in searchResults.SiteSearchResults:
        print result
    '''
    def __init__(self):
        self._apiKey=Util.API_Key["YahooJapan"]
        self.ResultCount=50 #number of results to fetch
        self.JapaneseOnly=False #when True, restrict results to country=jp
        self.Path=None #optional category path to search under

    def Search(self,unicodedQuery):
        """Search for *unicodedQuery* (unicode) and return a
        DirectorySearchResult."""
        query=unicodedQuery.encode("utf-8")
        return self._search(query,0,self.ResultCount)

    def _search(self,query,start,resultCount,):
        #The Yahoo API serves at most 50 items per request,
        #so larger requests are split into chunks (else branch below).
        if(resultCount<=50):
            url="http://dir.yahooapis.jp/Category/V1/directorySearch"
            #"start" is 1-based in the Yahoo API.
            q={"appid":self._apiKey,"query":query,"results":resultCount,"start":start+1}
            if self.JapaneseOnly:
                q["country"]="jp"
            if not self.Path is None:
                q["path"]=self.Path
            
            url+="?"+urllib.urlencode(q)
            
            #Download the result XML
            xml=Util.Download(url)
            
            #Split the response into the category block and the site block,
            #capturing each block's TotalCount attribute and inner items.
            dTotal=re.compile('<DirSearchResults TotalCount="([0-9]+)"[^>]*>(.*?)</DirSearchResults>',re.M|re.S).search(xml)
            dTotalCount=-1 if dTotal is None else int(dTotal.group(1))
            dItems="" if dTotal is None else dTotal.group(2)
            sTotal=re.compile('<SiteSearchResults TotalCount="([0-9]+)"[^>]*>(.*?)</SiteSearchResults>',re.M|re.S).search(xml)
            sTotalCount=-1 if sTotal is None else int(sTotal.group(1))
            sItems="" if sTotal is None else sTotal.group(2)
            
            dirSearchResults=DirectorySearchResult(dTotalCount,sTotalCount)
            
            #Parse the matching categories.
            for dResult in re.compile('<Item>(.*?)</Item>',re.M|re.S).finditer(dItems):
                result=dResult.group(1)
                
                m=re.compile('<Id>(.*?)</Id>').search(result)
                dirID=m.group(1)
                m=re.compile('<Title>(.*?)</Title>').search(result)
                title=m.group(1)
                m=re.compile('<Path>(.*?)</Path>').search(result)
                path=m.group(1)
                m=re.compile('<Url>(.*?)</Url>').search(result)
                url=m.group(1)
                m=re.compile('<Created>(.*?)</Created>').search(result)
                created=m.group(1)
                m=re.compile('<New>(.*?)</New>').search(result)
                isNew=(m.group(1)=="1")
                
                dItem=DirSearchResultItem(dirID,title,path,url,created,isNew)
                dirSearchResults.DirSearchResults.append(dItem)
                
            #Parse the matching registered sites.
            for sResult in re.compile('<Item>(.*?)</Item>',re.M|re.S).finditer(sItems):
                result=sResult.group(1)
                
                m=re.compile('<Parent>(.*?)</Parent>').search(result)
                parentID=m.group(1)
                m=re.compile('<ParentName>(.*?)</ParentName>').search(result)
                parentTitle=m.group(1)
                m=re.compile('<ParentCategoryUrl>(.*?)</ParentCategoryUrl>').search(result)
                parentCategoryUrl=m.group(1)
                m=re.compile('<Title>(.*?)</Title>').search(result)
                title=m.group(1)
                m=re.compile('<Summary>(.*?)</Summary>').search(result)
                summary=m.group(1)
                m=re.compile('<Url>(.*?)</Url>').search(result)
                url=m.group(1)
                m=re.compile('<Created>(.*?)</Created>').search(result)
                created=m.group(1)
                #Fold the New/Picks/Cool 0-or-1 flags into a single bitmask
                #using SiteSearchResultItem's NEW/PICK/COOL constants.
                m=re.compile('<New>(.*?)</New>').search(result)
                feature=int(m.group(1))*SiteSearchResultItem.NEW
                m=re.compile('<Picks>(.*?)</Picks>').search(result)
                feature+=int(m.group(1))*SiteSearchResultItem.PICK
                m=re.compile('<Cool>(.*?)</Cool>').search(result)
                feature+=int(m.group(1))*SiteSearchResultItem.COOL
                
                sItem=SiteSearchResultItem(parentID,parentTitle,parentCategoryUrl,title,url,summary,created,feature)
                dirSearchResults.SiteSearchResults.append(sItem)
            
            return dirSearchResults
            
        #More than 50 requested: download in chunks and merge.
        else:
            res1=self._search(query,start,50)
            #NOTE(review): this relies on each collection's TotalCount being
            #populated — confirm DirectorySearchResult forwards the totals
            #it receives into its SearchResultCollections.
            if res1.DirSearchResults.TotalCount > start+50 or res1.SiteSearchResults.TotalCount > start+50:
                res2=self._search(query,start+50,resultCount-50)
                res1.DirSearchResults.extend(res2.DirSearchResults)
                res1.SiteSearchResults.extend(res2.SiteSearchResults)
                del res2
            return res1

class DirSearchResultItem:
    """One category entry returned by a Yahoo! Japan directory search."""
    def __init__(self,dirID,title,path,url,created,new):
        #Bug fix: this previously read `self.DirectoryID=id`, storing the
        #*builtin* function `id` and silently discarding the dirID argument.
        self.DirectoryID=dirID
        self.Title=unicode(title,"utf-8") #title arrives as UTF-8 bytes from the XML
        self.Path=path
        self.URL=url
        self.CreatedDate=created
        self.IsNew=new #True when the API flagged the entry as <New>1</New>

    def __str__(self):
        return "[title=%s\nPath=%s\nURL=%s]\n" % (Util.JapaneseConsole(self.Title),Util.JapaneseConsole(self.Path),self.URL)

class SiteSearchResultItem(KeywordSearch.SearchResult):
    """A registered-site entry from a directory search; extends
    SearchResult with its parent category and a feature bitmask."""
    #Bit flags OR-ed together into the Feature mask.
    NEW=1
    PICK=2
    COOL=4
    
    def __init__(self,parent,parentName,parentCategoryURL,title,url,summary,created,feature):
        self.DirectoryID=parent
        #parentName arrives as UTF-8 bytes from the XML response.
        self.DirectoryTitle=unicode(parentName,"utf-8")
        self.DirectoryURL=parentCategoryURL
        self.Feature=feature
        #Base initializer runs last, after the subclass fields are set.
        KeywordSearch.SearchResult.__init__(self,title,summary,url,created)

class DirectorySearchResult:
    """Container pairing the two result lists of a directory search:
    matching categories (DirSearchResults) and matching registered
    sites (SiteSearchResults)."""
    def __init__(self,dirSearchTotalCount=None,siteSearchTotalCount=None):
        #Bug fix: the two total counts were previously accepted but never
        #forwarded, so the collections' TotalCount stayed unset even though
        #DirectorySearchEngine._search reads it to decide whether to page.
        self.DirSearchResults=KeywordSearch.SearchResultCollection(dirSearchTotalCount)
        self.SiteSearchResults=KeywordSearch.SearchResultCollection(siteSearchTotalCount)
#Yahoo! Chiebukuro (Q&A service)
class ChiebukuroSearchEngine(KeywordSearch.SearchEngine):
    '''
    Search Yahoo! Chiebukuro questions and best answers.

    #Example: fetch solved Q&As matching "apple"
    from yi01lib.SearchEngine import YahooJapan
    
    qaYahoo=YahooJapan.ChiebukuroSearchEngine()
    qaYahoo.ResultCount=10
    qaYahoo.Condition="solved"
    searchResult=qaYahoo.Search(u"apple")
    
    for result in searchResult:
        print "="*10 + "Question" + "="*10
        print result.Question

        print "="*10 + "Answer" + "="*10
        print result.BestAnswer
    '''
    def __init__(self):
        self._apiKey=Util.API_Key["YahooJapan"]
        self.ResultCount=50 #number of results to fetch
        self.JapaneseOnly=False #when True, restrict results to country=jp
        self.CategoryID=None #optional "categoryid" request parameter
        self.Condition=None #optional "condition" parameter (e.g. "solved")
        self.SortOrder=None #optional "sort" parameter
    
    def Search(self,unicodedQuery):
        """Search for *unicodedQuery* (unicode) and return a
        KeywordSearch.SearchResultCollection of ChiebukuroSearchResult."""
        query=unicodedQuery.encode("utf-8")
        return self._search(query,0,self.ResultCount)

    def _search(self,query,start,resultCount,):
        #The Chiebukuro API serves at most 100 items per request,
        #so larger requests are split into chunks (else branch below).
        if(resultCount<=100):
            url="http://chiebukuro.yahooapis.jp/Chiebukuro/V1/questionSearch"
            #"start" is 1-based in the Yahoo API.
            q={"appid":self._apiKey,"query":query,"results":resultCount,"start":start+1}
            if self.JapaneseOnly:
                q["country"]="jp"
            
            #Optional extra search parameters
            if not self.CategoryID is None: q["categoryid"]=self.CategoryID
            if not self.Condition is None: q["condition"]=self.Condition
            if not self.SortOrder is None: q["sort"]=self.SortOrder
                
            url+="?"+urllib.urlencode(q)
            
            #Download the result XML
            xml=Util.Download(url)
            
            mTotal=re.compile('totalResultsAvailable="([0-9]+)"').search(xml)
            totalCount=-1 if mTotal is None else int(mTotal.group(1))
            
            searchResults=KeywordSearch.SearchResultCollection(totalCount)
            
            for mQuestion in re.compile('<Question>(.*?)</Question>',re.M|re.S).finditer(xml):
                question=mQuestion.group(1)

                #Question body is CDATA-wrapped.
                m=re.compile('<Content><!\[CDATA\[(.*?)\]\]></Content>',re.M|re.S).search(question)
                content=m.group(1)
                
                #BestAnswer may or may not be CDATA-wrapped; group(2) is
                #the text either way.
                m=re.compile('<BestAnswer>(<!\[CDATA\[)?(.*?)(\]\]>)?</BestAnswer>',re.M|re.S).search(question)
                bestAnswer=m.group(2)
                
                m=re.compile('<Url>([^<]*?)</Url>').search(question)
                url=m.group(1)
                
                #Dates become -1 when the source field is empty.
                m=re.compile('<PostedDate>([^<]*?)</PostedDate>').search(question)
                postDate=-1 if m.group(1)=="" else Util.StrToTime(m.group(1))
                
                m=re.compile('<SolvedDate>([^<]*?)</SolvedDate>').search(question)
                solvedDate=-1 if m.group(1)=="" else Util.StrToTime(m.group(1))

                m=re.compile('<AnsCount>([^<]*?)</AnsCount>').search(question)
                ansCount=int(m.group(1))
                
                m=re.compile('<Condition>([^<]*?)</Condition>').search(question)
                condition=m.group(1)
                
                m=re.compile('<CategoryIdPath>([^<]*?)</CategoryIdPath>').search(question)
                categoryIDPath=m.group(1)
                
                res=ChiebukuroSearchResult(content,bestAnswer,url,postDate,solvedDate,ansCount,condition,categoryIDPath)
                searchResults.append(res)
            return searchResults
            
        #More than 100 requested: download in chunks and merge.
        else:
            res1=self._search(query,start,100)
            if res1.TotalCount > start+100:
                res2=self._search(query,start+100,resultCount-100)
                res1.extend(res2)
                del res2
            return res1
class ChiebukuroSearchResult:
    """A single question/best-answer pair returned by
    ChiebukuroSearchEngine."""
    def __init__(self,question,bestAnswer,url,postDate,solvedDate,ansCount,condition,categoryIDPath):
        #Question and answer bodies arrive as UTF-8 byte strings.
        self.Question=unicode(question,"utf-8")
        self.BestAnswer=unicode(bestAnswer,"utf-8")
        self.URL=url
        self.AnswerCount=ansCount
        self.Condition=condition
        self.CategoryIDPath=categoryIDPath
        #Timestamps; the caller passes -1 when the source field was empty.
        self.PostDate=postDate
        self.SolvedDate=solvedDate

    def __str__(self):
        fields=(Util.JapaneseConsole(self.Question),
                Util.JapaneseConsole(self.BestAnswer),
                Util.JapaneseConsole(self.URL))
        return "[Q:%s  A:%s URL:%s]"%fields
#Yahoo! blog search
class BlogSearchEngine(KeywordSearch.SearchEngine):
    '''
    Blog search scraped from Yahoo! Japan's public RSS endpoint.

    #This is NOT an official API, so do not hammer it!
    
    #Example
    from yi01lib.SearchEngine import YahooJapan
    
    bYahoo=YahooJapan.BlogSearchEngine()
    bYahoo.Yuragi="off"
    for result in bYahoo.Search(u"京都"):
        print result
    '''    
    
    def __init__(self):
        self.ResultCount=50 #number of results to fetch
        #Optional "yuragi" request parameter — presumably "off" disables
        #spelling-variation matching; verify against the endpoint.
        self.Yuragi=None
    
    def Search(self,unicodedQuery):
        """Search blogs for *unicodedQuery* (unicode) and return a
        KeywordSearch.SearchResultCollection."""
        #The RSS endpoint expects an EUC-JP-encoded query.
        query=unicodedQuery.encode("euc-jp")
        return self._search(query,1,self.ResultCount)

    def _search(self,query,page,resultCount,):
        #The endpoint serves at most 100 items per request,
        #so larger requests are split into chunks (else branch below).
        if(resultCount<=100):
            url="http://blog-search.yahoo.co.jp/rss"
            #The query is quoted by hand so the EUC-JP bytes pass through
            #unchanged; the remaining parameters go through urlencode.
            qq="p=%s" % urllib.quote(query)
            q={"n":resultCount,"b":page}
            if not self.Yuragi is None:
                q["yuragi"]=self.Yuragi
            
            url+=("?%s&" % qq) +urllib.urlencode(q)
            
            #Download the RSS XML
            xml=Util.Download(url)
            
            mTotal=re.compile('<totalCount>([0-9]+)</totalCount>').search(xml)
            totalCount=-1 if mTotal is None else int(mTotal.group(1))
            
            searchResults=KeywordSearch.SearchResultCollection(totalCount)
            
            for mResult in re.compile('<item>(.*?)</item>',re.M|re.S).finditer(xml):
                result=mResult.group(1)
                
                m=re.compile('<title>(.*?)</title>').search(result)
                title=m.group(1)
                m=re.compile('<description>(.*?)</description>',re.M|re.S).search(result)
                description=m.group(1)
                #Use the redirector's original link, not the tracking URL.
                m=re.compile('<redirector:origLink>(.*?)</redirector:origLink>').search(result)
                url=m.group(1)
                m=re.compile('<pubDate>(.*?)</pubDate>').search(result)
                timestamp=Util.StrToTime(m.group(1))

                searchResult=KeywordSearch.SearchResult(title,description,url,timestamp)
                #Attach the source blog's name as an extra attribute.
                m=re.compile('<source url="([^"]+)">(.*?)</source>').search(result)
                source=m.group(2)
                searchResult.Source=source

                searchResults.append(searchResult)
            return searchResults
            
        #More than 100 requested: download in chunks and merge.
        else:
            #NOTE(review): the next chunk is requested with b=page+1 while
            #"n" stays at up to 100 — if "b" is a 1-based item offset rather
            #than a page index, successive chunks overlap heavily; confirm
            #the endpoint's semantics before relying on multi-chunk fetches.
            res1=self._search(query,page,100)
            if res1.TotalCount > page*100:
                res2=self._search(query,page+1,resultCount-100)
                res1.extend(res2)
                del res2
            return res1

