﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

using Analyzer = Lucene.Net.Analysis.Analyzer;
using StandardAnalyzer = Lucene.Net.Analysis.Standard.StandardAnalyzer;
using Document = Lucene.Net.Documents.Document;
using FilterIndexReader = Lucene.Net.Index.FilterIndexReader;
using IndexReader = Lucene.Net.Index.IndexReader;
using QueryParser = Lucene.Net.QueryParsers.QueryParser;
using FSDirectory = Lucene.Net.Store.FSDirectory;
using Version = Lucene.Net.Util.Version;
using Collector = Lucene.Net.Search.Collector;
using IndexSearcher = Lucene.Net.Search.IndexSearcher;
using Query = Lucene.Net.Search.Query;
using ScoreDoc = Lucene.Net.Search.ScoreDoc;
using Scorer = Lucene.Net.Search.Scorer;
using Searcher = Lucene.Net.Search.Searcher;
using TopScoreDocCollector = Lucene.Net.Search.TopScoreDocCollector;


namespace FisheryPlatform.Search.Index
{
    public class FPSearcher
    {
        /// <summary>
        /// Streaming hit collector that prints the absolute document id and score
        /// of every matching document as it is collected.
        /// </summary>
        private class AnonymousClassCollector : Collector
        {
            #region
            private Scorer scorer;
            private int docBase;

            // Print docId and score of every matching document.
            // Fix: doc is segment-relative; the absolute id is docBase + doc.
            // The original string-concatenated the two numbers ("doc=" + doc + docBase),
            // which printed their digits glued together instead of their sum.
            public override void Collect(int doc)
            {
                System.Console.Out.WriteLine("doc=" + (docBase + doc) + " score=" + scorer.Score());
            }

            // Out-of-order collection is acceptable because each hit is only printed.
            public override bool AcceptsDocsOutOfOrder()
            {
                return true;
            }

            // Remember the doc-id offset of the segment about to be collected.
            public override void SetNextReader(IndexReader reader, int docBase)
            {
                this.docBase = docBase;
            }

            public override void SetScorer(Scorer scorer)
            {
                this.scorer = scorer;
            }
            #endregion
        }

        /// <summary>
        /// Serves the norms of one designated field for every field that is queried.
        /// Norms cost one byte of memory per document per searched field, so searching
        /// a large collection with many fields can exhaust memory; when every field
        /// holds a single token the norms are identical and one vector can be shared.
        /// </summary>
        private class OneNormsReader : FilterIndexReader
        {
            #region
            // The single field whose norms are returned for all requests.
            private System.String normsField;

            public OneNormsReader(IndexReader in_Renamed, System.String field)
                : base(in_Renamed)
            {
                normsField = field;
            }

            // Ignore the requested field and always hand back the shared norm vector.
            public override byte[] Norms(System.String field)
            {
                return in_Renamed.Norms(normsField);
            }
            #endregion
        }

        // Private constructor: FPSearcher exposes only static members and is
        // never meant to be instantiated.
        private FPSearcher()
        {
        }

        /// <summary>
        /// Input parameters for a search run (see the static Search method).
        /// </summary>
        public class FPSearchCondition
        {
            #region
            /// <summary>Directory on disk containing the Lucene index.</summary>
            public string IndexDirectory { get; set; }
            /// <summary>Name of the indexed field to search (e.g. "contents").</summary>
            public string SearchField { get; set; }
            /// <summary>Number of times to repeat the query as a crude benchmark; 0 disables.</summary>
            public int Repeat { get; set; }
            /// <summary>Raw query string to be parsed and executed.</summary>
            public string Queries { get; set; }
            /// <summary>When true, results are reported as raw "doc=... score=..." lines.</summary>
            public bool Raw { get; set; }
            /// <summary>
            /// Field whose norms are shared across all fields (see OneNormsReader);
            /// null or empty disables norm sharing. (Translated from original comment "标准".)
            /// </summary>
            public string NormsField { get; set; }
            /// <summary>
            /// hitsPerPage — page size; a value of 0 or less selects streaming search
            /// instead of paging (see the Search method).
            /// </summary>
            public int Paging { get; set; }
            /// <summary>1-based index of the result page to return.</summary>
            public int CurrentPageIndex { get; set; }
            #endregion
        }

        /// <summary>
        /// Outcome of a search run: paging information plus the collected result lines.
        /// </summary>
        public class FPSearchResult
        {
            #region
            // Start with empty (never null) result collections.
            public FPSearchResult()
            {
                this.Raw = new List<string>();
                this.List = new List<string>();
            }

            /// <summary>Zero-based index of the first hit on the requested page.</summary>
            public int Index { get; set; }
            /// <summary>End bound used when slicing hits for the current page.</summary>
            public int Count { get; set; }
            /// <summary>Total number of documents matching the query.</summary>
            public int TotalHits { get; set; }
            /// <summary>Raw "doc=... score=..." lines, filled when raw output is requested.</summary>
            public List<string> Raw { get; set; }
            /// <summary>Human-readable, formatted result lines.</summary>
            public List<string> List { get; set; }
            #endregion
        }

        /// <summary>
        /// Simple command-line based search demo. 
        /// </summary>
        public static FPSearchResult Search(FPSearchCondition sCondition)
        {
            #region
            FPSearchResult searchResult = new FPSearchResult();

            sCondition.IndexDirectory = @"E:\资料\Lucene\Lucene原理与代码分析完整版\indexDir\Files";
            sCondition.Repeat = 0;
            sCondition.SearchField = "contents";
            //args = new string[] { "-index", @"E:\资料\Lucene\Lucene原理与代码分析完整版\indexDir\Files" };
            //System.String usage = "Usage:\t" + typeof(SearchFiles) + "[-index dir] [-field f] [-repeat n] [-queries file] [-raw] [-norms field] [-paging hitsPerPage]";
            //usage += "\n\tSpecify 'false' for hitsPerPage to use streaming instead of paging search.";

            bool paging = sCondition.Paging > 0 ? true : false;

            IndexReader reader = IndexReader.Open(FSDirectory.Open(new System.IO.DirectoryInfo(sCondition.IndexDirectory)), true); // only searching, so read-only=true

            if (string.IsNullOrEmpty(sCondition.NormsField) == false)
                reader = new OneNormsReader(reader, sCondition.NormsField);

            Searcher searcher = new IndexSearcher(reader);
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);

            QueryParser parser = new QueryParser(Version.LUCENE_CURRENT,sCondition.SearchField, analyzer);
            Query query = parser.Parse(sCondition.Queries.Trim());
            //System.Console.Out.WriteLine("Searching for: " + query.ToString(sCondition.SearchField));
            if (sCondition.Repeat > 0)
            {
                // repeat & time as benchmark
                //System.DateTime start = System.DateTime.Now;
                for (int i = 0; i < sCondition.Repeat; i++)
                {
                    searcher.Search(query, null, 100);
                }
                //System.DateTime end = System.DateTime.Now;
                //System.Console.Out.WriteLine("Time: " + (end.Millisecond - start.Millisecond) + "ms");
            }

            if (paging)
            {
                searchResult = DoPagingSearch(searcher, query, sCondition.CurrentPageIndex, sCondition.Paging, sCondition.Raw, sCondition.Queries == null);
            }
            else
            {
                DoStreamingSearch(searcher, query);
            }
            reader.Close();
            return searchResult;
            #endregion
        }

        /// <summary> This method uses a custom HitCollector implementation which simply prints out
        /// the docId and score of every matching document. 
        /// 
        /// This simulates the streaming search use case, where all hits are supposed to
        /// be processed, regardless of their relevance.
        /// </summary>
        /// <summary> Streams every hit straight to the console using a custom
        /// Collector that prints each matching document's id and score.
        /// 
        /// This simulates the streaming search use case, where all hits are
        /// processed regardless of their relevance.
        /// </summary>
        public static void DoStreamingSearch(Searcher searcher, Query query)
        {
            // The collector does the printing itself; nothing is returned.
            searcher.Search(query, new AnonymousClassCollector());
        }

        /// <summary> This demonstrates a typical paging search scenario, where the search engine presents 
        /// pages of size n to the user. The user can then go to the next page if interested in
        /// the next hits.
        /// 
        /// When the query is executed for the first time, then only enough results are collected
        /// to fill 5 result pages. If the user wants to page beyond this limit, then the query
        /// is executed another time and all hits are collected.
        /// 
        /// </summary>
        /// <summary> This demonstrates a typical paging search scenario, where the search engine presents 
        /// pages of size n to the user. The user can then go to the next page if interested in
        /// the next hits.
        /// 
        /// When the query is executed for the first time, then only enough results are collected
        /// to fill 5 result pages. If the user wants to page beyond this limit, then the query
        /// is executed another time and all hits are collected.
        /// 
        /// </summary>
        /// <param name="searcher">Open searcher to execute the query against.</param>
        /// <param name="query">Parsed query to run.</param>
        /// <param name="currentPageIndex">1-based page number requested by the caller.</param>
        /// <param name="hitsPerPage">Page size.</param>
        /// <param name="raw">When true, emit raw "doc=... score=..." lines instead of formatted output.</param>
        /// <param name="interactive">NOTE(review): currently unused in this method body.</param>
        /// <returns>Result object holding paging info and the rendered lines.</returns>
        public static FPSearchResult DoPagingSearch(Searcher searcher, Query query, int currentPageIndex, int hitsPerPage, bool raw, bool interactive)
        {
            #region
            FPSearchResult searchResult = new FPSearchResult();

            // Collect enough docs to show 5 pages
            TopScoreDocCollector collector = TopScoreDocCollector.create(5 * hitsPerPage, false);
            searcher.Search(query, collector);
            ScoreDoc[] hits = collector.TopDocs().scoreDocs;

            int numTotalHits = collector.GetTotalHits();
            //System.Console.Out.WriteLine(numTotalHits + " total matching documents");
            searchResult.TotalHits = numTotalHits;

            // Index = zero-based offset of the first hit on the requested page.
            if ((currentPageIndex - 1) * hitsPerPage < numTotalHits)
            {
                searchResult.Index = (currentPageIndex - 1) * hitsPerPage;
            }
            else
            {
                // NOTE(review): there is no early return here, so the loop below
                // still runs with Index == 0 — confirm whether that is intended.
                searchResult.List.Add("No such page");
            }
            // Count is first used here as "number of hits wanted on one page" ...
            searchResult.Count = System.Math.Min(numTotalHits, hitsPerPage);

            if (searchResult.Count > hits.Length)
            {
                #region
                // Fewer hits were collected than the page needs: re-run the query
                // collecting every matching document.
                // NOTE(review): the Lucene demo this derives from compares the page
                // END offset against the collected count — verify this condition
                // re-collects for pages beyond the first five.
                //System.Console.Out.WriteLine("Only results 1 - " + hits.Length + " of " + numTotalHits + " total matching documents collected.");
                //System.Console.Out.WriteLine("Collect more (y/n) ?");

                collector = TopScoreDocCollector.create(numTotalHits, false);
                searcher.Search(query, collector);
                hits = collector.TopDocs().scoreDocs;
                #endregion
            }

            // ... and re-purposed here as the EXCLUSIVE end index of the page slice.
            searchResult.Count = System.Math.Min(hits.Length, searchResult.Index + hitsPerPage);

            for (int i = searchResult.Index; i < searchResult.Count; i++)
            {
                #region
                if (raw)
                {
                    // output raw format
                    //System.Console.Out.WriteLine("doc=" + hits[i].doc + " score=" + hits[i].score);
                    searchResult.Raw.Add("doc=" + hits[i].doc + " score=" + hits[i].score);
                    continue;
                }
                #endregion

                #region
                // Formatted output: path line, optional title and contents, per stored document.
                Document doc = searcher.Doc(hits[i].doc);
                System.String path = doc.Get("path");
                if (path != null)
                {
                    searchResult.List.Add((i + 1) + ". " + path);
                    //System.Console.Out.WriteLine((i + 1) + ". " + path);
                    System.String title = doc.Get("title");
                    if (title != null)
                    {
                        searchResult.List.Add("   Title: " + doc.Get("title"));
                        //System.Console.Out.WriteLine("   Title: " + doc.Get("title"));
                    }

                    string contents = doc.Get("contents");
                    if (string.IsNullOrEmpty(contents) == false)
                    {
                        searchResult.List.Add(" contents:" + contents);
                    }
                }
                else
                {
                    searchResult.List.Add((i + 1) + ". " + "No path for this document");
                    //System.Console.Out.WriteLine((i + 1) + ". " + "No path for this document");
                }
                searchResult.List.Add("--------------<br/>");
                #endregion
            }
            return searchResult;
            #endregion
        }
    }
}
