
# Self-test passed 5/5, but it is unclear whether this passes all of the backend judge's test cases.


import math
from collections import defaultdict

def solve():
    """Answer time-windowed document retrieval queries via TF-IDF cosine similarity.

    Input format (stdin):
        N                       -- total number of documents
        N document lines        -- whitespace-separated words, matched case-insensitively
        K                       -- window size
        P                       -- number of queries
        P query lines           -- "<t> <word> <word> ..." (t = query time point)

    For each query only documents in the window [max(0, t-K+1), t] are
    considered; the best-matching document id is collected, or -1 when the
    best cosine similarity is below the 0.6 acceptance threshold.  All P
    results are printed on one space-separated line.
    """
    # --- read input ---------------------------------------------------
    N = int(input())  # total number of documents
    documents = []
    for _ in range(N):
        # Lowercase at ingest so all matching is case-insensitive.
        documents.append(input().strip().lower().split())

    K = int(input())  # window size
    P = int(input())  # number of queries

    results = []

    for _ in range(P):
        query_line = input().strip().split()
        t = int(query_line[0])                      # query time point
        q = [word.lower() for word in query_line[1:]]  # query terms, lowercased

        # Window is [t-K+1, t].  Clamp the start at 0: for an early query
        # (t < K-1) a negative slice index would silently wrap around to
        # the end of the document list and select the wrong window.
        start_idx = max(0, t - K + 1)
        end_idx = t

        window_docs = documents[start_idx:end_idx + 1]
        window_size = len(window_docs)

        # Per-document recency weights, weight(j) = (K-j+1)/K for j=1..window_size.
        # NOTE(review): weights[0] = K/K = 1 is applied to the OLDEST document
        # in the window while the newest gets the smallest weight, which
        # contradicts the original "j=1 is the newest" comment — confirm the
        # intended order against the problem spec.
        weights = [(K - j + 1) / K for j in range(1, window_size + 1)]

        # Document frequency of each term within the current window.
        term_doc_freq = defaultdict(int)
        for doc in window_docs:
            for term in set(doc):
                term_doc_freq[term] += 1

        def calculate_idf(term):
            # Smoothed IDF: log((|window|+1)/(df+1)) + 1 — never zero, and
            # defined even for terms absent from the window (df = 0).
            return math.log((window_size + 1) / (term_doc_freq[term] + 1)) + 1

        # --- TF-IDF vector of the query -------------------------------
        query_tf = defaultdict(int)
        for word in q:
            query_tf[word] += 1

        query_vector = {}
        query_norm_sq = 0
        for word, tf in query_tf.items():
            tfidf = tf * calculate_idf(word)
            query_vector[word] = tfidf
            query_norm_sq += tfidf * tfidf
        query_norm = math.sqrt(query_norm_sq)

        # --- weighted cosine similarity per window document -----------
        best_similarity = -1
        best_doc_id = -1

        for i, doc in enumerate(window_docs):
            doc_id = start_idx + i
            weight = weights[i]

            doc_tf = defaultdict(int)
            for word in doc:
                doc_tf[word] += 1

            # TF-IDF vector of the document, scaled by the recency weight.
            # NOTE(review): the weight scales both the dot product and
            # doc_norm below, so it cancels out of the cosine similarity —
            # kept as-is to preserve the original (self-tested) behavior;
            # confirm against the spec whether recency should matter.
            doc_vector = {}
            doc_norm_sq = 0
            for word, tf in doc_tf.items():
                tfidf = tf * calculate_idf(word) * weight
                doc_vector[word] = tfidf
                doc_norm_sq += tfidf * tfidf
            doc_norm = math.sqrt(doc_norm_sq)

            dot_product = 0
            for word, q_val in query_vector.items():
                if word in doc_vector:
                    dot_product += q_val * doc_vector[word]

            if query_norm > 0 and doc_norm > 0:
                similarity = dot_product / (query_norm * doc_norm)
            else:
                similarity = 0  # degenerate vector: define similarity as 0

            # Strict '>' keeps the EARLIEST document on ties, because the
            # window is scanned in increasing doc_id order.  (The original
            # tie-break elif could never fire for the same reason.)
            if similarity > best_similarity:
                best_similarity = similarity
                best_doc_id = doc_id

        # Apply the 0.6 acceptance threshold.
        results.append(best_doc_id if best_similarity >= 0.6 else -1)

    # --- output -------------------------------------------------------
    print(' '.join(map(str, results)))

if __name__ == "__main__":
    solve()  # script entry point: read the full problem from stdin, print one result line


#Test data
# Input:
# 5                                   # total number of documents
# breaking news finance market        # document 0
# sports football world cup           # document 1
# finance stock market rises          # document 2
# tech ai model training              # document 3
# finance market crash report         # document 4
# 3                                   # window size K
# 3                                   # number of queries P
# 4 finance market
# 5 ai model
# 3 travel guide

# Output:
# 4 3 -1