#include <iostream>
#include <vector>
#include <cmath>
#include <algorithm>
#include <limits>
#include <list>
#include <utility>

using namespace std;

// --- Data structures ---
using NpuId = pair<int, int>; 

// One server type as read from the input. Servers of this type host `g` NPUs
// (see npu_free_time sizing in solve()).
struct ServerType {
    int id;          // 0-based server-type index
    int g, k, m;     // g: NPU count; k: speed coefficient in the inference-time
                     // formula ceil(b / (k*sqrt(b))); m: capacity used to derive
                     // max_b (memory, presumably — see read_input)
    long long max_b; // largest batch that fits: (m - MODEL_B) / MODEL_A, clamped to [0, 1000]
};

// Per-user scheduling state, updated as the simulation progresses.
struct User {
    int id;                        // 0-based user index
    int s, e, cnt;                 // start time, deadline, total sample count
    int samples_to_schedule;       // samples not yet assigned to any request
    long long ready_to_send_time;  // earliest time the next request may be sent
    int requests_sent;             // number of requests issued so far
    NpuId last_npu_id;             // NPU of the most recent request ({-1,-1} if none yet)
    list<NpuId> primary_nest;      // small MRU list of favourite NPUs (cap PRIMARY_NEST_SIZE)
    list<NpuId> reserve_nest;      // larger MRU list of fallback NPUs (cap RESERVE_NEST_SIZE)
    int impatience_counter;        // NOTE(review): initialized in read_input but never read here
    double priority;               // scheduling priority (higher = served first)
    double urgency;                // remaining samples per remaining time unit
    bool at_risk;                  // heuristic: user looks likely to miss its deadline
};

struct Decision {
    long long time;
    int server_idx;
    int npu_idx_in_server;
    int batch_size;
};

// A candidate NPU paired with its assignment cost; sorts cheapest-first so
// sort() leaves the best candidate at index 0.
struct CandidateNpu {
    NpuId id;
    long double cost;
    bool operator<(const CandidateNpu& other) const { return cost < other.cost; }
};

// --- Global state and hyper-parameters ---
int N, M;                                // number of server types / number of users
int MODEL_A, MODEL_B;                    // memory-model coefficients: batch b needs roughly A*b + B (see max_b in read_input)
vector<ServerType> server_types;
vector<User> users;
vector<vector<int>> latencies;           // latencies[server][user] = one-way network latency
vector<vector<long long>> npu_free_time; // npu_free_time[server][npu] = time that NPU becomes idle
long long current_global_time;           // simulation clock

// Strategy hyper-parameters
const size_t PRIMARY_NEST_SIZE = 3;           // max favourite NPUs remembered per user
const size_t RESERVE_NEST_SIZE = 5;           // max fallback NPUs remembered per user
const int IMPATIENCE_THRESHOLD = 2;           // NOTE(review): currently unused in this file
const double LOOKAHEAD_TIE_TOLERANCE = 1.15;  // candidates within 15% of the best count as ties
const double LOOKAHEAD_WEIGHT_B = 0.4;        // weight of next user's cost in tie-break lookahead
const long double MIGRATION_PENALTY = 50000.0; // cost added for switching a user to a new NPU
const double URGENCY_WEIGHT = 1000.0;
const double AT_RISK_WEIGHT = 2000.0;

// --- Helper functions ---
// Recompute a user's urgency, at-risk flag and scheduling priority at
// `current_time`. Finished users are pushed to the bottom of the order.
void update_user_priority_and_risk(User& user, long long current_time) {
    if (user.samples_to_schedule <= 0) {
        // Nothing left to schedule: lowest possible priority so the user sorts last.
        user.priority = -numeric_limits<double>::max();
        return;
    }
    // Remaining work per remaining time unit; the denominator is clamped to 1
    // so a passed deadline does not divide by zero or flip the sign.
    user.urgency = (double)user.samples_to_schedule / max(1LL, (long long)user.e - current_time);
    // At risk if a rough 20-ticks-per-sample estimate overshoots the deadline.
    // BUGFIX: multiply in 64-bit — the original computed samples * 20 in int
    // and cast afterwards, which can overflow for large sample counts.
    user.at_risk = (current_time + 20LL * user.samples_to_schedule) >= user.e;

    double p = 0;
    if (user.ready_to_send_time <= current_time) p += 10000;  // ready now: big bonus
    else p -= (user.ready_to_send_time - current_time);       // penalize the wait
    p += user.urgency * URGENCY_WEIGHT;
    if (user.at_risk) p += AT_RISK_WEIGHT;
    p += 60000.0 / (user.e - user.s + 1.0);                   // favor tight time windows
    user.priority = p;
}

// Record that `chosen_npu` was just used by this user, maintaining two MRU
// lists: a small "primary" nest of favourites and a larger "reserve" nest
// of recently-seen alternatives.
void update_nest(User& user, const NpuId& chosen_npu) {
    auto& primary = user.primary_nest;
    auto& reserve = user.reserve_nest;

    // Already a favourite: just move it to the front (most recently used).
    auto fav = find(primary.begin(), primary.end(), chosen_npu);
    if (fav != primary.end()) {
        primary.splice(primary.begin(), primary, fav);
        return;
    }

    auto backup = find(reserve.begin(), reserve.end(), chosen_npu);
    if (backup == reserve.end()) {
        // Brand-new NPU: remember it at the head of the reserve nest,
        // evicting the stalest entry when over capacity.
        reserve.push_front(chosen_npu);
        if (reserve.size() > RESERVE_NEST_SIZE) reserve.pop_back();
        return;
    }

    // Seen before in reserve: promote it to the primary nest; if that
    // overflows, demote the stalest favourite back into the reserve nest.
    reserve.erase(backup);
    primary.push_front(chosen_npu);
    if (primary.size() > PRIMARY_NEST_SIZE) {
        reserve.push_front(primary.back());
        primary.pop_back();
        if (reserve.size() > RESERVE_NEST_SIZE) reserve.pop_back();
    }
}

// Cost of sending the user's next request to `npu_id` at `send_time`:
// the earliest time inference could start (request arrival vs. NPU idle
// time), plus a large penalty if this would migrate the user away from
// the NPU it last used. An invalid NPU ({-1,-1}) costs "infinity".
long double calculate_assignment_cost(const User& user, const NpuId& npu_id, long long send_time) {
    const int server = npu_id.first;
    if (server < 0) return numeric_limits<long double>::max();

    const long long arrival = send_time + latencies[server][user.id];
    const long long idle_at = npu_free_time[server][npu_id.second];
    long double cost = max(arrival, idle_at);

    const bool migrates = user.requests_sent > 0 && npu_id != user.last_npu_id;
    if (migrates) cost += MIGRATION_PENALTY;
    return cost;
}

// Read the whole problem instance from stdin, initialize per-user state,
// and precompute each server type's maximum batch size.
void read_input() {
    cin >> N; server_types.resize(N);
    for (int i = 0; i < N; ++i) { server_types[i].id = i; cin >> server_types[i].g >> server_types[i].k >> server_types[i].m; }
    cin >> M; users.resize(M);
    for (int i = 0; i < M; ++i) {
        users[i].id = i; cin >> users[i].s >> users[i].e >> users[i].cnt;
        // All samples start unscheduled; the user may send from time s onwards.
        users[i].samples_to_schedule = users[i].cnt; users[i].ready_to_send_time = users[i].s;
        users[i].requests_sent = 0; users[i].last_npu_id = {-1, -1};
        users[i].impatience_counter = 0; users[i].priority = 0;
    }
    latencies.resize(N, vector<int>(M));
    for (int i = 0; i < N; ++i) for (int j = 0; j < M; ++j) cin >> latencies[i][j];
    cin >> MODEL_A >> MODEL_B;
    // max_b = largest batch b with MODEL_A*b + MODEL_B <= m, clamped to [0, 1000].
    for (int i = 0; i < N; ++i) {
        if (MODEL_A == 0) server_types[i].max_b = 1000;  // memory does not grow with b
        else server_types[i].max_b = (long long)(server_types[i].m - MODEL_B) / MODEL_A;
        if (server_types[i].max_b < 0) server_types[i].max_b = 0;  // m < MODEL_B: server unusable (filtered by max_b > 0 checks)
        server_types[i].max_b = min(server_types[i].max_b, 1000LL);
    }
}

// Main event-driven scheduling loop. Each iteration either dispatches one
// batch for the highest-priority ready user, or advances the simulated
// clock to the next interesting event. Results are printed at the end.
void solve() {
    vector<vector<Decision>> solution(M);
    npu_free_time.resize(N);
    for (int i = 0; i < N; ++i) npu_free_time[i].resize(server_types[i].g, 0);

    // Start the clock at the earliest user start time (0 if no users).
    if (M > 0) {
        long long min_s = numeric_limits<long long>::max();
        for(const auto& user : users) min_s = min(min_s, (long long)user.s);
        current_global_time = min_s;
    } else {
        current_global_time = 0;
    }

    vector<int> user_indices(M);
    for(int i=0; i<M; ++i) user_indices[i] = i;

    int users_finished = 0;
    while (users_finished < M) {
        // Refresh every user's priority at the current time, then sort
        // the index array so the most deserving users come first.
        for (int i = 0; i < M; ++i) {
            update_user_priority_and_risk(users[i], current_global_time);
        }
        
        sort(user_indices.begin(), user_indices.end(), [&](int a, int b){
            return users[a].priority > users[b].priority;
        });

        // User A = highest-priority user with work left that can send right now.
        int user_A_idx = -1;
        for(int idx : user_indices) {
            if(users[idx].samples_to_schedule > 0 && users[idx].ready_to_send_time <= current_global_time) {
                user_A_idx = idx;
                break;
            }
        }

        // --- Core fix: robust event-driven time advancement ---
        if (user_A_idx == -1) {
            long long next_event_time = numeric_limits<long long>::max();
            bool all_samples_scheduled = true;

            // Candidate event: a user becoming ready to send.
            for(int i=0; i<M; ++i) {
                if(users[i].samples_to_schedule > 0) { 
                    all_samples_scheduled = false; 
                    next_event_time = min(next_event_time, users[i].ready_to_send_time); 
                }
            }
            if (all_samples_scheduled) {
                // Every sample has been scheduled — exit the loop.
                users_finished = M;
                continue;
            }

            // Candidate event: an NPU becoming idle.
            for(int i=0; i<N; ++i) {
                for(int j=0; j<server_types[i].g; ++j) {
                    if (npu_free_time[i][j] > current_global_time) {
                        next_event_time = min(next_event_time, npu_free_time[i][j]);
                    }
                }
            }

            if (next_event_time <= current_global_time || next_event_time == numeric_limits<long long>::max()) {
                // Force the clock forward to avoid stalling / an infinite loop.
                current_global_time++;
            } else {
                current_global_time = next_event_time;
            }
            continue; // Re-evaluate decisions at the new time point.
        }

        User& currentUserA = users[user_A_idx];
        long long send_time = current_global_time;

        // Build candidate NPUs from A's nests first (only servers whose
        // max_b > 0); fall back to scanning every NPU if the nests yield none.
        vector<CandidateNpu> candidates;
        for(const auto& npu : currentUserA.primary_nest) if(server_types[npu.first].max_b > 0) candidates.push_back({npu, calculate_assignment_cost(currentUserA, npu, send_time)});
        for(const auto& npu : currentUserA.reserve_nest) if(server_types[npu.first].max_b > 0) candidates.push_back({npu, calculate_assignment_cost(currentUserA, npu, send_time)});
        if (candidates.empty()) {
            for(int i=0; i<N; ++i) for(int j=0; j<server_types[i].g; ++j) if(server_types[i].max_b > 0) candidates.push_back({{i,j}, calculate_assignment_cost(currentUserA, {i,j}, send_time)});
        }
        
        if (candidates.empty()) { // No usable NPU exists at all: abandon this user's remaining work.
            currentUserA.samples_to_schedule = 0; 
            users_finished++;
            continue;
        }
        
        sort(candidates.begin(), candidates.end());

        // Near-ties (within LOOKAHEAD_TIE_TOLERANCE of the best, at most the
        // top 5) are broken by a one-step lookahead for the next ready user B.
        NpuId best_npu_for_A = candidates[0].id;
        vector<CandidateNpu> tie_candidates;
        tie_candidates.push_back(candidates[0]);
        for (size_t i = 1; i < candidates.size() && i < 5; ++i) if (candidates[i].cost < candidates[0].cost * LOOKAHEAD_TIE_TOLERANCE) tie_candidates.push_back(candidates[i]);

        if (tie_candidates.size() > 1) {
            int user_B_idx = -1;
            for(int idx : user_indices) if (idx != user_A_idx && users[idx].samples_to_schedule > 0 && users[idx].ready_to_send_time <= current_global_time) { user_B_idx = idx; break; }

            if (user_B_idx != -1) {
                User& currentUserB = users[user_B_idx];
                long double min_total_cost = numeric_limits<long double>::max();
                for (const auto& cand_A : tie_candidates) {
                    long double cost_A = cand_A.cost;

                    // Tentatively occupy the NPU with A's batch...
                    long long original_free_time = npu_free_time[cand_A.id.first][cand_A.id.second];
                    long long batch_A = min((long long)currentUserA.samples_to_schedule, server_types[cand_A.id.first].max_b);
                    long double infer_A_dur = ceil(batch_A / (server_types[cand_A.id.first].k * sqrt((double)batch_A)));
                    long long arrival_A = send_time + latencies[cand_A.id.first][currentUserA.id];
                    npu_free_time[cand_A.id.first][cand_A.id.second] = max(arrival_A, original_free_time) + (long long)infer_A_dur;
                    
                    // ...measure B's best cost under that assumption, then roll back.
                    long double min_cost_B = calculate_assignment_cost(currentUserB, currentUserB.last_npu_id, send_time);
                    for(const auto& npu_B : currentUserB.primary_nest) if(server_types[npu_B.first].max_b > 0) min_cost_B = min(min_cost_B, calculate_assignment_cost(currentUserB, npu_B, send_time));
                    npu_free_time[cand_A.id.first][cand_A.id.second] = original_free_time;

                    long double total_cost = cost_A + LOOKAHEAD_WEIGHT_B * min_cost_B;
                    if (total_cost < min_total_cost) {
                        min_total_cost = total_cost;
                        best_npu_for_A = cand_A.id;
                    }
                }
            }
        }
        
        // At-risk users send half-size batches so responses come back sooner.
        long long final_batch_size = min((long long)currentUserA.samples_to_schedule, server_types[best_npu_for_A.first].max_b);
        if (currentUserA.at_risk && final_batch_size > 1) final_batch_size = max(1LL, final_batch_size / 2);
        
        solution[currentUserA.id].push_back(Decision{send_time, best_npu_for_A.first, best_npu_for_A.second, (int)final_batch_size});

        // Commit the assignment: inference takes ceil(b / (k*sqrt(b))) time
        // and starts once the request has arrived AND the NPU is free.
        long long arrival_time = send_time + latencies[best_npu_for_A.first][currentUserA.id];
        long double inference_duration = ceil(final_batch_size / (server_types[best_npu_for_A.first].k * sqrt((double)final_batch_size)));
        long long start_inference_time = max(arrival_time, npu_free_time[best_npu_for_A.first][best_npu_for_A.second]);
        npu_free_time[best_npu_for_A.first][best_npu_for_A.second] = start_inference_time + (long long)inference_duration;

        currentUserA.samples_to_schedule -= final_batch_size;
        // The user may send its next request one tick after this one arrives.
        currentUserA.ready_to_send_time = send_time + latencies[best_npu_for_A.first][currentUserA.id] + 1;
        currentUserA.requests_sent++;
        currentUserA.last_npu_id = best_npu_for_A;
        update_nest(currentUserA, best_npu_for_A);
        
        // Once A is done, check whether everyone is done and, if so, stop.
        if (currentUserA.samples_to_schedule <= 0) {
            bool all_finished_check = true;
            for(int i = 0; i < M; ++i) {
                if (users[i].samples_to_schedule > 0) {
                    all_finished_check = false;
                    break;
                }
            }
            if(all_finished_check) users_finished = M;
        }
    }

    // Output: per user, the request count on one line, then all requests as
    // "time server npu batch" tuples (1-based indices) on the next line.
    for (int i = 0; i < M; ++i) {
        cout << solution[i].size() << endl;
        for (size_t j = 0; j < solution[i].size(); ++j) {
            cout << solution[i][j].time << " " << solution[i][j].server_idx + 1 << " " << solution[i][j].npu_idx_in_server + 1 << " " << solution[i][j].batch_size << (j == solution[i].size() - 1 ? "" : " ");
        }
        cout << endl;
    }
}

// Entry point: fast-I/O setup, then read the instance and run the scheduler.
int main() {
    // Decouple C++ streams from C stdio and untie cin from cout for bulk I/O.
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);  // modern C++: nullptr instead of NULL
    read_input();
    solve();
    return 0;
}