#include <iostream>
#include <vector>
#include <cmath>
#include <algorithm>
#include <limits>

// --- Data structures ---
// One server type as read from the input.
struct ServerType {
    int id;           // 0-based index of this type in server_types
    int g, k, m;      // g: NPU count, k: processing-rate divisor in ceil(sqrt(batch)/k), m: memory
    long long max_b;  // largest batch fitting in memory: (m - B) / A, clamped to >= 0
};

// Per-user scheduling state.
struct User {
    int id;                        // 0-based user index
    int s, e, cnt;                 // s: start time, e: deadline, cnt: total sample count
    int samples_to_schedule;       // samples not yet placed into a request
    long long ready_to_send_time;  // earliest time the next request may be sent
    int requests_sent;             // number of requests issued so far
    int last_server_idx;           // server used by the previous request (-1 = none yet)
    int last_npu_idx_in_server;    // NPU used by the previous request (-1 = none yet)

    // Added: urgency and finish-time estimation.
    double urgency;             // urgency: remaining samples / remaining time
    long long est_finish_time;  // estimated time at which all samples would finish
    bool at_risk;               // whether the user is predicted to miss its deadline
};

// One scheduled request, as emitted in the output.
struct Decision {
    long long time;         // send time of the request
    int server_idx;         // 0-based server type index (printed 1-based)
    int npu_idx_in_server;  // 0-based NPU index within the server (printed 1-based)
    int batch_size;         // number of samples carried by this request
};

using namespace std;

// --- Global state and hyper-parameters ---
int N, M, A, B;
vector<ServerType> server_types;
vector<User> users;
vector<vector<int>> latencies;
vector<vector<long long>> npu_free_time;
long long current_global_time;  // Added: the global scheduling clock

// Hyper-parameters
const long double MIGRATION_PENALTY = 50000.0;
const double URGENCY_WEIGHT = 0.7;         // weight of urgency in the priority score
const double RISK_PRIORITY_BOOST = 1000.0; // flat priority boost for at-risk users

// --- Helper functions ---
// Added: urgency = remaining samples divided by the time left before the deadline.
double calculate_urgency(const User& user, long long current_time) {
    const long long time_left = user.e - current_time;

    // Deadline already reached or passed: maximal urgency.
    if (time_left <= 0) {
        return numeric_limits<double>::max();
    }

    return static_cast<double>(user.samples_to_schedule) / time_left;
}

// 新增: 预估完成时间
long long estimate_finish_time(const User& user, long long current_time) {
    if (user.samples_to_schedule <= 0) return current_time;
    
    // 找到处理速度最快的服务器类型
    double best_processing_rate = 0;
    for (const auto& server : server_types) {
        best_processing_rate = max(best_processing_rate, (double)server.k);
    }
    
    // 使用最佳服务器估算所需的时间
    // 假设用最大可能的批次处理，每批次处理时间约为 sqrt(batch) / k
    long long samples_left = user.samples_to_schedule;
    long long est_time = current_time;
    
    // 最大批次大小
    long long max_batch = 0;
    for (const auto& server : server_types) {
        max_batch = max(max_batch, server.max_b);
    }
    max_batch = min(max_batch, samples_left);
    
    // 估算处理时间
    while (samples_left > 0) {
        long long batch = min(samples_left, max_batch);
        double process_time = ceil(sqrt((double)batch) / best_processing_rate);
        est_time += process_time + 20; // 假设每批额外20ms通信开销
        samples_left -= batch;
    }
    
    return est_time;
}

// Added: deadline-risk test. A user is at risk when the precomputed finish
// estimate reaches or passes the deadline. current_time is kept for
// signature symmetry with the other heuristics but is not consulted here.
bool check_at_risk(const User& user, long long current_time) {
    (void)current_time; // unused; the estimate is already stored on the user
    return user.e <= user.est_finish_time;
}

// Added: composite user priority score (larger = schedule sooner).
double calculate_priority(const User& user, long long current_time) {
    double score = 0.0;

    // Availability: a user that can send immediately earns a large base
    // score; one still waiting is penalized by how long it must wait.
    const bool can_send_now = user.ready_to_send_time <= current_time;
    if (can_send_now) {
        score += 10000;
    } else {
        score -= (user.ready_to_send_time - current_time);
    }

    // Urgency component: remaining samples per unit of remaining time.
    score += user.urgency * URGENCY_WEIGHT * 1000;

    // Users predicted to miss their deadline get a flat boost.
    if (user.at_risk) {
        score += RISK_PRIORITY_BOOST;
    }

    // Tie-breaker: earlier deadlines rank slightly higher.
    score += 1.0 / (user.e + 1) * 1000;

    return score;
}

// Parse the full problem instance from stdin into the globals, then derive
// each server type's maximum batch size from the memory model A*b + B <= m.
void read_input() {
    cin >> N;
    server_types.resize(N);
    int type_id = 0;
    for (auto& st : server_types) {
        st.id = type_id++;
        cin >> st.g >> st.k >> st.m;
    }

    cin >> M;
    users.resize(M);
    int user_id = 0;
    for (auto& u : users) {
        u.id = user_id++;
        cin >> u.s >> u.e >> u.cnt;
        u.samples_to_schedule = u.cnt;   // nothing scheduled yet
        u.ready_to_send_time = u.s;      // may send as soon as the user starts
        u.requests_sent = 0;
        u.last_server_idx = -1;          // no NPU used yet
        u.last_npu_idx_in_server = -1;
        u.urgency = 0.0;
        u.est_finish_time = 0;
        u.at_risk = false;
    }

    latencies.assign(N, vector<int>(M));
    for (auto& row : latencies) {
        for (auto& cell : row) {
            cin >> cell;
        }
    }

    cin >> A >> B;
    for (auto& st : server_types) {
        // Largest batch b with A*b + B <= m; clamp negatives to zero.
        st.max_b = (long long)(st.m - B) / A;
        if (st.max_b < 0) {
            st.max_b = 0;
        }
    }
}

void solve() {
    vector<vector<Decision>> solution(M);
    npu_free_time.resize(N);
    for (int i = 0; i < N; ++i) {
        npu_free_time[i].resize(server_types[i].g, 0);
    }

    current_global_time = 0;
    int users_finished = 0;
    
    // 初始化当前全局时间为最早的开始时间
    for (int i = 0; i < M; ++i) {
        current_global_time = min(current_global_time, (long long)users[i].s);
    }

    while (users_finished < M) {
        // --- 1. 更新所有用户的紧迫度和预估完成时间 ---
        for (int i = 0; i < M; ++i) {
            if (users[i].samples_to_schedule > 0) {
                users[i].urgency = calculate_urgency(users[i], current_global_time);
                users[i].est_finish_time = estimate_finish_time(users[i], current_global_time);
                users[i].at_risk = check_at_risk(users[i], current_global_time);
            }
        }
        
        // --- 2. 用户选择: 基于综合优先级 ---
        int user_to_schedule_idx = -1;
        double highest_priority = -numeric_limits<double>::max();
        
        for (int i = 0; i < M; ++i) {
            if (users[i].samples_to_schedule > 0) {
                double priority = calculate_priority(users[i], current_global_time);
                
                if (priority > highest_priority) {
                    highest_priority = priority;
                    user_to_schedule_idx = i;
                }
            }
        }

        if (user_to_schedule_idx == -1) {
            break; // 所有用户任务已调度完毕
        }

        User& currentUser = users[user_to_schedule_idx];
        long long send_time = max(currentUser.ready_to_send_time, current_global_time);
        
        // 更新全局时间
        current_global_time = send_time;

        // --- 3. 动态、全局的(服务器, NPU)选择 ---
        long double min_cost = numeric_limits<long double>::max();
        int best_server_idx = -1;
        int best_npu_idx = -1;
        long long best_batch_size = 0;

        // 首先尝试上次使用的NPU (避免不必要的迁移)
        if (currentUser.last_server_idx >= 0 && currentUser.last_npu_idx_in_server >= 0) {
            int s_idx = currentUser.last_server_idx;
            int n_idx = currentUser.last_npu_idx_in_server;
            
            long long current_batch_size = min((long long)currentUser.samples_to_schedule, server_types[s_idx].max_b);
            if (current_batch_size > 0) {
                long long arrival_time = send_time + latencies[s_idx][currentUser.id];
                long double inference_duration = ceil(sqrt((long double)current_batch_size) / server_types[s_idx].k);
                long long start_inference_time = max(arrival_time, npu_free_time[s_idx][n_idx]);
                long double eft = start_inference_time + inference_duration;
                
                // 不涉及迁移，直接使用计算出的完成时间作为成本
                min_cost = eft;
                best_server_idx = s_idx;
                best_npu_idx = n_idx;
                best_batch_size = current_batch_size;
            }
        }

        // 风险用户（预计超时的用户）可能需要考虑迁移到更快的服务器
        bool allow_migration = currentUser.at_risk || best_server_idx == -1;
        
        // 遍历所有服务器类型
        for (int i = 0; i < N; ++i) {
            // 如果用户没有风险且已经有最佳选择，就不考虑迁移
            if (!allow_migration && currentUser.last_server_idx != i) {
                continue;
            }
            
            // 计算当前服务器可处理的最大批次大小
            long long current_batch_size = min((long long)currentUser.samples_to_schedule, server_types[i].max_b);
            if (current_batch_size <= 0) {
                continue; // 该服务器无法处理样本，跳过
            }

            // 遍历该服务器上的所有NPU
            for (int j = 0; j < server_types[i].g; ++j) {
                // 如果不允许迁移，且不是当前NPU，则跳过
                if (!allow_migration && (i != currentUser.last_server_idx || j != currentUser.last_npu_idx_in_server)) {
                    continue;
                }
                
                // 如果是已经检查过的当前NPU，则跳过重复检查
                if (i == best_server_idx && j == best_npu_idx && best_server_idx != -1) {
                    continue;
                }
                
                // 计算预计完成时间 (EFT)
                long long arrival_time = send_time + latencies[i][currentUser.id];
                long double inference_duration = ceil(sqrt((long double)current_batch_size) / server_types[i].k);
                long long start_inference_time = max(arrival_time, npu_free_time[i][j]);
                long double eft = start_inference_time + inference_duration;

                // 计算总成本 = EFT + 迁移惩罚
                long double current_cost = eft;
                bool is_migration = (currentUser.requests_sent > 0 && (i != currentUser.last_server_idx || j != currentUser.last_npu_idx_in_server));
                
                if (is_migration) {
                    // 对风险用户适当降低迁移惩罚，鼓励寻找更快的资源
                    double migration_penalty_factor = currentUser.at_risk ? 0.6 : 1.0;
                    current_cost += MIGRATION_PENALTY * migration_penalty_factor;
                }
                
                // 优先考虑紧迫用户和风险用户的需求
                if (currentUser.at_risk) {
                    // 处理速度越快的NPU成本越低
                    current_cost *= 1.0 / (server_types[i].k * 0.2 + 0.8);
                }

                // 如果找到更优解，则更新
                if (current_cost < min_cost) {
                    min_cost = current_cost;
                    best_server_idx = i;
                    best_npu_idx = j;
                    best_batch_size = current_batch_size;
                }
            }
        }
        
        // 如果没有找到任何可行的放置位置，则需要有安全出口
        if (best_server_idx == -1) {
            // 这种情况在正常测试用例中不应发生，但为防止死循环，我们跳过该用户
            currentUser.samples_to_schedule = 0; 
            users_finished++;
            continue;
        }

        // --- 4. 风险用户的批次大小策略 ---
        // 风险用户使用较小批次以加速处理
        if (currentUser.at_risk && currentUser.est_finish_time > currentUser.e) {
            // 减小批次，加速处理，但不低于10个样本
            long long min_batch = 10;
            long long adjusted_batch = max(min_batch, best_batch_size / 2);
            best_batch_size = min(best_batch_size, adjusted_batch);
        }

        // --- 5. 更新状态 ---
        long long final_batch_size = best_batch_size;
        solution[currentUser.id].push_back({send_time, best_server_idx, best_npu_idx, (int)final_batch_size});

        // 更新NPU的空闲时间
        long long arrival_time = send_time + latencies[best_server_idx][currentUser.id];
        long double inference_duration = ceil(sqrt((double)final_batch_size) / server_types[best_server_idx].k);
        long long start_inference_time = max(arrival_time, npu_free_time[best_server_idx][best_npu_idx]);
        npu_free_time[best_server_idx][best_npu_idx] = start_inference_time + (long long)inference_duration;

        // 更新用户状态
        currentUser.samples_to_schedule -= final_batch_size;
        currentUser.ready_to_send_time = send_time + latencies[best_server_idx][currentUser.id] + 1;
        currentUser.requests_sent++;
        currentUser.last_server_idx = best_server_idx;
        currentUser.last_npu_idx_in_server = best_npu_idx;

        if (currentUser.samples_to_schedule <= 0) {
            users_finished++;
        }
    }

    // --- 输出结果 ---
    for (int i = 0; i < M; ++i) {
        cout << solution[i].size() << endl;
        for (size_t j = 0; j < solution[i].size(); ++j) {
            cout << solution[i][j].time << " " << solution[i][j].server_idx + 1 << " " << solution[i][j].npu_idx_in_server + 1 << " " << solution[i][j].batch_size << (j == solution[i].size() - 1 ? "" : " ");
        }
        cout << endl;
    }
}

// Entry point: fast bulk stdin/stdout, then read the instance and schedule.
int main() {
    // Decouple C++ streams from C stdio and untie cin from cout.
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);

    read_input();
    solve();
    return 0;
}