from z3 import Optimize, Int, If, And, Or, Sum, IntNumRef, sat
import signal
import sys
import os
def _timeout_handler(signum, frame):
    raise TimeoutError("solver timeout")


# Parse input: accept input and optional output from command line
# Usage: python main.py <input_file_or_base> [output_file]
# If the first argument is an existing file path, open it; otherwise try appending ".in".
# If no input argument is provided, print usage and exit.
#
# BUG FIX: previously the usage/exit path abused the site-module `exit(...)`
# helper as the value of a conditional expression; use an explicit guard and
# `sys.exit` (always available, and idiomatic for scripts) instead.
if len(sys.argv) < 2:
    sys.exit("Usage: python main.py <input_file_or_base> [output_file]")
input_arg = sys.argv[1]

if os.path.exists(input_arg):
    infile = input_arg
elif os.path.exists(input_arg + '.in'):
    infile = input_arg + '.in'
else:
    raise FileNotFoundError(f"Input file not found: {input_arg} or {input_arg}.in")

# Default output path: input path with its extension replaced by ".out".
output = sys.argv[2] if len(sys.argv) > 2 else (os.path.splitext(infile)[0] + '.out')


# Read the problem instance: first line is "L M N", followed by N lines of
# "addr size start time" (one visit per line).
with open(infile) as f:
    L, M, N = map(int, f.readline().split())
    records = [tuple(map(int, f.readline().split())) for _ in range(N)]

# Split the visit records into parallel per-field lists.
addr, size, start, time = ([rec[k] for rec in records] for k in range(4))

# Discretize the address space (addr/size/N were read above): every visit's
# start and end address becomes a cut point, and each pair of consecutive cut
# points delimits one elementary block, the half-open interval [l, r).
cut_points = sorted(
    {p for i in range(N) for p in (addr[i], addr[i] + size[i])}
)

blocks = [(cut_points[k], cut_points[k + 1]) for k in range(len(cut_points) - 1)]
block_size = [r - l for l, r in blocks]

# Build the visit <-> block incidence lists.
# visit_blocks[i]     -> indices of blocks overlapped by visit i
# block_visits_tmp[j] -> indices of visits touching block j
visit_blocks = [[] for _ in range(N)]
block_visits_tmp = [[] for _ in range(len(blocks))]

for v in range(N):
    lo, hi = addr[v], addr[v] + size[v]
    for b, (l, r) in enumerate(blocks):
        # Half-open intervals [lo, hi) and [l, r) intersect unless disjoint.
        if r > lo and l < hi:
            visit_blocks[v].append(b)
            block_visits_tmp[b].append(v)

# Discard blocks that no visit touches and compact the remaining indices.
keep = [j for j, visits in enumerate(block_visits_tmp) if visits]
blocks = [blocks[j] for j in keep]
block_size = [block_size[j] for j in keep]
block_visits = [block_visits_tmp[j] for j in keep]

# Renumber the block references held by each visit to the compacted indices.
renumber = {old: new for new, old in enumerate(keep)}
visit_blocks = [[renumber[j] for j in lst] for lst in visit_blocks]

# blocks: [(l, r), ...]
# visit_blocks[i]: list of block indices required by visit i
# block_visits[j]: list of visits that use block j

#print("Blocks:", visit_blocks)

# Z3 decision variables: one load time and one offload time per block, and one
# start time per visit (all integer time points).
n_blocks = len(blocks)
t_load = [Int(f"t_load_{i}") for i in range(n_blocks)]
t_visit = [Int(f"t_visit_{i}") for i in range(N)]
t_offload = [Int(f"t_offload_{i}") for i in range(n_blocks)]

# Create optimizer
opt = Optimize()

# 1. All time variables are non-negative.
for j in range(n_blocks):
    opt.add(t_load[j] >= 0)
    opt.add(t_offload[j] >= 0)
for v in range(N):
    opt.add(t_visit[v] >= 0)

# 2. Constraints: blocks must be loaded before visits, visit cannot be earlier than its start,
# and a block's offload must be after all visits that use it finish.
# NOTE(review): the constant 40 is used throughout as the transfer duration
# per unit of block size (for both loads and offloads) — confirm against the
# problem statement.
for i in range(N):
    opt.add(t_visit[i] >= start[i])
    for blk in visit_blocks[i]:
        # A visit may begin only after every block it touches finished loading.
        opt.add(t_visit[i] >= t_load[blk] + block_size[blk]*40)

for i in range(N):
    for blk in visit_blocks[i]:
        # A block may be offloaded only after the last visit using it completes.
        opt.add(t_offload[blk] >= t_visit[i] + time[i])
        # opt.add(t_offload[j] >= t_load[j] + block_size[j]*40)


# 3. Load/offload mutual exclusion: operations of the same class cannot overlap, and load/offload cannot overlap
# (i.e. every pair of transfer intervals must be ordered one way or the other).
for i in range(len(blocks)):
    for j in range(i+1, len(blocks)):
        opt.add(Or(t_load[i] + block_size[i]*40 <= t_load[j], t_load[j] + block_size[j]*40 <= t_load[i]))
        opt.add(Or(t_offload[i] + block_size[i]*40 <= t_offload[j], t_offload[j] + block_size[j]*40 <= t_offload[i]))
        opt.add(Or(t_load[i] + block_size[i]*40 <= t_offload[j], t_offload[j] + block_size[j]*40 <= t_load[i]))
        opt.add(Or(t_load[j] + block_size[j]*40 <= t_offload[i], t_offload[i] + block_size[i]*40 <= t_load[j]))

# According to example 3: visits with the same start time belong to the same computational task and can run in parallel;
# different tasks must be serialized. Group visits by start time to enforce inter-group serialization.
# NOTE(review): grouping is adjacency-based (a new group opens whenever
# start[i] differs from the previous visit's start), so it relies on visits of
# the same task being listed consecutively in the input — confirm the input
# guarantees this ordering.
tasks = []  # each task is a list of visit indices
current_start = None
for i in range(N):
    if current_start is None or start[i] != current_start:
        tasks.append([])
        current_start = start[i]
    tasks[-1].append(i)

# One auxiliary variable per group: the group's completion time.
group_end_vars = [Int(f"t_group_end_{k}") for k in range(len(tasks))]
for k, group in enumerate(tasks):
    # All visits within a group must start at exactly the same time.
    for i in group:
        opt.add(t_visit[i] >= start[i])
    if group:
        for i in group:
            opt.add(t_visit[i] == t_visit[group[0]])
    # The group's completion time is (at least) the max of its visits' completion times.
    for i in group:
        opt.add(group_end_vars[k] >= t_visit[i] + time[i])
    # Every block the group needs must be loaded before any visit in the group starts.
    group_blocks = set()
    for i in group:
        for blk in visit_blocks[i]:
            group_blocks.add(blk)
    for blk in group_blocks:
        for i in group:
            opt.add(t_visit[i] >= t_load[blk] + block_size[blk]*40)


# Inter-group serialization: the visits of group k+1 may start only after group k completes.
for k in range(len(tasks) - 1):
    for j in tasks[k+1]:
        opt.add(group_end_vars[k] <= t_visit[j])


# 4. Memory capacity constraint: sampled approximation
# Ensure total memory occupied by loaded blocks at each sampled time point does not exceed M
# NOTE(review): the time horizon (100000) and the sample step (40) are
# hard-coded — a schedule extending past t=100000 is not capacity-checked,
# and violations strictly between sample points go undetected. Confirm both
# bounds are adequate for the expected inputs.
for t in range(0, 100000, 40):
    opt.add(Sum([If(And(t >= t_load[j], t < t_offload[j] + block_size[j]*40), block_size[j], 0) for j in range(len(blocks))]) <= M)

# (Per-item monotonicity constraints are no longer needed: the task-level
# serialization already guarantees that earlier requests are satisfied earlier.)


fin_time = Int('fin_time')
# The total finish time is the completion time of the last group.
opt.add(fin_time == group_end_vars[-1])
for j in range(len(blocks)):
    # A block can only start offloading after its own load has completed.
    opt.add(t_offload[j] >= t_load[j] + block_size[j]*40)
opt.minimize(fin_time)


# Solve under a wall-clock guard: SIGALRM interrupts opt.check() after 9 seconds
# by raising TimeoutError from _timeout_handler.
signal.signal(signal.SIGALRM, _timeout_handler)
signal.alarm(9)
try:
    res = opt.check()
    signal.alarm(0)  # cancel the pending alarm once the solver returns
    if res == sat:
        m = opt.model()
        fin_time_val = m.evaluate(fin_time).as_long() # type: ignore[attr-defined]

        # Collect every scheduled action, then emit them in chronological order.
        actions = []
        for i in range(len(blocks)):
            l, r = blocks[i]
            t_load_val = m.evaluate(t_load[i]).as_long() # type: ignore[attr-defined]
            t_offload_val = m.evaluate(t_offload[i]).as_long() # type: ignore[attr-defined]
            actions.append(('Reload', t_load_val, l, r - l))
            # Optional Offload output: only emit it when the transfer finishes
            # no later than the overall finish time.
            if t_offload_val + 40 * block_size[i] <= fin_time_val:
                actions.append(('Offload', t_offload_val, l, r - l))
        for i in range(N):
            t_visit_val = m.evaluate(t_visit[i]).as_long() # type: ignore[attr-defined]
            actions.append(('Visit', t_visit_val, i))
        actions.append(('Fin', fin_time_val))

        # Sort by time, breaking ties so Reload < Visit < Offload < Fin.
        _TYPE_ORDER = {'Reload': 0, 'Visit': 1, 'Offload': 2, 'Fin': 3}

        def action_key(act):
            return (act[1], _TYPE_ORDER[act[0]])

        actions.sort(key=action_key)

        # Write formal output to file, mirroring every line to stdout.
        with open(output, 'w') as outf:
            for act in actions:
                if act[0] == 'Reload':
                    line = f"Reload {act[1]} {act[2]} {act[3]}"
                elif act[0] == 'Visit':
                    line = f"Visit {act[1]} {act[2]}"
                elif act[0] == 'Offload':
                    line = f"Offload {act[1]} {act[2]} {act[3]}"
                else:
                    line = f"Fin {act[1]}"
                print(line)
                outf.write(line + '\n')
                # Stop once Fin is written; anything sorted after it is moot.
                if act[0] == 'Fin':
                    break
    else:
        print("unsat")
        with open(output, 'w') as outf:
            outf.write("UNSAT_OR_UNKNOWN\n")
except TimeoutError:
    with open(output, 'w') as outf:
        outf.write("TIMEOUT\n")
    # BUG FIX: the old message claimed a 10-second timeout, but the alarm is
    # armed for 9 seconds (signal.alarm(9) above).
    print("Solver timed out after 9 seconds. Wrote TIMEOUT to", output)
    sys.exit(2)