One CUDA topic every two weeks: a textbook chapter, a pair-programming session, or a project.

Target audience: PyTorch programmers tired of CUDA tutorial hell.

### Lecture 1

Goal:
- Integrate a CUDA kernel inside a PyTorch program
- Learn how to profile it

![[Pasted image 20250316213705.png]]

```python
import torch
from torch.profiler import profile, record_function, ProfilerActivity

## Default way to use the profiler
# with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
#     for _ in range(10):
#         a = torch.square(torch.randn(10000, 10000).cuda())
# prof.export_chrome_trace("trace.json")

## With warmup and skip
# https://pytorch.org/docs/stable/profiler.html

# A non-default profiler schedule lets the user turn the profiler on and off
# on different iterations of the training loop;
# trace_handler is called every time a new trace becomes available.
def trace_handler(prof):
    print(prof.key_averages().table(
        sort_by="self_cuda_time_total", row_limit=-1))
    prof.export_chrome_trace("/tmp/test_trace_" + str(prof.step_num) + ".json")

with torch.profiler.profile(
    activities=[
        torch.profiler.ProfilerActivity.CPU,
        torch.profiler.ProfilerActivity.CUDA,
    ],
    # In this example with wait=1, warmup=1, active=2, repeat=1,
    # the profiler will skip the first step/iteration,
    # start warming up on the second, record
    # the third and the fourth iterations,
    # after which the trace becomes available
    # and on_trace_ready (when set) is called;
    # the cycle repeats starting with the next step.
    schedule=torch.profiler.schedule(
        wait=1,
        warmup=1,
        active=2,
        repeat=1),
    on_trace_ready=trace_handler
    # on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')
    # used when outputting for TensorBoard
) as p:
    for _ in range(10):
        torch.square(torch.randn(10000, 10000).cuda())
        # Send a signal to the profiler that the next iteration has started
        p.step()
```

![[Pasted image 20250316220750.png]]
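As a sanity check on the profiler numbers, it helps to time the same op by hand with CUDA events, which are recorded on the GPU stream rather than the host wall clock. Below is a minimal sketch (not part of the lecture material); the warmup loop and the 10000x10000 size are chosen here to mirror the profiling loop above.

```python
import torch

x = torch.randn(10000, 10000, device='cuda')

# Warm up: the first calls include CUDA context setup and caching overhead
for _ in range(3):
    torch.square(x)

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)

start.record()
for _ in range(10):
    torch.square(x)
end.record()

# Events are recorded asynchronously on the stream;
# synchronize before reading the elapsed time
torch.cuda.synchronize()
print(f"avg torch.square: {start.elapsed_time(end) / 10:.3f} ms")  # elapsed_time is in ms
```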
```python
# Adapted straight from https://triton-lang.org/main/getting-started/tutorials/02-fused-softmax.html
import triton
import triton.language as tl
import torch

# If @triton.jit(interpret=True) does not work, use the following two lines
# to enable interpret mode instead:
# import os
# os.environ["TRITON_INTERPRET"] = "1"

@triton.jit
def square_kernel(output_ptr, input_ptr, input_row_stride, output_row_stride, n_cols,
                  BLOCK_SIZE: tl.constexpr):
    # The rows are independent, so we parallelize across those
    row_idx = tl.program_id(0)
    # The stride represents how much we need to increase the pointer to advance 1 row
    row_start_ptr = input_ptr + row_idx * input_row_stride
    # The block size is the next power of two greater than n_cols, so we can fit each
    # row in a single block
    col_offsets = tl.arange(0, BLOCK_SIZE)
    input_ptrs = row_start_ptr + col_offsets
    # Load the row into SRAM, using a mask since BLOCK_SIZE may be > n_cols
    # (the -inf fill value is a leftover from the softmax tutorial; masked
    # lanes are never stored, so it is harmless here)
    row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf'))
    square_output = row * row
    # Write back output to DRAM
    output_row_start_ptr = output_ptr + row_idx * output_row_stride
    output_ptrs = output_row_start_ptr + col_offsets
    tl.store(output_ptrs, square_output, mask=col_offsets < n_cols)


def square(x):
    n_rows, n_cols = x.shape
    # The block size is the smallest power of two greater than the number of columns in `x`
    BLOCK_SIZE = triton.next_power_of_2(n_cols)
    # Another trick we can use is to ask the compiler to use more threads per row by
    # increasing the number of warps (`num_warps`) over which each row is distributed.
    # You will see in the next tutorial how to auto-tune this value in a more natural
    # way so you don't have to come up with manual heuristics yourself.
    num_warps = 4
    if BLOCK_SIZE >= 2048:
        num_warps = 8
    if BLOCK_SIZE >= 4096:
        num_warps = 16
    # Allocate output
    y = torch.empty_like(x)
    # Enqueue kernel. The 1D launch grid is simple: we have one kernel instance
    # per row of the input matrix
    square_kernel[(n_rows, )](
        y,
        x,
        x.stride(0),
        y.stride(0),
        n_cols,
        num_warps=num_warps,
        BLOCK_SIZE=BLOCK_SIZE,
    )
    return y


torch.manual_seed(0)
x = torch.randn(1823, 781, device='cuda')
y_triton = square(x)
y_torch = torch.square(x)
assert torch.allclose(y_triton, y_torch), (y_triton, y_torch)


@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=['N'],  # argument names to use as an x-axis for the plot
        x_vals=[128 * i for i in range(2, 100)],  # different possible values for `x_name`
        line_arg='provider',  # argument name whose value corresponds to a different line in the plot
        line_vals=['triton', 'torch-native', 'torch-compile'],  # possible values for `line_arg`
        line_names=["Triton", "Torch (native)", "Torch (compiled)"],  # label name for the lines
        styles=[('blue', '-'), ('green', '-'), ('green', '--')],  # line styles
        ylabel="GB/s",  # label name for the y-axis
        plot_name="square() performance",  # name for the plot; also used as a file name when saving
        args={'M': 4096},  # values for function arguments not in `x_names` and `y_name`
    ))
def benchmark(M, N, provider):
    x = torch.randn(M, N, device='cuda', dtype=torch.float32)
    quantiles = [0.5, 0.2, 0.8]
    if provider == 'torch-native':
        ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.square(x), quantiles=quantiles)
    if provider == 'triton':
        ms, min_ms, max_ms = triton.testing.do_bench(lambda: square(x), quantiles=quantiles)
    if provider == 'torch-compile':
        ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.compile(torch.square)(x), quantiles=quantiles)
    # square() reads and writes each element once: 2 * numel * element_size bytes moved
    gbps = lambda ms: 2 * x.nelement() * x.element_size() * 1e-9 / (ms * 1e-3)
    return gbps(ms), gbps(max_ms), gbps(min_ms)


benchmark.run(show_plots=True, print_data=True, save_path='.')
```

![[Pasted image 20250316224041.png]]

![[Pasted image 20250316224643.png]]
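To close the loop on the lecture goal, the profiler from the first section can wrap the Triton kernel so it shows up in a trace next to the PyTorch baseline. A minimal sketch, assuming the `square` wrapper defined above; the `record_function` labels and the output path `triton_square_trace.json` are illustrative choices, not from the tutorial:

```python
import torch
from torch.profiler import profile, record_function, ProfilerActivity

x = torch.randn(1823, 781, device='cuda')
square(x)  # warm up so Triton's JIT compilation does not land in the trace

with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA]) as prof:
    # record_function adds named ranges to the trace so the two
    # implementations are easy to tell apart in the timeline
    with record_function("triton_square"):
        square(x)
    with record_function("torch_square"):
        torch.square(x)

print(prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=10))
prof.export_chrome_trace("triton_square_trace.json")  # illustrative output path
```

In the resulting trace, the Triton launch should appear as a CUDA kernel named after `square_kernel`, nested under the `triton_square` range.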