diff --git a/challenges/colab_exports/README.md b/challenges/colab_exports/README.md new file mode 100644 index 00000000..5751eebf --- /dev/null +++ b/challenges/colab_exports/README.md @@ -0,0 +1,100 @@ +# LeetGPU Colab Notebooks + +Click the badges below to open the challenges directly in Google Colab. + +## Easy + +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/19_reverse_array.ipynb) **19_reverse_array** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/1_vector_add.ipynb) **1_vector_add** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/21_relu.ipynb) **21_relu** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/23_leaky_relu.ipynb) **23_leaky_relu** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/24_rainbow_table.ipynb) **24_rainbow_table** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/2_matrix_multiplication.ipynb) **2_matrix_multiplication** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/31_matrix_copy.ipynb) **31_matrix_copy** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/3_matrix_transpose.ipynb) **3_matrix_transpose** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/41_simple_inference.ipynb) **41_simple_inference** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/52_silu.ipynb) **52_silu** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/54_swiglu.ipynb) **54_swiglu** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/62_value_clipping.ipynb) **62_value_clipping** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/63_interleave.ipynb) **63_interleave** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/65_geglu.ipynb) **65_geglu** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/66_rgb_to_grayscale.ipynb) **66_rgb_to_grayscale** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/68_sigmoid.ipynb) **68_sigmoid** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/7_color_inversion.ipynb) **7_color_inversion** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/8_matrix_addition.ipynb) **8_matrix_addition** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/easy/9_1d_convolution.ipynb) **9_1d_convolution** + +## Medium + +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/10_2d_convolution.ipynb) **10_2d_convolution** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/11_3d_convolution.ipynb) **11_3d_convolution** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/13_histogramming.ipynb) **13_histogramming** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/16_prefix_sum.ipynb) **16_prefix_sum** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/17_dot_product.ipynb) **17_dot_product** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/18_sparse_matrix_vector_multiplication.ipynb) **18_sparse_matrix_vector_multiplication** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/22_gemm.ipynb) **22_gemm** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/25_categorical_cross_entropy_loss.ipynb) **25_categorical_cross_entropy_loss** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/27_mean_squared_error.ipynb) **27_mean_squared_error** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/28_gaussian_blur.ipynb) **28_gaussian_blur** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/29_top_k_selection.ipynb) **29_top_k_selection** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/30_batched_matrix_multiplication.ipynb) **30_batched_matrix_multiplication** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/32_int8_quantized_matmul.ipynb) **32_int8_quantized_matmul** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/33_ordinary_least_squares.ipynb) **33_ordinary_least_squares** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/34_logistic_regression.ipynb) **34_logistic_regression** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/35_monte_carlo_integration.ipynb) **35_monte_carlo_integration** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/37_matrix_power.ipynb) **37_matrix_power** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/38_nearest_neighbor.ipynb) **38_nearest_neighbor** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/40_batch_normalization.ipynb) **40_batch_normalization** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/42_2d_max_pooling.ipynb) **42_2d_max_pooling** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/43_count_array_element.ipynb) **43_count_array_element** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/44_count_2d_array_element.ipynb) **44_count_2d_array_element** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/45_count_3d_array_element.ipynb) **45_count_3d_array_element** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/47_subarray_sum.ipynb) **47_subarray_sum** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/48_2d_subarray_sum.ipynb) **48_2d_subarray_sum** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/49_3d_subarray_sum.ipynb) **49_3d_subarray_sum** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/4_reduction.ipynb) **4_reduction** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/50_rms_normalization.ipynb) **50_rms_normalization** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/51_max_subarray_sum.ipynb) **51_max_subarray_sum** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/55_attn_w_linear_bias.ipynb) **55_attn_w_linear_bias** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/57_fp16_batched_matmul.ipynb) **57_fp16_batched_matmul** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/58_fp16_dot_product.ipynb) **58_fp16_dot_product** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/5_softmax.ipynb) **5_softmax** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/60_top_p_sampling.ipynb) **60_top_p_sampling** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/61_rope_embedding.ipynb) **61_rope_embedding** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/64_weight_dequantization.ipynb) **64_weight_dequantization** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/67_moe_topk_gating.ipynb) **67_moe_topk_gating** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/69_jacobi_stencil_2d.ipynb) **69_jacobi_stencil_2d** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/6_softmax_attention.ipynb) **6_softmax_attention** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/70_segmented_prefix_sum.ipynb) **70_segmented_prefix_sum** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/71_parallel_merge.ipynb) **71_parallel_merge** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/72_stream_compaction.ipynb) **72_stream_compaction** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/75_sparse_matrix_dense_matrix_multiplication.ipynb) **75_sparse_matrix_dense_matrix_multiplication** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/76_adder_transformer.ipynb) **76_adder_transformer** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/78_2d_fft.ipynb) **78_2d_fft** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/80_grouped_query_attention.ipynb) **80_grouped_query_attention** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/81_int4_matmul.ipynb) **81_int4_matmul** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/82_linear_recurrence.ipynb) **82_linear_recurrence** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/84_swiglu_mlp_block.ipynb) **84_swiglu_mlp_block** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/85_lora_linear.ipynb) **85_lora_linear** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/87_speculative_decoding_verification.ipynb) **87_speculative_decoding_verification** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/90_causal_depthwise_conv1d.ipynb) **90_causal_depthwise_conv1d** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/92_decaying_causal_attention.ipynb) **92_decaying_causal_attention** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/94_ssm_selective_scan.ipynb) **94_ssm_selective_scan** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/medium/96_int8_kv_cache_attention.ipynb) **96_int8_kv_cache_attention** + +## Hard + +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/12_multi_head_attention.ipynb) **12_multi_head_attention** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/14_multi_agent_sim.ipynb) **14_multi_agent_sim** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/15_sorting.ipynb) **15_sorting** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/20_kmeans_clustering.ipynb) **20_kmeans_clustering** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/36_radix_sort.ipynb) **36_radix_sort** +- [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/39_Fast_Fourier_transform.ipynb) **39_Fast_Fourier_transform** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/46_bfs_shortest_path.ipynb) **46_bfs_shortest_path** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/53_casual_attention.ipynb) **53_casual_attention** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/56_linear_attention.ipynb) **56_linear_attention** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/59_sliding_window_attn.ipynb) **59_sliding_window_attn** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/73_all_pairs_shortest_paths.ipynb) **73_all_pairs_shortest_paths** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/74_gpt2_block.ipynb) **74_gpt2_block** +- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/hard/93_llama_transformer_block.ipynb) **93_llama_transformer_block** + diff --git a/challenges/colab_exports/easy/19_reverse_array.ipynb 
b/challenges/colab_exports/easy/19_reverse_array.ipynb new file mode 100644 index 00000000..6229a23d --- /dev/null +++ b/challenges/colab_exports/easy/19_reverse_array.ipynb @@ -0,0 +1,519 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program that reverses an array of 32-bit floating-point\n numbers in-place. The reversal must modify the input array directly rather than producing a new output array.\n

\n\n

Implementation Requirements

\n\n\n

Example 1:

\n
Input: [1.0, 2.0, 3.0, 4.0]\nOutput: [4.0, 3.0, 2.0, 1.0]
\n\n

Example 2:

\n
Input: [1.5, 2.5, 3.5]\nOutput: [3.5, 2.5, 1.5]
\n\n

Constraints

\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void reverse_array(float* input, int N) {}\n\n// input is device pointer\nextern \"C\" void solve(float* input, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n reverse_array<<>>(input, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import 
ceildiv\n\n\ndef reverse_array_kernel(input: UnsafePointer[Float32, MutExternalOrigin], N: Int32):\n pass\n\n\n# input is a device pointer (i.e. pointer to memory on the GPU)\n@export\ndef solve(input: UnsafePointer[Float32, MutExternalOrigin], N: Int32) raises:\n var threadsPerBlock: Int32 = 256\n var ctx = DeviceContext()\n\n var blocksPerGrid = ceildiv(N, threadsPerBlock)\n\n var _kernel = ctx.compile_function[reverse_array_kernel, reverse_array_kernel]()\n ctx.enqueue_function(_kernel, input, N, grid_dim=blocksPerGrid, block_dim=threadsPerBlock)\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input is a tensor on the GPU\ndef solve(input: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef reverse_kernel(input, N, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input is a tensor on the GPU\ndef solve(input: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n n_blocks = triton.cdiv(N // 2, BLOCK_SIZE)\n grid = (n_blocks,)\n\n reverse_kernel[grid](input, N, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", 
+ "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Reverse Array\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, N: int):\n", + " assert input.shape == (N,)\n", + " assert 
input.dtype == torch.float32\n", + "\n", + " # Reverse the array in-place\n", + " input[:] = torch.flip(input, [0])\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"inout\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_tensor = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input_tensor,\n", + " \"N\": 4,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + "\n", + " test_cases = []\n", + "\n", + " # Fixed value test cases\n", + " test_cases.append(\n", + " {\"input\": torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype), \"N\": 4}\n", + " )\n", + "\n", + " test_cases.append({\"input\": torch.tensor([42.0], device=\"cuda\", dtype=dtype), \"N\": 1})\n", + "\n", + " test_cases.append({\"input\": torch.tensor([0.0] * 16, device=\"cuda\", dtype=dtype), \"N\": 16})\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [\n", + " 0.0,\n", + " 1.0,\n", + " 2.0,\n", + " 3.0,\n", + " 4.0,\n", + " 5.0,\n", + " 6.0,\n", + " 7.0,\n", + " 8.0,\n", + " 9.0,\n", + " 10.0,\n", + " 11.0,\n", + " 12.0,\n", + " 13.0,\n", + " 14.0,\n", + " 15.0,\n", + " 16.0,\n", + " 17.0,\n", + " 18.0,\n", + " 19.0,\n", + " 20.0,\n", + " 21.0,\n", + " 22.0,\n", + " 23.0,\n", + " 24.0,\n", + " 25.0,\n", + " 26.0,\n", + " 27.0,\n", + " 28.0,\n", + " 29.0,\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"N\": 30,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\"input\": torch.tensor([-1.0, -2.0, -3.0, -4.0], device=\"cuda\", dtype=dtype), \"N\": 4}\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\"input\": torch.tensor([1.0, -2.0, 3.0, -4.0], device=\"cuda\", dtype=dtype), \"N\": 4}\n", + 
" )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [0.000001, 0.0000001, 0.00000001, 0.000000001], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [1000000.0, 10000000.0, -1000000.0, -10000000.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # Random range test cases\n", + " test_cases.append(\n", + " {\"input\": torch.empty(32, device=\"cuda\", dtype=dtype).uniform_(0.0, 32.0), \"N\": 32}\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\"input\": torch.empty(1000, device=\"cuda\", dtype=dtype).uniform_(0.0, 7.0), \"N\": 1000}\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\"input\": torch.empty(10000, device=\"cuda\", dtype=dtype).uniform_(0.0, 1.0), \"N\": 10000}\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 25000000\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/1_vector_add.ipynb b/challenges/colab_exports/easy/1_vector_add.ipynb new file mode 100644 index 00000000..8ca02f8c --- /dev/null +++ b/challenges/colab_exports/easy/1_vector_add.ipynb @@ -0,0 +1,483 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Write a GPU program that performs element-wise addition of two vectors containing 32-bit floating point numbers.\n The program should take two input vectors of equal length and produce a single output vector containing their sum.\n

\n\n

Implementation Requirements

\n\n\n

Example 1:

\n
\nInput:  A = [1.0, 2.0, 3.0, 4.0]\n        B = [5.0, 6.0, 7.0, 8.0]\nOutput: C = [6.0, 8.0, 10.0, 12.0]\n
\n\n

Example 2:

\n
\nInput:  A = [1.5, 1.5, 1.5]\n        B = [2.3, 2.3, 2.3]\nOutput: C = [3.8, 3.8, 3.8]\n
\n\n

Constraints

\n\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void vector_add(const float* A, const float* B, float* C, int N) {}\n\n// A, B, C are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* A, const float* B, float* C, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n vector_add<<>>(A, B, C, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(A: cute.Tensor, B: cute.Tensor, C: cute.Tensor, N: cute.Uint32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import 
DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef vector_add_kernel(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# A, B, C are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 256\n var ctx = DeviceContext()\n var num_blocks = ceildiv(N, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[vector_add_kernel, vector_add_kernel]()\n ctx.enqueue_function(_kernel, A, B, C, N, grid_dim=num_blocks, block_dim=BLOCK_SIZE)\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef vector_add_kernel(a, b, c, n_elements, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# a, b, c are tensors on the GPU\ndef solve(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N, BLOCK_SIZE),)\n vector_add_kernel[grid](a, b, c, N, BLOCK_SIZE)\n" + 
] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + 
"\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Vector Addition\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, N: int):\n", + " assert A.shape == B.shape == C.shape\n", + " assert A.dtype == B.dtype == C.dtype\n", + " assert A.device == B.device == C.device\n", + "\n", + " torch.add(A, B, out=C)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"C\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_size_t, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4\n", + " A = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " B = torch.tensor([5.0, 6.0, 7.0, 8.0], device=\"cuda\", dtype=dtype)\n", + " C = torch.empty(N, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_specs = [\n", + " (\"scalar_tail_1\", [1.0], [2.0]),\n", + " (\"scalar_tail_2\", [1.0, 2.0], [3.0, 4.0]),\n", + " (\"scalar_tail_3\", [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]),\n", + " (\"basic_small\", [1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]),\n", + " (\"all_zeros\", [0.0] * 16, [0.0] * 16),\n", + " (\"non_power_of_two\", [1.0] * 30, [2.0] * 30),\n", + " (\"negative_numbers\", [-1.0, -2.0, -3.0, -4.0], [-5.0, -6.0, -7.0, -8.0]),\n", + " (\"mixed_positive_negative\", [1.0, -2.0, 3.0, -4.0], [-1.0, 2.0, -3.0, 4.0]),\n", + " (\n", + " \"very_small_numbers\",\n", + " [0.000001, 0.0000001, 0.00000001, 0.000000001],\n", + " [0.000001, 
0.0000001, 0.00000001, 0.000000001],\n", + " ),\n", + " (\n", + " \"large_numbers\",\n", + " [1000000.0, 10000000.0, -1000000.0, -10000000.0],\n", + " [1000000.0, -10000000.0, -1000000.0, 10000000.0],\n", + " ),\n", + " ]\n", + "\n", + " test_cases = []\n", + " for _, a_vals, b_vals in test_specs:\n", + " n = len(a_vals)\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor(a_vals, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor(b_vals, device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.zeros(n, device=\"cuda\", dtype=dtype),\n", + " \"N\": n,\n", + " }\n", + " )\n", + "\n", + " # Random test cases\n", + " for _, size, a_range, b_range in [\n", + " (\"powers_of_two_size\", 32, (0.0, 32.0), (0.0, 64.0)),\n", + " (\"medium_sized_vector\", 1000, (0.0, 7.0), (0.0, 5.0)),\n", + " (\"large_vector\", 10000, (0.0, 1.0), (0.0, 1.0)),\n", + " ]:\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.empty(size, device=\"cuda\", dtype=dtype).uniform_(*a_range),\n", + " \"B\": torch.empty(size, device=\"cuda\", dtype=dtype).uniform_(*b_range),\n", + " \"C\": torch.zeros(size, device=\"cuda\", dtype=dtype),\n", + " \"N\": size,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 25000000\n", + " return {\n", + " \"A\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"B\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"C\": torch.zeros(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def 
eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/21_relu.ipynb b/challenges/colab_exports/easy/21_relu.ipynb new file mode 100644 index 00000000..289f21bc --- /dev/null +++ b/challenges/colab_exports/easy/21_relu.ipynb @@ -0,0 +1,530 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program that performs the Rectified Linear Unit (ReLU) activation function on a vector of 32-bit floating point numbers.\n The ReLU function sets all negative values to zero and leaves positive values unchanged: $$\\text{ReLU}(x) = \\max(0, x)$$\n

\n\n

Implementation Requirements

\n
    \n
  • External libraries are not permitted
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in output
  • \n
\n\n

Example 1:

\n
\nInput:  input = [-2.0, -1.0, 0.0, 1.0, 2.0]\nOutput: output = [0.0, 0.0, 0.0, 1.0, 2.0]\n
\n\n

Example 2:

\n
\nInput:  input = [-3.5, 0.0, 4.2]\nOutput: output = [0.0, 0.0, 4.2]\n
\n\n

Constraints

\n
    \n
  • 1 ≤ N ≤ 100,000,000
  • \n\n
  • Performance is measured with N = 25,000,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void relu_kernel(const float* input, float* output, int N) {}\n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* input, float* output, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n relu_kernel<<>>(input, output, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom 
std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef relu_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# input, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var threadsPerBlock: Int32 = 256\n var ctx = DeviceContext()\n\n var blocksPerGrid = ceildiv(N, threadsPerBlock)\n\n var _kernel = ctx.compile_function[relu_kernel, relu_kernel]()\n ctx.enqueue_function(\n _kernel, input, output, N, grid_dim=blocksPerGrid, block_dim=threadsPerBlock\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef relu_kernel(input, output, n_elements, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N, BLOCK_SIZE),)\n relu_kernel[grid](input, output, N, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 
Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class 
Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"ReLU\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert input.shape == (N,)\n", + " assert output.shape == (N,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + "\n", + " # Apply ReLU: max(0, x)\n", + " output.copy_(torch.relu(input))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_tensor = torch.tensor([-2.0, -1.0, 0.0, 1.0, 2.0], device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(5, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input_tensor,\n", + " \"output\": output_tensor,\n", + " \"N\": 5,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + "\n", + " test_cases = []\n", + "\n", + " # Edge case: single element (N=1)\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([2.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + "\n", + " # Edge case: N=2\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([-1.0, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # Edge case: N=3\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([-2.0, 0.0, 2.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"N\": 
3,\n", + " }\n", + " )\n", + "\n", + " # Fixed-value test cases\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([-2.0, -1.0, 0.0, 1.0, 2.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([-1.0, -2.0, -3.0, -4.0, -5.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([0.0, 0.0, 0.0, 0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [-1000.0, -100.0, 0.0, 100.0, 1000.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [-0.001, -0.0001, 0.0, 0.0001, 0.001], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " # Random range test cases\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty(1024, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"output\": torch.zeros(1024, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1024,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty(10000, device=\"cuda\", dtype=dtype).uniform_(-50.0, 50.0),\n", + " \"output\": 
torch.zeros(10000, device=\"cuda\", dtype=dtype),\n", + " \"N\": 10000,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 25000000 # Large vector for performance testing\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.zeros(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/23_leaky_relu.ipynb b/challenges/colab_exports/easy/23_leaky_relu.ipynb new file mode 100644 index 00000000..cc59cbaf --- /dev/null +++ b/challenges/colab_exports/easy/23_leaky_relu.ipynb @@ -0,0 +1,505 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program that performs the leaky ReLU activation function on a vector of floating-point numbers. The leaky ReLU function is defined as:\n $$ f(x) = \\begin{cases}\n x & \\text{if } x > 0 \\\\\n \\alpha x & \\text{if } x \\leq 0\n \\end{cases} $$\n where $\\alpha$ is a small positive constant (0.01 in this problem).\n

\n\n

Implementation Requirements

\n
    \n
  • External libraries are not permitted
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in vector output
  • \n
  • Use $\\alpha = 0.01$ as the leaky coefficient
  • \n
\n\n

Example 1:

\n
\n  Input:  x = [1.0, -2.0, 3.0, -4.0]\n  Output: y = [1.0, -0.02, 3.0, -0.04]
\n\n

Example 2:

\n
\n  Input:  x = [-1.5, 0.0, 2.5, -3.0]\n  Output: y = [-0.015, 0.0, 2.5, -0.03]
\n\n

Constraints

\n
    \n
  • 1 \u2264 N \u2264 100,000,000
  • \n
  • -1000.0 \u2264 input[i] \u2264 1000.0
  • \n\n
  • Performance is measured with N = 50,000,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void leaky_relu_kernel(const float* input, float* output, int N) {}\n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* input, float* output, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n leaky_relu_kernel<<>>(input, output, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import 
DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef leaky_relu_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# input, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var threadsPerBlock: Int32 = 256\n var ctx = DeviceContext()\n\n var blocksPerGrid = ceildiv(N, threadsPerBlock)\n\n var _kernel = ctx.compile_function[leaky_relu_kernel, leaky_relu_kernel]()\n ctx.enqueue_function(\n _kernel, input, output, N, grid_dim=blocksPerGrid, block_dim=threadsPerBlock\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef leaky_relu_kernel(input, output, n_elements, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N, BLOCK_SIZE),)\n leaky_relu_kernel[grid](input, output, N, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": 
"markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + 
"\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Leaky ReLU\", atol=1e-06, rtol=1e-06, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert input.shape == (N,)\n", + " assert output.shape == (N,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + "\n", + " # Apply Leaky ReLU: f(x) = x if x > 0, else 0.01 * x\n", + " alpha = 0.01\n", + " output[:] = torch.where(input > 0, input, alpha * input)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_tensor = torch.tensor([1.0, -2.0, 3.0, -4.0], device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(4, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input_tensor,\n", + " \"output\": output_tensor,\n", + " \"N\": 4,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_cases = []\n", + "\n", + " # Edge case: single element (N=1)\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([2.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + "\n", + " # Edge case: N=2\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([-1.0, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # Edge case: N=3\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([-2.0, 0.0, 2.0], device=\"cuda\", 
dtype=dtype),\n", + " \"output\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + "\n", + " # basic_example\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, -2.0, 3.0, -4.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # all_positive\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " # all_negative\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([-1.0, -2.0, -3.0, -4.0, -5.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " # zeros\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.zeros(1024, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(1024, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1024,\n", + " }\n", + " )\n", + "\n", + " # medium_random\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty(10000, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.zeros(10000, device=\"cuda\", dtype=dtype),\n", + " \"N\": 10000,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 50000000 # Large vector for performance testing\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"output\": torch.zeros(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + 
"collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/24_rainbow_table.ipynb b/challenges/colab_exports/easy/24_rainbow_table.ipynb new file mode 100644 index 00000000..93443e8b --- /dev/null +++ b/challenges/colab_exports/easy/24_rainbow_table.ipynb @@ -0,0 +1,520 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program that performs R rounds of parallel hashing on an array of 32-bit integers using the provided hash function.\n The hash should be applied R times iteratively (the output of one round becomes the input to the next).\n

\n\n

Implementation Requirements

\n
    \n
  • External libraries are not permitted
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in array output
  • \n
\n\n

Example 1:

\n
Input:  numbers = [123, 456, 789], R = 2\nOutput: hashes = [1636807824, 1273011621, 2193987222]
\n\n

Example 2:

\n
Input:  numbers = [0, 1, 2147483647], R = 3\nOutput: hashes = [96754810, 3571711400, 2006156166]
\n\n

Constraints

\n
    \n
  • 1 \u2264 N \u2264 10,000,000
  • \n
  • 1 \u2264 R \u2264 100
  • \n
  • 0 \u2264 input[i] \u2264 2147483647
  • \n\n
  • Performance is measured with N = 5,000,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__device__ unsigned int fnv1a_hash(unsigned int input) {\n const unsigned int FNV_PRIME = 16777619;\n const unsigned int OFFSET_BASIS = 2166136261;\n\n unsigned int hash = OFFSET_BASIS;\n\n for (int byte_pos = 0; byte_pos < 4; byte_pos++) {\n unsigned char byte = (input >> (byte_pos * 8)) & 0xFFu;\n hash = (hash ^ byte) * FNV_PRIME;\n }\n\n return hash;\n}\n\n__global__ void fnv1a_hash_kernel(const int* input, unsigned int* output, int N, int R) {}\n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const int* input, unsigned int* output, int N, int R) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n fnv1a_hash_kernel<<>>(input, output, N, R);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\ndef fnv1a_hash_u32_scalar(x: cute.Uint32) -> cute.Uint32:\n FNV_PRIME = 16777619\n OFFSET_BASIS = 2166136261\n hash_val = cute.Uint32(OFFSET_BASIS)\n prime = cute.Uint32(FNV_PRIME)\n mask = cute.Uint32(0xFF)\n for byte_pos in range(4):\n byte = (x >> (byte_pos * 8)) & mask\n hash_val = (hash_val ^ byte) * prime\n return cute.Uint32(hash_val)\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32, R: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": 
{}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\ndef fnv1a_hash(x: jax.Array) -> jax.Array:\n FNV_PRIME = jnp.uint32(16777619)\n OFFSET_BASIS = jnp.uint32(2166136261)\n hash_val = jnp.full_like(x, OFFSET_BASIS, dtype=jnp.uint32)\n\n MASK_FF = jnp.uint32(0xFF)\n for byte_pos in range(4):\n byte = (x >> jnp.uint32(byte_pos * 8)) & MASK_FF\n hash_val = hash_val ^ byte\n hash_val = hash_val * FNV_PRIME\n\n return hash_val\n\n\n# input is a tensor on the GPU\ndef solve(input: jax.Array, N: int, R: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef fnv1a_hash(input: UInt32) -> UInt32:\n alias FNV_PRIME: UInt32 = 16777619\n alias OFFSET_BASIS: UInt32 = 2166136261\n\n var hash: UInt32 = OFFSET_BASIS\n\n for byte_pos in range(4):\n var byte_val: UInt32 = (input >> (byte_pos * 8)) & UInt32(0xFF)\n hash = (hash ^ byte_val) * FNV_PRIME\n\n return hash\n\n\ndef fnv1a_hash_kernel(\n input: UnsafePointer[Int32, MutExternalOrigin],\n output: UnsafePointer[UInt32, MutExternalOrigin],\n N: Int32,\n R: Int32,\n):\n pass\n\n\n# input, output are device pointers (i.e. 
pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Int32, MutExternalOrigin],\n output: UnsafePointer[UInt32, MutExternalOrigin],\n N: Int32,\n R: Int32,\n) raises:\n var threadsPerBlock: Int32 = 256\n var ctx = DeviceContext()\n\n var blocksPerGrid = ceildiv(N, threadsPerBlock)\n\n var _kernel = ctx.compile_function[fnv1a_hash_kernel, fnv1a_hash_kernel]()\n ctx.enqueue_function(\n _kernel, input, output, N, R, grid_dim=blocksPerGrid, block_dim=threadsPerBlock\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\ndef fnv1a_hash(x: torch.Tensor) -> torch.Tensor:\n FNV_PRIME = 16777619\n OFFSET_BASIS = 2166136261\n x_int = x.to(torch.int64)\n hash_val = torch.full_like(x_int, OFFSET_BASIS, dtype=torch.int64)\n\n for byte_pos in range(4):\n byte = (x_int >> (byte_pos * 8)) & 0xFF\n hash_val = (hash_val ^ byte) * FNV_PRIME\n hash_val = hash_val & 0xFFFFFFFF\n\n return hash_val.to(torch.int32)\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, R: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef fnv1a_hash(x):\n FNV_PRIME = 16777619\n OFFSET_BASIS = 2166136261\n\n hash_val = tl.full(x.shape, OFFSET_BASIS, tl.uint32)\n\n for byte_pos in range(4):\n byte = (x >> (byte_pos * 8)) & 0xFF\n hash_val = (hash_val ^ byte) * FNV_PRIME\n\n return 
hash_val\n\n\n@triton.jit\ndef fnv1a_hash_kernel(input, output, n_elements, n_rounds, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, R: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N, BLOCK_SIZE),)\n fnv1a_hash_kernel[grid](input, output, N, R, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", 
+ " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Rainbow Table\", atol=0, rtol=0, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def fnv1a_hash(self, x: torch.Tensor) -> torch.Tensor:\n", + " FNV_PRIME = 16777619\n", + " OFFSET_BASIS = 2166136261\n", + " x_int = x.to(torch.int64)\n", + " hash_val = torch.full_like(x_int, OFFSET_BASIS, dtype=torch.int64)\n", + " for byte_pos in range(4):\n", + " byte = (x_int >> (byte_pos * 8)) & 0xFF\n", + " hash_val = (hash_val ^ byte) * FNV_PRIME\n", + " hash_val = hash_val & 0xFFFFFFFF\n", + " return hash_val\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int, R: int):\n", + " assert input.shape == (N,)\n", + " assert output.shape == (N,)\n", + " assert input.dtype == torch.int32\n", + " assert output.dtype == torch.uint32\n", + "\n", + " current = input\n", + "\n", + " # Apply hash R times\n", + " for _ in range(R):\n", + " current = self.fnv1a_hash(current)\n", + "\n", + " # Reinterpret the lower 32 bits as uint32\n", + " output.copy_(current.to(torch.int32).view(torch.uint32))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_int32), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_uint32), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"R\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " input_tensor = torch.tensor([123, 456, 789], device=\"cuda\", dtype=torch.int32)\n", + " output_tensor = 
torch.empty(3, device=\"cuda\", dtype=torch.uint32)\n", + " return {\n", + " \"input\": input_tensor,\n", + " \"output\": output_tensor,\n", + " \"N\": 3,\n", + " \"R\": 2,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + "\n", + " test_cases = []\n", + "\n", + " # Force users to handle \"0 chunks\" logic\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([100], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(1, device=\"cuda\", dtype=torch.uint32),\n", + " \"N\": 1,\n", + " \"R\": 1,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([100, 200], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(2, device=\"cuda\", dtype=torch.uint32),\n", + " \"N\": 2,\n", + " \"R\": 1,\n", + " }\n", + " )\n", + "\n", + " # basic_example\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([123, 456, 789], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(3, device=\"cuda\", dtype=torch.uint32),\n", + " \"N\": 3,\n", + " \"R\": 2,\n", + " }\n", + " )\n", + "\n", + " # zero_and_max\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([0, 1, 2147483647], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(3, device=\"cuda\", dtype=torch.uint32),\n", + " \"N\": 3,\n", + " \"R\": 3,\n", + " }\n", + " )\n", + "\n", + " # single_round\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([1, 2, 3, 4, 5], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=torch.uint32),\n", + " \"N\": 5,\n", + " \"R\": 1,\n", + " }\n", + " )\n", + "\n", + " # many_rounds\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.randint(0, 2147483647 + 1, (1024,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(1024, device=\"cuda\", dtype=torch.uint32),\n", + " \"N\": 1024,\n", + " \"R\": 50,\n", + " }\n", + " )\n", + 
"\n", + " # large_size\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.randint(0, 2147483647 + 1, (10000,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(10000, device=\"cuda\", dtype=torch.uint32),\n", + " \"N\": 10000,\n", + " \"R\": 10,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " N, R = 5000000, 10 # Large array with moderate rounds for performance testing\n", + " return {\n", + " \"input\": torch.randint(0, 2147483647 + 1, (N,), device=\"cuda\", dtype=torch.int32),\n", + " \"output\": torch.zeros(N, device=\"cuda\", dtype=torch.uint32),\n", + " \"N\": N,\n", + " \"R\": R,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/2_matrix_multiplication.ipynb b/challenges/colab_exports/easy/2_matrix_multiplication.ipynb new file mode 100644 index 00000000..0010865e --- /dev/null +++ b/challenges/colab_exports/easy/2_matrix_multiplication.ipynb @@ -0,0 +1,518 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Write a program that multiplies two matrices of 32-bit floating point numbers on a GPU.\n Given matrix $A$ of dimensions $M \\times N$ and matrix $B$ of dimensions $N \\times K$, compute\n the product matrix $C = A \\times B$, which will have dimensions $M \\times K$.\n All matrices are stored in row-major format.\n

\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in matrix C
  • \n
\n\n

Example 1:

\n

\nInput:
\nMatrix $A$ ($2 \\times 2$):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 \\\\\n3.0 & 4.0\n\\end{bmatrix}\n$$\nMatrix $B$ ($2 \\times 2$):\n$$\n\\begin{bmatrix}\n5.0 & 6.0 \\\\\n7.0 & 8.0\n\\end{bmatrix}\n$$\nOutput:
\nMatrix $C$ ($2 \\times 2$):\n$$\n\\begin{bmatrix}\n19.0 & 22.0 \\\\\n43.0 & 50.0\n\\end{bmatrix}\n$$\n

\n\n

Example 2:

\n

\nInput:
\nMatrix $A$ ($1 \\times 3$):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0\n\\end{bmatrix}\n$$\nMatrix $B$ ($3 \\times 1$):\n$$\n\\begin{bmatrix}\n4.0 \\\\\n5.0 \\\\\n6.0\n\\end{bmatrix}\n$$\nOutput:
\nMatrix $C$ ($1 \\times 1$):\n$$\n\\begin{bmatrix}\n32.0\n\\end{bmatrix}\n$$\n

\n\n

Constraints

\n
    \n
  • 1 ≤ M, N, K ≤ 8192
  • \n
  • Performance is measured with M = 8192, N = 6144, K = 4096
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void matrix_multiplication_kernel(const float* A, const float* B, float* C, int M, int N,\n int K) {}\n\n// A, B, C are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* A, const float* B, float* C, int M, int N, int K) {\n dim3 threadsPerBlock(16, 16);\n dim3 blocksPerGrid((K + threadsPerBlock.x - 1) / threadsPerBlock.x,\n (M + threadsPerBlock.y - 1) / threadsPerBlock.y);\n\n matrix_multiplication_kernel<<>>(A, B, C, M, N, K);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(\n A: cute.Tensor, B: cute.Tensor, C: cute.Tensor, M: cute.Int32, N: cute.Int32, K: cute.Int32\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, M: int, N: int, K: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef matrix_multiplication_kernel(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n K: Int32,\n):\n pass\n\n\n# A, B, C are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n K: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 16\n var ctx = DeviceContext()\n\n var grid_dim_x = ceildiv(K, BLOCK_SIZE)\n var grid_dim_y = ceildiv(M, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[matrix_multiplication_kernel, matrix_multiplication_kernel]()\n ctx.enqueue_function(\n _kernel,\n A,\n B,\n C,\n M,\n N,\n K,\n grid_dim=(grid_dim_x, grid_dim_y),\n block_dim=(BLOCK_SIZE, BLOCK_SIZE),\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, M: int, N: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + 
"import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef matrix_multiplication_kernel(\n a, b, c, M, N, K, stride_am, stride_an, stride_bn, stride_bk, stride_cm, stride_ck\n):\n pass\n\n\n# a, b, c are tensors on the GPU\ndef solve(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, M: int, N: int, K: int):\n stride_am, stride_an = N, 1\n stride_bn, stride_bk = K, 1\n stride_cm, stride_ck = K, 1\n\n grid = (M, K)\n matrix_multiplication_kernel[grid](\n a, b, c, M, N, K, stride_am, stride_an, stride_bn, stride_bk, stride_cm, stride_ck\n )\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> 
List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Matrix Multiplication\", atol=1e-04, rtol=1e-04, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, M: int, N: int, K: int\n", + " ):\n", + " assert A.shape == (M, N)\n", + " assert B.shape == (N, K)\n", + " assert C.shape == (M, K)\n", + " assert A.dtype == B.dtype == C.dtype\n", + " assert A.device == B.device == C.device\n", + "\n", + " torch.matmul(A, B, out=C)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"C\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, N, K = 2, 2, 2\n", + " A = torch.tensor([[1.0, 2.0], [3.0, 4.0]], device=\"cuda\", dtype=dtype)\n", + " B = torch.tensor([[5.0, 6.0], [7.0, 8.0]], device=\"cuda\", dtype=dtype)\n", + " C = torch.empty(M, K, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"M\": 
M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_specs = [\n", + " # Basic test cases\n", + " (\"basic_2x2\", 2, 2, 2, [[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]),\n", + " (\"basic_1x3_3x1\", 1, 3, 1, [[1.0, 2.0, 3.0]], [[4.0], [5.0], [6.0]]),\n", + " (\n", + " \"identity_matrix\",\n", + " 3,\n", + " 3,\n", + " 3,\n", + " [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],\n", + " [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],\n", + " ),\n", + " (\"zero_matrix\", 2, 2, 2, [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]),\n", + " (\n", + " \"rectangular_matrices\",\n", + " 2,\n", + " 3,\n", + " 1,\n", + " [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],\n", + " [[1.0], [2.0], [3.0]],\n", + " ),\n", + " ]\n", + "\n", + " test_cases = []\n", + " for _, m, n, k, a_vals, b_vals in test_specs:\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor(a_vals, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor(b_vals, device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.empty(m, k, device=\"cuda\", dtype=dtype),\n", + " \"M\": m,\n", + " \"N\": n,\n", + " \"K\": k,\n", + " }\n", + " )\n", + "\n", + " # Random test cases with different sizes\n", + " for _, m, n, k in [\n", + " (\"small_square\", 4, 4, 4),\n", + " (\"medium_rectangular\", 8, 6, 10),\n", + " (\"large_rectangular\", 16, 12, 20),\n", + " (\"tall_matrix\", 32, 8, 16),\n", + " (\"wide_matrix\", 8, 16, 32),\n", + " ]:\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.empty(m, n, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"B\": torch.empty(n, k, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"C\": torch.empty(m, k, device=\"cuda\", dtype=dtype),\n", + " \"M\": m,\n", + " \"N\": n,\n", + " \"K\": k,\n", + " }\n", + " )\n", + "\n", + " # Edge cases\n", + " for _, m, n, k in [\n", + " (\"single_element\", 1, 1, 1),\n", + " (\"single_row\", 
1, 5, 3),\n", + " (\"single_column\", 5, 3, 1),\n", + " (\"max_dimensions\", 8192, 6144, 4096),\n", + " ]:\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.empty(m, n, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"B\": torch.empty(n, k, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.empty(m, k, device=\"cuda\", dtype=dtype),\n", + " \"M\": m,\n", + " \"N\": n,\n", + " \"K\": k,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, N, K = 8192, 6144, 4096\n", + " return {\n", + " \"A\": torch.empty(M, N, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"B\": torch.empty(N, K, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"C\": torch.empty(M, K, device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/31_matrix_copy.ipynb b/challenges/colab_exports/easy/31_matrix_copy.ipynb new file mode 100644 index 00000000..e617c322 --- /dev/null +++ b/challenges/colab_exports/easy/31_matrix_copy.ipynb @@ -0,0 +1,489 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program that copies an $N \\times N$ matrix of 32-bit floating point numbers from input array $A$ to output array $B$ on the GPU. The program should perform a direct element-wise copy so that $B_{i,j} = A_{i,j}$ for all valid indices.\n

\n\n

Implementation Requirements

\n
    \n
  • External libraries are not permitted
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in matrix B
  • \n
\n\n

Example 1:

\n
\nInput:  A = [[1.0, 2.0],\n             [3.0, 4.0]]\nOutput: B = [[1.0, 2.0],\n             [3.0, 4.0]]\n
\n\n

Example 2:

\n
\nInput:  A = [[5.5, 6.6, 7.7],\n             [8.8, 9.9, 10.1],\n             [11.2, 12.3, 13.4]]\nOutput: B = [[5.5, 6.6, 7.7],\n             [8.8, 9.9, 10.1],\n             [11.2, 12.3, 13.4]]\n
\n\n

Constraints

\n
    \n
  • 1 ≤ N ≤ 4096
  • \n
  • All elements are 32-bit floating point numbers
  • \n\n
  • Performance is measured with N = 4,096
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void copy_matrix_kernel(const float* A, float* B, int N) {}\n\n// A, B are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* A, float* B, int N) {\n int total = N * N;\n int threadsPerBlock = 256;\n int blocksPerGrid = (total + threadsPerBlock - 1) / threadsPerBlock;\n copy_matrix_kernel<<>>(A, B, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B are tensors on the GPU\n@cute.jit\ndef solve(A: cute.Tensor, B: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A is a tensor on the GPU\n@jax.jit\ndef solve(A: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, 
block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef copy_matrix_kernel(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# A, B are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var total = N * N\n var threadsPerBlock: Int32 = 256\n var ctx = DeviceContext()\n\n var blocksPerGrid = ceildiv(total, threadsPerBlock)\n\n var _kernel = ctx.compile_function[copy_matrix_kernel, copy_matrix_kernel]()\n ctx.enqueue_function(_kernel, A, B, N, grid_dim=blocksPerGrid, block_dim=threadsPerBlock)\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# a, b are tensors on the GPU\ndef solve(a: torch.Tensor, b: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import 
Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Matrix Copy\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, A: torch.Tensor, B: torch.Tensor, N: int):\n", + " assert A.shape == (N, N)\n", + " 
assert B.shape == (N, N)\n", + " assert A.dtype == B.dtype\n", + " assert A.device == B.device\n", + "\n", + " # Copy matrix A to B\n", + " B[:] = A\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A = torch.tensor([[1.0, 2.0], [3.0, 4.0]], device=\"cuda\", dtype=dtype)\n", + " B = torch.empty(2, 2, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"N\": 2,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + "\n", + " test_cases = []\n", + "\n", + " # basic_2x2\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor([[1.0, 2.0], [3.0, 4.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # all_zeros_4x4\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.zeros((4, 4), device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.zeros((4, 4), device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # identity_3x3\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor(\n", + " [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"B\": torch.zeros((3, 3), device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + "\n", + " # negative_values_2x2\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor([[-1.0, -2.0], [-3.0, -4.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # large_N_16x16\n", + " test_cases.append(\n", + " {\n", + " 
\"A\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"B\": torch.zeros((16, 16), device=\"cuda\", dtype=dtype),\n", + " \"N\": 16,\n", + " }\n", + " )\n", + "\n", + " # single_element\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor([[42.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.zeros((1, 1), device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4096\n", + " return {\n", + " \"A\": torch.empty(N, N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"B\": torch.empty(N, N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/3_matrix_transpose.ipynb b/challenges/colab_exports/easy/3_matrix_transpose.ipynb new file mode 100644 index 00000000..0311077c --- /dev/null +++ b/challenges/colab_exports/easy/3_matrix_transpose.ipynb @@ -0,0 +1,490 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Write a program that transposes a matrix of 32-bit floating point numbers on a GPU. The\n transpose of a matrix switches its rows and columns. Given a matrix $A$ of dimensions $rows \\times cols$, the transpose $A^T$ will have dimensions $cols \\times rows$. All matrices are stored in row-major format.\n

\n\n\n \n \n \n \n \n \n\n \n A\n \n \n \n 1\n \n 2\n \n \n 3\n \n 4\n \n \n 5\n \n 6\n 3 × 2\n\n \n rows → cols\n\n \n Aᵀ\n \n \n \n 1\n \n 3\n \n 5\n \n \n 2\n \n 4\n \n 6\n 2 × 3\n\n \n \n \n \n\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the matrix output
  • \n
\n\n

Example 1:

\n

Input: 2\u00d73 matrix

\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 \\\\\n4.0 & 5.0 & 6.0\n\\end{bmatrix}\n$$\n\n

Output: 3\u00d72 matrix

\n$$\n\\begin{bmatrix}\n1.0 & 4.0 \\\\\n2.0 & 5.0 \\\\\n3.0 & 6.0\n\\end{bmatrix}\n$$\n\n

Example 2:

\n

Input: 3\u00d71 matrix

\n$$\n\\begin{bmatrix}\n1.0 \\\\\n2.0 \\\\\n3.0\n\\end{bmatrix}\n$$\n\n

Output: 1\u00d73 matrix

\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0\n\\end{bmatrix}\n$$\n\n

Constraints

\n
    \n
  • 1 \u2264 rows, cols \u2264 8192
  • \n
  • Input matrix dimensions: rows \u00d7 cols
  • \n
  • Output matrix dimensions: cols \u00d7 rows
  • \n\n
  • Performance is measured with cols = 6,000, rows = 7,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void matrix_transpose_kernel(const float* input, float* output, int rows, int cols) {}\n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* input, float* output, int rows, int cols) {\n dim3 threadsPerBlock(16, 16);\n dim3 blocksPerGrid((cols + threadsPerBlock.x - 1) / threadsPerBlock.x,\n (rows + threadsPerBlock.y - 1) / threadsPerBlock.y);\n\n matrix_transpose_kernel<<>>(input, output, rows, cols);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, rows: cute.Int32, cols: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on GPU\n@jax.jit\ndef solve(input: jax.Array, rows: int, cols: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + 
"cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef matrix_transpose_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n rows: Int32,\n cols: Int32,\n):\n pass\n\n\n# input, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n rows: Int32,\n cols: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 32\n var ctx = DeviceContext()\n\n var grid_dim_x = ceildiv(cols, BLOCK_SIZE)\n var grid_dim_y = ceildiv(rows, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[matrix_transpose_kernel, matrix_transpose_kernel]()\n ctx.enqueue_function(\n _kernel,\n input,\n output,\n rows,\n cols,\n grid_dim=(grid_dim_x, grid_dim_y),\n block_dim=(BLOCK_SIZE, BLOCK_SIZE),\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, rows: int, cols: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef matrix_transpose_kernel(input, output, rows, cols, 
stride_ir, stride_ic, stride_or, stride_oc):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, rows: int, cols: int):\n stride_ir, stride_ic = cols, 1\n stride_or, stride_oc = rows, 1\n\n grid = (rows, cols)\n matrix_transpose_kernel[grid](\n input, output, rows, cols, stride_ir, stride_ic, stride_or, stride_oc\n )\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " 
@abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Matrix Transpose\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, rows: int, cols: int):\n", + " assert input.shape == (rows, cols)\n", + " assert output.shape == (cols, rows)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + "\n", + " output.copy_(input.transpose(0, 1))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"rows\": (ctypes.c_int, \"in\"),\n", + " \"cols\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " rows, cols = 2, 3\n", + " input_tensor = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(cols, rows, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input_tensor,\n", + " \"output\": output_tensor,\n", + " \"rows\": rows,\n", + " \"cols\": cols,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_specs = [\n", + " # Basic test cases\n", + " (\"basic_2x3\", 2, 3, [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),\n", + " (\"basic_3x1\", 3, 1, [[1.0], [2.0], [3.0]]),\n", + " (\"square_2x2\", 2, 2, 
[[1.0, 2.0], [3.0, 4.0]]),\n", + " (\"single_row\", 1, 4, [[1.0, 2.0, 3.0, 4.0]]),\n", + " (\"single_column\", 4, 1, [[1.0], [2.0], [3.0], [4.0]]),\n", + " ]\n", + "\n", + " test_cases = []\n", + " for _, r, c, input_vals in test_specs:\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(input_vals, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(c, r, device=\"cuda\", dtype=dtype),\n", + " \"rows\": r,\n", + " \"cols\": c,\n", + " }\n", + " )\n", + "\n", + " # Random test cases with different sizes\n", + " for _, rows, cols in [\n", + " (\"small_rectangular\", 4, 6),\n", + " (\"medium_square\", 8, 8),\n", + " (\"large_rectangular\", 16, 12),\n", + " (\"tall_matrix\", 32, 8),\n", + " (\"wide_matrix\", 8, 32),\n", + " ]:\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty(rows, cols, device=\"cuda\", dtype=dtype).uniform_(\n", + " -10.0, 10.0\n", + " ),\n", + " \"output\": torch.empty(cols, rows, device=\"cuda\", dtype=dtype),\n", + " \"rows\": rows,\n", + " \"cols\": cols,\n", + " }\n", + " )\n", + "\n", + " # Edge cases\n", + " for _, rows, cols in [\n", + " (\"single_element\", 1, 1),\n", + " (\"max_dimensions\", 8192, 8192),\n", + " ]:\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty(rows, cols, device=\"cuda\", dtype=dtype).uniform_(\n", + " -1.0, 1.0\n", + " ),\n", + " \"output\": torch.empty(cols, rows, device=\"cuda\", dtype=dtype),\n", + " \"rows\": rows,\n", + " \"cols\": cols,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " rows, cols = 7000, 6000\n", + " return {\n", + " \"input\": torch.empty(rows, cols, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"output\": torch.zeros(cols, rows, device=\"cuda\", dtype=dtype),\n", + " \"rows\": rows,\n", + " \"cols\": cols,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/41_simple_inference.ipynb b/challenges/colab_exports/easy/41_simple_inference.ipynb new file mode 100644 index 00000000..280a0552 --- /dev/null +++ b/challenges/colab_exports/easy/41_simple_inference.ipynb @@ -0,0 +1,485 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Run inference on a PyTorch model. Given an input tensor and a trained torch.nn.Linear model, compute the forward pass and store the result in the output tensor.\n

\n\n

\n The model performs a linear transformation: output = input @ weight.T + bias where weight has shape [output_size, input_size] and bias has shape [output_size].\n

\n\n

Implementation Requirements

\n
    \n
  • Use PyTorch's built-in functions and operations
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the output tensor
  • \n
  • The model is already loaded and ready for inference
  • \n
\n\n

Example 1:

\n
\n  Input:  input = [[1.0, 2.0]]  (batch_size=1, input_size=2)\n          model: Linear layer with weight=[[0.5, 1.0], [1.5, 0.5]], bias=[0.1, 0.2]\n  Output: output = [[2.6, 2.7]]  (batch_size=1, output_size=2)\n  
\n\n

Example 2:

\n
\n  Input:  input = [[1.0], [2.0], [3.0]]  (batch_size=3, input_size=1)\n          model: Linear layer with weight=[[2.0]], bias=[1.0]\n  Output: output = [[3.0], [5.0], [7.0]]  (batch_size=3, output_size=1)\n  
\n\n

Constraints

\n
    \n
  • 1 \u2264 batch_size \u2264 1,000
  • \n
  • 1 \u2264 input_size \u2264 1,000
  • \n
  • 1 \u2264 output_size \u2264 1,000
  • \n
  • -10.0 \u2264 input values \u2264 10.0
  • \n\n
  • Performance is measured with batch_size = 1,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input and model are on the GPU\ndef solve(input: jax.Array, model) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport torch.nn as nn\n\n\n# input, model, and output are on the GPU\ndef solve(input: torch.Tensor, model: nn.Module, output: torch.Tensor):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " 
\"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "import torch.nn as nn\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Simple Inference\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, model: nn.Module, output: torch.Tensor):\n", + " assert input.device == output.device\n", + " assert input.dtype == output.dtype\n", + "\n", + " with torch.no_grad():\n", + " result = model(input)\n", + " output.copy_(result)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (torch.Tensor, \"in\"),\n", + " \"model\": (nn.Module, \"in\"),\n", + " \"output\": (torch.Tensor, \"out\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + "\n", + " # Create a simple linear model\n", + " model = nn.Linear(2, 2)\n", + " model.weight.data = torch.tensor([[0.5, 1.0], [1.5, 0.5]], dtype=dtype)\n", + " model.bias.data = 
torch.tensor([0.1, 0.2], dtype=dtype)\n", + " model = model.to(device)\n", + "\n", + " input = torch.tensor([[1.0, 2.0]], device=device, dtype=dtype)\n", + " output = torch.empty((1, 2), device=device, dtype=dtype)\n", + "\n", + " return {\n", + " \"input\": input,\n", + " \"model\": model,\n", + " \"output\": output,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " tests = []\n", + "\n", + " # Test 1: Basic 2->2 linear layer\n", + " model1 = nn.Linear(2, 2)\n", + " model1.weight.data = torch.tensor([[0.5, 1.0], [1.5, 0.5]], dtype=dtype)\n", + " model1.bias.data = torch.tensor([0.1, 0.2], dtype=dtype)\n", + " model1 = model1.to(device)\n", + "\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[1.0, 2.0]], device=device, dtype=dtype),\n", + " \"model\": model1,\n", + " \"output\": torch.empty((1, 2), device=device, dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " # Test 2: Single input/output\n", + " model2 = nn.Linear(1, 1)\n", + " model2.weight.data = torch.tensor([[2.0]], dtype=dtype)\n", + " model2.bias.data = torch.tensor([1.0], dtype=dtype)\n", + " model2 = model2.to(device)\n", + "\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[1.0], [2.0], [3.0]], device=device, dtype=dtype),\n", + " \"model\": model2,\n", + " \"output\": torch.empty((3, 1), device=device, dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " # Test 3: No bias\n", + " model3 = nn.Linear(3, 2, bias=False)\n", + " model3.weight.data = torch.tensor([[1.0, 0.0, -1.0], [0.5, 1.5, 0.0]], dtype=dtype)\n", + " model3 = model3.to(device)\n", + "\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[1.0, 2.0, 3.0], [0.0, 1.0, -1.0]], device=device, dtype=dtype\n", + " ),\n", + " \"model\": model3,\n", + " \"output\": torch.empty((2, 2), device=device, dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " # Test 4: Batch processing\n", + " model4 = 
nn.Linear(4, 3)\n", + " model4.weight.data = torch.randn((3, 4), dtype=dtype) * 0.5\n", + " model4.bias.data = torch.randn(3, dtype=dtype) * 0.1\n", + " model4 = model4.to(device)\n", + "\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randn((8, 4), device=device, dtype=dtype),\n", + " \"model\": model4,\n", + " \"output\": torch.empty((8, 3), device=device, dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " # Test 5: Larger model\n", + " model5 = nn.Linear(10, 5)\n", + " model5.weight.data = torch.randn((5, 10), dtype=dtype) * 0.3\n", + " model5.bias.data = torch.randn(5, dtype=dtype) * 0.2\n", + " model5 = model5.to(device)\n", + "\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randn((16, 10), device=device, dtype=dtype),\n", + " \"model\": model5,\n", + " \"output\": torch.empty((16, 5), device=device, dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " # Test 6: Zero weights\n", + " model6 = nn.Linear(2, 2)\n", + " model6.weight.data = torch.zeros((2, 2), dtype=dtype)\n", + " model6.bias.data = torch.tensor([1.0, -1.0], dtype=dtype)\n", + " model6 = model6.to(device)\n", + "\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[5.0, 10.0]], device=device, dtype=dtype),\n", + " \"model\": model6,\n", + " \"output\": torch.empty((1, 2), device=device, dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " # Test 7: Identity-like transformation\n", + " model7 = nn.Linear(3, 3)\n", + " model7.weight.data = torch.eye(3, dtype=dtype)\n", + " model7.bias.data = torch.zeros(3, dtype=dtype)\n", + " model7 = model7.to(device)\n", + "\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[1.0, 2.0, 3.0], [-1.0, 0.0, 1.0]], device=device, dtype=dtype\n", + " ),\n", + " \"model\": model7,\n", + " \"output\": torch.empty((2, 3), device=device, dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " # Test 8: Single batch, many features\n", + " model8 = nn.Linear(20, 1)\n", + " model8.weight.data = torch.ones((1, 20), dtype=dtype) * 0.05 # 
Sum with scaling\n", + " model8.bias.data = torch.tensor([0.0], dtype=dtype)\n", + " model8 = model8.to(device)\n", + "\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randn((1, 20), device=device, dtype=dtype),\n", + " \"model\": model8,\n", + " \"output\": torch.empty((1, 1), device=device, dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + "\n", + " # Large model for performance testing\n", + " model = nn.Linear(512, 256)\n", + " model.weight.data = torch.randn((256, 512), dtype=dtype) * 0.1\n", + " model.bias.data = torch.randn(256, dtype=dtype) * 0.05\n", + " model = model.to(device)\n", + "\n", + " batch_size = 1000\n", + " input = torch.randn((batch_size, 512), device=device, dtype=dtype)\n", + " output = torch.empty((batch_size, 256), device=device, dtype=dtype)\n", + "\n", + " return {\n", + " \"input\": input,\n", + " \"model\": model,\n", + " \"output\": output,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/52_silu.ipynb b/challenges/colab_exports/easy/52_silu.ipynb new file mode 100644 index 00000000..3777f551 --- /dev/null +++ b/challenges/colab_exports/easy/52_silu.ipynb @@ -0,0 +1,504 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement the SiLU (Sigmoid Linear Unit) activation function forward pass for 1D input vectors.\n Given an input tensor of shape [N] where N is the number of elements, compute the output using the elementwise formula.\n

\n\n

\n SiLU is defined as:\n $$\n \\begin{align}\n \\sigma(x) &= \\frac{1}{1 + e^{-x}} \\\\\n \\text{SiLU}(x) &= x \\cdot \\sigma(x)\n \\end{align}\n $$\n

\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the output tensor
  • \n
\n\n

Example 1:

\n
\nInput:  input = [0.5, 1.0, -0.5]  (N=3)\nOutput: output = [0.3112295, 0.731059, -0.1887705]\n
\n\n

Example 2:

\n
\nInput:  input = [-1.0, -2.0, -3.0, -4.0, -5.0]  (N=5)\nOutput: output = [-0.26894143, -0.23840584, -0.14227763, -0.07194484, -0.03346425]\n
\n\n

Constraints

\n
    \n
  • 1 \u2264 N \u2264 10,000
  • \n
  • -100.0 \u2264 input values \u2264 100.0
  • \n\n
  • Performance is measured with N = 50,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void silu_kernel(const float* input, float* output, int N) {}\n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n silu_kernel<<>>(input, output, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, 
thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef silu_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var threadsPerBlock: Int32 = 256\n var ctx = DeviceContext()\n\n var blocksPerGrid = ceildiv(N, threadsPerBlock)\n\n var _kernel = ctx.compile_function[silu_kernel, silu_kernel]()\n ctx.enqueue_function(\n _kernel, input, output, N, grid_dim=blocksPerGrid, block_dim=threadsPerBlock\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef silu_kernel(input, output, n_elements, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N, BLOCK_SIZE),)\n silu_kernel[grid](input, output, N, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 
null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " 
name=\"Sigmoid Linear Unit\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert input.shape == output.shape == (N,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + "\n", + " # Scale and shift\n", + " output.copy_(input * torch.sigmoid(input))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4\n", + " input = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(N, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_small\n", + " N = 3\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([0.5, 1.0, -0.5], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # single_element\n", + " N = 1\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([2.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # all zeros\n", + " N = 42\n", + " tests.append(\n", + " {\n", + " \"input\": torch.zeros(N, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # negative numbers\n", + " N = 5\n", + " tests.append(\n", + " {\n", + " 
\"input\": torch.tensor([-1.0, -2.0, -3.0, -4.0, -5.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # mixed positive/negative\n", + " N = 4\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-0.5, 0.0, 0.5, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # large values\n", + " N = 1024\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # large N\n", + " N = 10000\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-50.0, 50.0),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 50000\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-50.0, 50.0),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/54_swiglu.ipynb b/challenges/colab_exports/easy/54_swiglu.ipynb new file mode 100644 index 00000000..e594d858 --- /dev/null +++ b/challenges/colab_exports/easy/54_swiglu.ipynb @@ -0,0 +1,498 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement the Swish-Gated Linear Unit (SWiGLU) activation function forward pass for 1D input vectors.\n Given an input tensor of shape [N] where N is the number of elements, compute the output using the elementwise formula.\n The input and output tensor must be of type float32.\n

\n\n

\n SWiGLU is defined as:\n

    \n
  1. Split input $x$ into two halves: $x_1$ and $x_2$
  2. \n
  3. Compute SiLU on the first half:\n $$\n \\text{SiLU}(x_1) = x_1 \\cdot \\sigma(x_1), \\quad\n \\sigma(x) = \\frac{1}{1 + e^{-x}}\n $$\n
  4. \n
  5. Compute the SWiGLU output:\n $$\n \\text{SWiGLU}(x_1, x_2) = \\text{SiLU}(x_1) \\cdot x_2\n $$\n
  6. \n
\n

\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the output tensor
  • \n
\n\n

Example 1:

\n
\nInput:  [1.0, 2.0, 3.0, 4.0]  (N=4)\nOutput: [2.1931758, 7.0463767]\n
\n\n

Example 2:

\n
\nInput:  [0.5, 1.0]  (N=2)\nOutput: [0.31122968]\n
\n\n

Constraints

\n
    \n
  • 1 \u2264 N \u2264 100,000
  • \n
  • N is an even number
  • \n
  • -100.0 \u2264 input values \u2264 100.0
  • \n\n
  • Performance is measured with N = 100,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void swiglu_kernel(const float* input, float* output, int halfN) {}\n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int N) {\n int halfN = N / 2;\n int threadsPerBlock = 256;\n int blocksPerGrid = (halfN + threadsPerBlock - 1) / threadsPerBlock;\n\n swiglu_kernel<<>>(input, output, halfN);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom 
std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef swiglu_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 256\n var ctx = DeviceContext()\n var num_blocks = ceildiv(N // 2, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[swiglu_kernel, swiglu_kernel]()\n ctx.enqueue_function(_kernel, input, output, N, grid_dim=num_blocks, block_dim=BLOCK_SIZE)\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef swiglu(input, output, N, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N // 2, BLOCK_SIZE),)\n swiglu[grid](input, output, N, BLOCK_SIZE=BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " 
super().__init__(\n", + " name=\"Swish-Gated Linear Unit\", atol=1e-04, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert N % 2 == 0\n", + " assert input.shape == (N,)\n", + " assert output.shape == (N // 2,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + "\n", + " x1, x2 = input.chunk(2)\n", + " output.copy_((x1 * torch.sigmoid(x1)) * x2)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4\n", + " input = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(N // 2, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_small\n", + " N = 2\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([0.5, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # all zeros\n", + " N = 42\n", + " tests.append(\n", + " {\n", + " \"input\": torch.zeros(N, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # negative numbers\n", + " N = 6\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [-1.0, -2.0, -3.0, -4.0, -5.0, -6.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(N // 2, 
device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # mixed positive/negative\n", + " N = 4\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-0.5, 0.0, -1.5, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # large values\n", + " N = 1024\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # large N\n", + " N = 2048\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-50.0, 50.0),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 100000\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/62_value_clipping.ipynb b/challenges/colab_exports/easy/62_value_clipping.ipynb new file mode 100644 index 00000000..b81313a3 --- /dev/null +++ b/challenges/colab_exports/easy/62_value_clipping.ipynb @@ -0,0 +1,510 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a GPU program that performs clipping on 1D input vectors.\n Given an input tensor of shape [N] where N is the number of elements,\n compute the output by clipping each element to a specified range [lo, hi].\n The input and output tensor must be of type float32.\n

\n\n

\n Clipping is defined as:\n

    \n
  1. For each element x in the input tensor, \"clip\" the element so that it falls within the allowed range [lo, hi].\n
  2. Formally, output[i] = min(max(input[i], lo), hi).\n
  3. This operation ensures all values are within the specified range and is commonly used in ML for activation stabilization and pre-quantization.
  4. \n
\n

\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the output tensor
  • \n
\n\n

Example 1:

\n
\nInput:  [1.5, -2.0, 3.0, 4.5], lo = 0.0, hi = 3.5\nOutput: [1.5, 0.0, 3.0, 3.5]\n
\n\n

Example 2:

\n
\nInput:  [-1.0, 2.0, 5.0], lo = -0.5, hi = 2.5\nOutput: [-0.5, 2.0, 2.5]\n
\n\n

Constraints

\n
    \n
  • 1 \u2264 N \u2264 100,000
  • \n
  • -10^6 \u2264 input[i] \u2264 10^6
  • \n
  • lo \u2264 hi
  • \n\n
  • Performance is measured with N = 100,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void clip_kernel(const float* input, float* output, float lo, float hi, int N) {}\n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, float lo, float hi, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n clip_kernel<<>>(input, output, lo, hi, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor, output: cute.Tensor, lo: cute.Float32, hi: cute.Float32, N: cute.Int32\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, lo: float, hi: float, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + 
"%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef clip_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n lo: Float32,\n hi: Float32,\n N: Int32,\n):\n pass\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n lo: Float32,\n hi: Float32,\n N: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 256\n var ctx = DeviceContext()\n var num_blocks = ceildiv(N, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[clip_kernel, clip_kernel]()\n ctx.enqueue_function(\n _kernel, input, output, lo, hi, N, grid_dim=num_blocks, block_dim=BLOCK_SIZE\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, lo: float, hi: float, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef clip_kernel(input, output, lo, hi, N, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, lo: float, hi: float, N: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N, BLOCK_SIZE),)\n 
clip_kernel[grid](input, output, lo, hi, N, BLOCK_SIZE=BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic 
---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Value Clipping\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, input: torch.Tensor, output: torch.Tensor, lo: float, hi: float, N: int\n", + " ):\n", + " assert input.shape == (N,)\n", + " assert output.shape == (N,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + " output.copy_(input.clamp(min=lo, max=hi))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"lo\": (ctypes.c_float, \"in\"),\n", + " \"hi\": (ctypes.c_float, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4\n", + " input = torch.tensor([1.5, -2.0, 3.0, 4.5], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(N, device=\"cuda\", dtype=dtype)\n", + " lo, hi = 0.0, 3.5\n", + " return {\"input\": input, \"output\": output, \"lo\": lo, \"hi\": hi, \"N\": N}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # Example 2\n", + " N = 3\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-1.0, 2.0, 5.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"lo\": -0.5,\n", + " \"hi\": 2.5,\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # all zeros\n", + " N = 42\n", + " tests.append(\n", + " {\n", + " \"input\": torch.zeros(N, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " 
\"lo\": -1.0,\n", + " \"hi\": 1.0,\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # negative numbers\n", + " N = 6\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [-1.0, -2.0, -3.0, -4.0, -5.0, -6.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"lo\": -3.0,\n", + " \"hi\": -1.0,\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # mixed positive/negative\n", + " N = 4\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-0.5, 0.0, -1.5, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"lo\": -1.0,\n", + " \"hi\": 0.5,\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # large values\n", + " N = 1024\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"lo\": -50.9,\n", + " \"hi\": 50.1,\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # large N\n", + " N = 2048\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-50.0, 50.0),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"lo\": -25.5,\n", + " \"hi\": 25.05,\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 100000\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"lo\": -51.24,\n", + " \"hi\": 39.51,\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + 
"import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/63_interleave.ipynb b/challenges/colab_exports/easy/63_interleave.ipynb new file mode 100644 index 00000000..3e5ff0f8 --- /dev/null +++ b/challenges/colab_exports/easy/63_interleave.ipynb @@ -0,0 +1,542 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Write a GPU program that interleaves two arrays of 32-bit floating point numbers.\n Given two input arrays A and B, each of length N,\n produce an output array of length 2N where elements alternate between the two inputs:\n [A[0], B[0], A[1], B[1], A[2], B[2], ...]\n

\n\n\n \n \n \n \n \n \n \n \n \n\n \n A\n \n \n a₀\n \n a₁\n \n a₂\n \n a₃\n\n \n B\n \n \n b₀\n \n b₁\n \n b₂\n \n b₃\n\n \n output\n \n \n a₀\n \n b₀\n \n a₁\n \n b₁\n \n a₂\n \n b₂\n \n a₃\n \n b₃\n\n \n \n \n \n \n\n \n \n \n \n \n\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the output array
  • \n
\n\n

Example 1:

\n
\nInput:  A = [1.0, 2.0, 3.0], B = [4.0, 5.0, 6.0]\nOutput: [1.0, 4.0, 2.0, 5.0, 3.0, 6.0]\n
\n\n

Example 2:

\n
\nInput:  A = [10.0, 20.0], B = [30.0, 40.0]\nOutput: [10.0, 30.0, 20.0, 40.0]\n
\n\n

Constraints

\n
    \n
  • 1 ≤ N ≤ 50,000,000
  • \n\n
  • Performance is measured with N = 25,000,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void interleave_kernel(const float* A, const float* B, float* output, int N) {}\n\n// A, B, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* A, const float* B, float* output, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n interleave_kernel<<>>(A, B, output, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, output are tensors on the GPU\n@cute.jit\ndef solve(A: cute.Tensor, B: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on the GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile 
solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef interleave_kernel(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# A, B, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 256\n var ctx = DeviceContext()\n var num_blocks = ceildiv(N, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[interleave_kernel, interleave_kernel]()\n ctx.enqueue_function(_kernel, A, B, output, N, grid_dim=num_blocks, block_dim=BLOCK_SIZE)\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, output are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef interleave_kernel(A_ptr, B_ptr, output_ptr, N, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# A, B, output are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, output: torch.Tensor, N: int):\n BLOCK_SIZE = 
256\n\n def grid(meta):\n return (triton.cdiv(N, meta[\"BLOCK_SIZE\"]),)\n\n interleave_kernel[grid](A, B, output, N, BLOCK_SIZE=BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", 
+ " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Interleave Arrays\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, A: torch.Tensor, B: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert A.shape == (N,)\n", + " assert B.shape == (N,)\n", + " assert output.shape == (2 * N,)\n", + " assert A.dtype == B.dtype == output.dtype == torch.float32\n", + "\n", + " # Interleave: [A[0], B[0], A[1], B[1], ...]\n", + " output[0::2] = A\n", + " output[1::2] = B\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A = torch.tensor([1.0, 2.0, 3.0], device=\"cuda\", dtype=dtype)\n", + " B = torch.tensor([4.0, 5.0, 6.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(6, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"output\": output,\n", + " \"N\": 3,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # Basic example\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1.0, 2.0, 3.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([4.0, 5.0, 6.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(6, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + "\n", + " # Single element\n", + " tests.append(\n", + " {\n", + 
" \"A\": torch.tensor([1.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([2.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + "\n", + " # Two elements\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([10.0, 20.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([30.0, 40.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # Negative values\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([-1.0, -2.0, -3.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([-4.0, -5.0, -6.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(6, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + "\n", + " # Mixed positive and negative\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1.0, -2.0, 3.0, -4.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([-1.0, 2.0, -3.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(8, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # Zeros\n", + " tests.append(\n", + " {\n", + " \"A\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.ones(5, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(10, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " # Large values\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1e10, 1e-10], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([1e-10, 1e10], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # Medium size random\n", + " N = 1024\n", + " tests.append(\n", + " {\n", + " \"A\": torch.randn(N, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.randn(N, 
device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2 * N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # Larger random\n", + " N = 10000\n", + " tests.append(\n", + " {\n", + " \"A\": torch.randn(N, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.randn(N, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2 * N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # Even larger\n", + " N = 100000\n", + " tests.append(\n", + " {\n", + " \"A\": torch.randn(N, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.randn(N, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2 * N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 25000000 # 25 million elements each, 50 million output\n", + " return {\n", + " \"A\": torch.randn(N, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.randn(N, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2 * N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/65_geglu.ipynb b/challenges/colab_exports/easy/65_geglu.ipynb new file mode 100644 index 00000000..cfdb6881 --- /dev/null +++ b/challenges/colab_exports/easy/65_geglu.ipynb @@ -0,0 +1,504 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement the Gaussian Error Gated Linear Unit (GEGLU) activation function forward pass for 1D input\n vectors. Given an input tensor of shape [N] where N is the number of elements, compute the output\n using the elementwise formula. The input and output tensor must be of type float32.\n

\n\n

\n GEGLU is defined as:\n

    \n
  1. Split input $x$ into two halves: $x_1$ and $x_2$
  2. \n
  3. Compute GELU on the second half:\n $$\n \\text{GELU}(x_2) = \\frac{1}{2} x_2 \\left(1 + \\text{erf}\\left(\\frac{x_2}{\\sqrt{2}}\\right)\\right)\n $$\n
  4. \n
  5. Compute the GEGLU output:\n $$\n \\text{GEGLU}(x_1, x_2) = x_1 \\cdot \\text{GELU}(x_2)\n $$\n
  6. \n
\n

\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the output tensor
  • \n
\n\n

Example 1:

\n
\nInput:  [1.0, 1.0]  (N=2)\nOutput: [0.8413447]\n
\n\n

Example 2:

\n
\nInput:  [2.0, -1.0, 1.0, 0.5]  (N=4)\nOutput: [1.6826895, -0.3457312]\n
\n\n

Constraints

\n
    \n
  • 1 \u2264 N \u2264 1,000,000
  • \n
  • N is an even number
  • \n
  • -100.0 \u2264 input values \u2264 100.0
  • \n\n
  • Performance is measured with N = 1,000,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void geglu_kernel(const float* input, float* output, int halfN) {}\n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int N) {\n int halfN = N / 2;\n int threadsPerBlock = 256;\n int blocksPerGrid = (halfN + threadsPerBlock - 1) / threadsPerBlock;\n\n geglu_kernel<<>>(input, output, halfN);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom 
std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef geglu_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 256\n var ctx = DeviceContext()\n var num_blocks = ceildiv(N // 2, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[geglu_kernel, geglu_kernel]()\n ctx.enqueue_function(_kernel, input, output, N, grid_dim=num_blocks, block_dim=BLOCK_SIZE)\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef geglu(input, output, N, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N // 2, BLOCK_SIZE),)\n geglu[grid](input, output, N, BLOCK_SIZE=BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "import math\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " 
super().__init__(\n", + " name=\"Gaussian Error Gated Linear Unit\",\n", + " atol=1e-04,\n", + " rtol=1e-04,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert N % 2 == 0\n", + " assert input.shape == (N,)\n", + " assert output.shape == (N // 2,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + "\n", + " x1, x2 = input.chunk(2)\n", + " gelu = 0.5 * x2 * (1.0 + torch.erf(x2 / math.sqrt(2.0)))\n", + " output.copy_(x1 * gelu)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 2\n", + " input = torch.tensor([1.0, 1.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(N // 2, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_small\n", + " N = 4\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([2.0, -1.0, 1.0, 0.5], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # all zeros\n", + " N = 42\n", + " tests.append(\n", + " {\n", + " \"input\": torch.zeros(N, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # negative numbers\n", + " N = 6\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [-1.0, -2.0, -3.0, -4.0, -5.0, -6.0], 
device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # mixed positive/negative\n", + " N = 4\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-0.5, 0.0, -1.5, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # large values\n", + " N = 1024\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # large N\n", + " N = 100000\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-50.0, 50.0),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 1000000\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"output\": torch.empty(N // 2, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/66_rgb_to_grayscale.ipynb b/challenges/colab_exports/easy/66_rgb_to_grayscale.ipynb new file mode 100644 index 00000000..ae1de095 --- /dev/null +++ b/challenges/colab_exports/easy/66_rgb_to_grayscale.ipynb @@ -0,0 +1,597 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a GPU program that converts an RGB image to grayscale on the GPU.\n Given an input RGB image represented as a 1D array of 32-bit floating point values,\n compute the corresponding grayscale image using the standard RGB to grayscale conversion formula.\n

\n\n

\n The conversion formula is: gray = 0.299 \u00d7 R + 0.587 \u00d7 G + 0.114 \u00d7 B\n

\n\n

\n The input array input contains height \u00d7 width \u00d7 3 elements,\n where the RGB values for each pixel are stored consecutively (R, G, B, R, G, B, ...).\n The output array output should contain height \u00d7 width grayscale values.\n

\n\n

Implementation Requirements

\n
    \n
  • External libraries are not permitted
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the array output
  • \n
  • Use the exact coefficients: 0.299 for red, 0.587 for green, 0.114 for blue
  • \n
\n\n

Example 1:

\n
\nInput:  input = [255.0, 0.0, 0.0, 0.0, 255.0, 0.0, 0.0, 0.0, 255.0, 128.0, 128.0, 128.0], width=2, height=2\nOutput: output = [76.245, 149.685, 29.07, 128.0]\n
\n\n

Example 2:

\n
\nInput:  input = [100.0, 150.0, 200.0], width=1, height=1\nOutput: output = [140.75]\n
\n\n

Constraints

\n
    \n
  • 1 \u2264 width \u2264 4096
  • \n
  • 1 \u2264 height \u2264 4096
  • \n
  • width \u00d7 height \u2264 4,194,304
  • \n
  • All RGB values are in the range [0.0, 255.0]
  • \n\n
  • Performance is measured with height = 2,048, width = 2,048
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void rgb_to_grayscale_kernel(const float* input, float* output, int width, int height) {}\n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int width, int height) {\n int total_pixels = width * height;\n int threadsPerBlock = 256;\n int blocksPerGrid = (total_pixels + threadsPerBlock - 1) / threadsPerBlock;\n\n rgb_to_grayscale_kernel<<>>(input, output, width, height);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, width: cute.Int32, height: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on GPU\n@jax.jit\ndef solve(input: jax.Array, width: int, height: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + 
}, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef rgb_to_grayscale_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n width: Int32,\n height: Int32,\n):\n pass\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n width: Int32,\n height: Int32,\n) raises:\n var total_pixels = width * height\n var BLOCK_SIZE: Int32 = 256\n var ctx = DeviceContext()\n var num_blocks = ceildiv(total_pixels, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[rgb_to_grayscale_kernel, rgb_to_grayscale_kernel]()\n ctx.enqueue_function(\n _kernel, input, output, width, height, grid_dim=num_blocks, block_dim=BLOCK_SIZE\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, width: int, height: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef rgb_to_grayscale_kernel(input, output, width, height, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, 
output: torch.Tensor, width: int, height: int):\n total_pixels = width * height\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(total_pixels, BLOCK_SIZE),)\n rgb_to_grayscale_kernel[grid](input, output, width, height, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this 
problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"RGB to Grayscale\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, width: int, height: int):\n", + " assert input.shape == (height * width * 3,)\n", + " assert output.shape == (height * width,)\n", + " assert input.dtype == output.dtype == torch.float32\n", + " assert input.device == output.device\n", + "\n", + " # Reshape input to (height, width, 3) for easier processing\n", + " rgb_image = input.view(height, width, 3)\n", + "\n", + " # Apply RGB to grayscale conversion: gray = 0.299*R + 0.587*G + 0.114*B\n", + " grayscale = (\n", + " 0.299 * rgb_image[:, :, 0] + 0.587 * rgb_image[:, :, 1] + 0.114 * rgb_image[:, :, 2]\n", + " )\n", + "\n", + " # Flatten and store in output\n", + " output.copy_(grayscale.flatten())\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"width\": (ctypes.c_int, \"in\"),\n", + " \"height\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " width, height = 2, 2\n", + " # RGB values for a 2x2 image\n", + " # Pixel (0,0): R=255, G=0, B=0 (red)\n", + " # Pixel (0,1): R=0, G=255, B=0 (green)\n", + " # Pixel (1,0): R=0, G=0, B=255 (blue)\n", + " # Pixel (1,1): R=128, G=128, B=128 (gray)\n", + " input_data = torch.tensor(\n", + " [\n", + " 255.0,\n", + " 0.0,\n", + " 0.0, # red\n", + " 0.0,\n", + " 255.0,\n", + " 0.0, # green\n", + " 0.0,\n", + " 0.0,\n", + 
" 255.0, # blue\n", + " 128.0,\n", + " 128.0,\n", + " 128.0, # gray\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=torch.float32,\n", + " )\n", + " output = torch.zeros(width * height, device=\"cuda\", dtype=torch.float32)\n", + " return {\n", + " \"input\": input_data,\n", + " \"output\": output,\n", + " \"width\": width,\n", + " \"height\": height,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " test_cases = []\n", + "\n", + " # Small test cases\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [255.0, 0.0, 0.0], device=\"cuda\", dtype=torch.float32\n", + " ), # red pixel\n", + " \"output\": torch.zeros(1, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": 1,\n", + " \"height\": 1,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [0.0, 255.0, 0.0], device=\"cuda\", dtype=torch.float32\n", + " ), # green pixel\n", + " \"output\": torch.zeros(1, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": 1,\n", + " \"height\": 1,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [0.0, 0.0, 255.0], device=\"cuda\", dtype=torch.float32\n", + " ), # blue pixel\n", + " \"output\": torch.zeros(1, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": 1,\n", + " \"height\": 1,\n", + " }\n", + " )\n", + "\n", + " # 2x2 test case\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [\n", + " 100.0,\n", + " 150.0,\n", + " 200.0, # mixed color 1\n", + " 50.0,\n", + " 75.0,\n", + " 100.0, # mixed color 2\n", + " 200.0,\n", + " 100.0,\n", + " 50.0, # mixed color 3\n", + " 75.0,\n", + " 125.0,\n", + " 175.0, # mixed color 4\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=torch.float32,\n", + " ),\n", + " \"output\": torch.zeros(4, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": 2,\n", + " \"height\": 2,\n", + " }\n", + " )\n", + "\n", + " # Edge 
cases: zeros and max values\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.zeros(3, device=\"cuda\", dtype=torch.float32),\n", + " \"output\": torch.zeros(1, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": 1,\n", + " \"height\": 1,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.full((3,), 255.0, device=\"cuda\", dtype=torch.float32),\n", + " \"output\": torch.zeros(1, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": 1,\n", + " \"height\": 1,\n", + " }\n", + " )\n", + "\n", + " # Larger test cases\n", + " for size in [4, 8, 16, 32]:\n", + " input_size = size * size * 3\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.randint(\n", + " 0, 256, (input_size,), device=\"cuda\", dtype=torch.float32\n", + " ),\n", + " \"output\": torch.zeros(size * size, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": size,\n", + " \"height\": size,\n", + " }\n", + " )\n", + "\n", + " # Larger realistic sizes\n", + " for w, h in [(100, 100), (64, 48)]:\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty(h * w * 3, device=\"cuda\", dtype=torch.float32).uniform_(\n", + " 0.0, 255.0\n", + " ),\n", + " \"output\": torch.zeros(h * w, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": w,\n", + " \"height\": h,\n", + " }\n", + " )\n", + "\n", + " # Non-square images\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.randint(\n", + " 0, 256, (2 * 3 * 3,), device=\"cuda\", dtype=torch.float32\n", + " ), # 2x3 image\n", + " \"output\": torch.zeros(2 * 3, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": 3,\n", + " \"height\": 2,\n", + " }\n", + " )\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.randint(\n", + " 0, 256, (3 * 2 * 3,), device=\"cuda\", dtype=torch.float32\n", + " ), # 3x2 image\n", + " \"output\": torch.zeros(3 * 2, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": 2,\n", + " \"height\": 3,\n", + " }\n", + " )\n", + 
"\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " width, height = 2048, 2048\n", + " input_size = width * height * 3\n", + " output_size = width * height\n", + " return {\n", + " \"input\": torch.randint(0, 256, (input_size,), device=\"cuda\", dtype=torch.float32),\n", + " \"output\": torch.zeros(output_size, device=\"cuda\", dtype=torch.float32),\n", + " \"width\": width,\n", + " \"height\": height,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/68_sigmoid.ipynb b/challenges/colab_exports/easy/68_sigmoid.ipynb new file mode 100644 index 00000000..9d1cdb29 --- /dev/null +++ b/challenges/colab_exports/easy/68_sigmoid.ipynb @@ -0,0 +1,472 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Write a GPU program that applies the sigmoid activation function element-wise to a vector of\n 32-bit floating point numbers. For each element x in the input vector X,\n compute sigmoid(x) = 1 / (1 + exp(-x)) and store the result in the output vector\n Y. The sigmoid function maps any real number to the range (0, 1).\n

\n\n

Implementation Requirements

\n
    \n
  • External libraries are not permitted
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in vector Y
  • \n
\n\n

Example 1:

\n
\nInput:  X = [0.0, 1.0, -1.0, 2.0]\nOutput: Y = [0.5, 0.7311, 0.2689, 0.8808]\n
\n\n

Example 2:

\n
\nInput:  X = [0.5, -0.5, 3.0, -3.0]\nOutput: Y = [0.6225, 0.3775, 0.9526, 0.0474]\n
\n\n

Constraints

\n
    \n
  • 1 ≤ N ≤ 100,000,000
  • \n
  • Input values are finite 32-bit floating point numbers
  • \n
  • Performance is measured with N = 50,000,000
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n#include \n\n__global__ void sigmoid_kernel(const float* X, float* Y, int N) {}\n\n// X, Y are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* X, float* Y, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n sigmoid_kernel<<>>(X, Y, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# X, Y are tensors on the GPU\n@cute.jit\ndef solve(X: cute.Tensor, Y: cute.Tensor, N: cute.Uint32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# X is a tensor on GPU\n@jax.jit\ndef solve(X: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, 
thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef sigmoid_kernel(\n X: UnsafePointer[Float32, MutExternalOrigin],\n Y: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# X, Y are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n X: UnsafePointer[Float32, MutExternalOrigin],\n Y: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 256\n var ctx = DeviceContext()\n var num_blocks = ceildiv(N, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[sigmoid_kernel, sigmoid_kernel]()\n ctx.enqueue_function(_kernel, X, Y, N, grid_dim=num_blocks, block_dim=BLOCK_SIZE)\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# X, Y are tensors on the GPU\ndef solve(X: torch.Tensor, Y: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef sigmoid_kernel(x_ptr, y_ptr, n_elements, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# X, Y are tensors on the GPU\ndef solve(X: torch.Tensor, Y: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n grid = (triton.cdiv(N, BLOCK_SIZE),)\n sigmoid_kernel[grid](X, Y, N, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + 
"outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Sigmoid Activation\", atol=1e-05, rtol=1e-05, num_gpus=1, 
access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, X: torch.Tensor, Y: torch.Tensor, N: int):\n", + " assert X.shape == Y.shape\n", + " assert X.dtype == torch.float32\n", + " assert Y.dtype == torch.float32\n", + " assert X.device.type == \"cuda\"\n", + " assert Y.device.type == \"cuda\"\n", + "\n", + " torch.sigmoid(X, out=Y)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"X\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"Y\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_size_t, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4\n", + " X = torch.tensor([0.0, 1.0, -1.0, 2.0], device=\"cuda\", dtype=dtype)\n", + " Y = torch.empty(N, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"X\": X,\n", + " \"Y\": Y,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + "\n", + " test_specs = [\n", + " (\"single_zero\", [0.0]),\n", + " (\"single_positive\", [1.0]),\n", + " (\"single_negative\", [-1.0]),\n", + " (\"basic_small\", [0.0, 1.0, -1.0, 2.0]),\n", + " (\"all_zeros\", [0.0] * 16),\n", + " (\"large_positives\", [10.0, 20.0, 100.0, 1000.0]),\n", + " (\"large_negatives\", [-10.0, -20.0, -100.0, -1000.0]),\n", + " (\"mixed_values\", [0.5, -0.5, 1.5, -1.5, 3.0, -3.0, 0.0, 7.0]),\n", + " ]\n", + "\n", + " test_cases = []\n", + " for _, x_vals in test_specs:\n", + " n = len(x_vals)\n", + " test_cases.append(\n", + " {\n", + " \"X\": torch.tensor(x_vals, device=\"cuda\", dtype=dtype),\n", + " \"Y\": torch.empty(n, device=\"cuda\", dtype=dtype),\n", + " \"N\": n,\n", + " }\n", + " )\n", + "\n", + " # Random and structured test cases\n", + " for size, low, high in [\n", + " (32, -5.0, 5.0),\n", + " (100, -3.0, 3.0),\n", + " (255, -10.0, 10.0),\n", + " (1024, -1.0, 1.0),\n", + " (10000, -5.0, 5.0),\n", + " ]:\n", 
+ " test_cases.append(\n", + " {\n", + " \"X\": torch.empty(size, device=\"cuda\", dtype=dtype).uniform_(low, high),\n", + " \"Y\": torch.empty(size, device=\"cuda\", dtype=dtype),\n", + " \"N\": size,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 50000000\n", + " return {\n", + " \"X\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"Y\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/7_color_inversion.ipynb b/challenges/colab_exports/easy/7_color_inversion.ipynb new file mode 100644 index 00000000..1c0094e1 --- /dev/null +++ b/challenges/colab_exports/easy/7_color_inversion.ipynb @@ -0,0 +1,483 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Write a program to invert the colors of an image. The image is\n represented as a 1D array of RGBA (Red, Green, Blue, Alpha) values, where each\n component is an 8-bit unsigned integer (unsigned char).\n

\n\n

\n Color inversion is performed by subtracting each color component (R, G, B)\n from 255. The Alpha component should remain unchanged.\n

\n\n

\n The input array\n image will contain width * height * 4 elements. The\n first 4 elements represent the RGBA values of the top-left pixel, the next 4\n elements represent the pixel to its right, and so on.\n

\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • \n The final result must be stored in the array\n image\n
  • \n
\n\n

Example 1:

\n
\nInput: image = [255, 0, 128, 255, 0, 255, 0, 255], width=1, height=2\nOutput: [0, 255, 127, 255, 255, 0, 255, 255]\n
\n\n

Example 2:

\n
\nInput: image = [10, 20, 30, 255, 100, 150, 200, 255], width=2, height=1\nOutput: [245, 235, 225, 255, 155, 105, 55, 255]\n
\n\n

Constraints

\n\n
    \n
  • 1 ≤ width ≤ 4096
  • \n
  • 1 ≤ height ≤ 4096
  • \n
  • width * height ≤ 8,388,608.
  • \n\n
  • Performance is measured with height = 5,120, width = 4,096
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void invert_kernel(unsigned char* image, int width, int height) {}\n// image_input, image_output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(unsigned char* image, int width, int height) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (width * height + threadsPerBlock - 1) / threadsPerBlock;\n\n invert_kernel<<>>(image, width, height);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# image are tensors on the GPU\n@cute.jit\ndef solve(image: cute.Tensor, width: cute.Int32, height: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# image is a tensor on the GPU\n@jax.jit\ndef solve(image: jax.Array, width: int, height: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", 
+ "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef invert_kernel(image: UnsafePointer[UInt8, MutExternalOrigin], width: Int32, height: Int32):\n pass\n\n\n# image is a device pointer (i.e. pointer to memory on the GPU)\n@export\ndef solve(image: UnsafePointer[UInt8, MutExternalOrigin], width: Int32, height: Int32) raises:\n var threadsPerBlock: Int32 = 256\n var ctx = DeviceContext()\n\n var total_pixels = width * height\n var blocksPerGrid = ceildiv(total_pixels, threadsPerBlock)\n\n var _kernel = ctx.compile_function[invert_kernel, invert_kernel]()\n ctx.enqueue_function(\n _kernel, image, width, height, grid_dim=blocksPerGrid, block_dim=threadsPerBlock\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# image is a tensor on the GPU\ndef solve(image: torch.Tensor, width: int, height: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef invert_kernel(image, width, height, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# image is a tensor on the GPU\ndef solve(image: torch.Tensor, width: int, height: int):\n BLOCK_SIZE = 1024\n n_pixels = width * height\n grid = (triton.cdiv(n_pixels, BLOCK_SIZE),)\n\n invert_kernel[grid](image, width, height, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, 
+ "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class 
Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Color Inversion\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, image: torch.Tensor, width: int, height: int):\n", + " assert image.shape == (height * width * 4,)\n", + " assert image.dtype == torch.uint8\n", + "\n", + " # Reshape to (height, width, 4) for easier processing\n", + " image_reshaped = image.view(height, width, 4)\n", + "\n", + " # Invert RGB channels (first 3 channels), keep alpha unchanged\n", + " image_reshaped[:, :, :3] = 255 - image_reshaped[:, :, :3]\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"image\": (ctypes.POINTER(ctypes.c_ubyte), \"inout\"),\n", + " \"width\": (ctypes.c_int, \"in\"),\n", + " \"height\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " width, height = 1, 2\n", + " image = torch.tensor([255, 0, 128, 255, 0, 255, 0, 255], device=\"cuda\", dtype=torch.uint8)\n", + " return {\n", + " \"image\": image,\n", + " \"width\": width,\n", + " \"height\": height,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " return [\n", + " {\n", + " \"image\": torch.tensor(\n", + " [\n", + " [[255, 0, 0, 255], [0, 255, 0, 255]],\n", + " [[0, 0, 255, 255], [128, 128, 128, 255]],\n", + " ],\n", + " dtype=torch.uint8,\n", + " device=\"cuda\",\n", + " ).flatten(),\n", + " \"width\": 2,\n", + " \"height\": 2,\n", + " },\n", + " {\n", + " \"image\": torch.tensor(\n", + " [[[100, 50, 200, 255]]], dtype=torch.uint8, device=\"cuda\"\n", + " ).flatten(),\n", + " \"width\": 1,\n", + " \"height\": 1,\n", + " },\n", + " {\n", + " \"image\": torch.zeros((3, 4, 4), dtype=torch.uint8, device=\"cuda\").flatten(),\n", + " \"width\": 4,\n", + " \"height\": 3,\n", + " },\n", + " {\n", + " \"image\": torch.full((5, 3, 4), 255, dtype=torch.uint8, 
device=\"cuda\").flatten(),\n", + " \"width\": 3,\n", + " \"height\": 5,\n", + " },\n", + " {\n", + " \"image\": torch.tensor(\n", + " [\n", + " [[10, 20, 30, 50], [40, 50, 60, 100]],\n", + " [[70, 80, 90, 150], [100, 110, 120, 200]],\n", + " ],\n", + " dtype=torch.uint8,\n", + " device=\"cuda\",\n", + " ).flatten(),\n", + " \"width\": 2,\n", + " \"height\": 2,\n", + " },\n", + " {\n", + " \"image\": torch.randint(0, 256, (64 * 64 * 4,), dtype=torch.uint8, device=\"cuda\"),\n", + " \"width\": 64,\n", + " \"height\": 64,\n", + " },\n", + " {\n", + " \"image\": torch.randint(0, 256, (32 * 64 * 4,), dtype=torch.uint8, device=\"cuda\"),\n", + " \"width\": 64,\n", + " \"height\": 32,\n", + " },\n", + " ]\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " width, height = 4096, 5120\n", + " size = width * height * 4\n", + " return {\n", + " \"image\": torch.randint(0, 256, (size,), device=\"cuda\", dtype=torch.uint8),\n", + " \"width\": width,\n", + " \"height\": height,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/8_matrix_addition.ipynb b/challenges/colab_exports/easy/8_matrix_addition.ipynb new file mode 100644 index 00000000..51dcb172 --- /dev/null +++ b/challenges/colab_exports/easy/8_matrix_addition.ipynb @@ -0,0 +1,561 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program that performs element-wise addition of two $N \\times N$ matrices containing 32-bit floating point numbers on a GPU.\n The program should take two input matrices of equal dimensions and produce a single output matrix containing their element-wise sum.\n

\n\n

Implementation Requirements

\n
    \n
  • External libraries are not permitted
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in matrix C
  • \n
\n\n

Example 1:

\n
\nInput:  A = [[1.0, 2.0],\n             [3.0, 4.0]]\n        B = [[5.0, 6.0],\n             [7.0, 8.0]]\nOutput: C = [[6.0, 8.0],\n             [10.0, 12.0]]\n
\n\n

Example 2:

\n
\nInput:  A = [[1.5, 2.5, 3.5],\n             [4.5, 5.5, 6.5],\n             [7.5, 8.5, 9.5]]\n        B = [[0.5, 0.5, 0.5],\n             [0.5, 0.5, 0.5],\n             [0.5, 0.5, 0.5]]\nOutput: C = [[2.0, 3.0, 4.0],\n             [5.0, 6.0, 7.0],\n             [8.0, 9.0, 10.0]]\n
\n\n

Constraints

\n\n
    \n
  • Input matrices A and B have identical dimensions
  • \n
  • 1 ≤ N ≤ 4096
  • \n
  • All elements are 32-bit floating point numbers
  • \n\n
  • Performance is measured with N = 4,096
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void matrix_add(const float* A, const float* B, float* C, int N) {}\n\n// A, B, C are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* A, const float* B, float* C, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N * N + threadsPerBlock - 1) / threadsPerBlock;\n\n matrix_add<<>>(A, B, C, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(A: cute.Tensor, B: cute.Tensor, C: cute.Tensor, N: cute.Uint32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on the GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import 
DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef matrix_add_kernel(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n):\n pass\n\n\n# A, B, C are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n var BLOCK_SIZE: Int32 = 256\n var ctx = DeviceContext()\n var n_elements = N * N\n var num_blocks = ceildiv(n_elements, BLOCK_SIZE)\n\n var _kernel = ctx.compile_function[matrix_add_kernel, matrix_add_kernel]()\n ctx.enqueue_function(_kernel, A, B, C, N, grid_dim=num_blocks, block_dim=BLOCK_SIZE)\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef matrix_add_kernel(a, b, c, n_elements, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# a, b, c are tensors on the GPU\ndef solve(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, N: int):\n BLOCK_SIZE = 1024\n n_elements = N * N\n grid = (triton.cdiv(n_elements, 
BLOCK_SIZE),)\n matrix_add_kernel[grid](a, b, c, n_elements, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge 
Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Matrix Addition\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, N: int):\n", + " assert A.shape == (N, N)\n", + " assert B.shape == (N, N)\n", + " assert C.shape == (N, N)\n", + " assert A.dtype == B.dtype == C.dtype\n", + " assert A.device == B.device == C.device\n", + "\n", + " torch.add(A, B, out=C)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"C\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 2\n", + " A = torch.tensor([[1.0, 2.0], [3.0, 4.0]], device=\"cuda\", dtype=dtype)\n", + " B = torch.tensor([[5.0, 6.0], [7.0, 8.0]], device=\"cuda\", dtype=dtype)\n", + " C = torch.empty(N, N, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_cases = []\n", + "\n", + " # basic_2x2\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor([[1.0, 2.0], [3.0, 4.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([[5.0, 6.0], [7.0, 8.0]], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # all_zeros_4x4\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.zeros((4, 4), device=\"cuda\", dtype=dtype),\n", + " \"B\": 
torch.zeros((4, 4), device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.zeros((4, 4), device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # identity_plus_identity_3x3\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor(\n", + " [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"B\": torch.tensor(\n", + " [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"C\": torch.zeros((3, 3), device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + "\n", + " # negative_values_2x2\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor([[-1.0, -2.0], [-3.0, -4.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([[-5.0, -6.0], [-7.0, -8.0]], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # mixed_positive_negative_2x2\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor([[1.0, -2.0], [-3.0, 4.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([[-1.0, 2.0], [3.0, -4.0]], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # single_element_1x1\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor([[42.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([[8.0]], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.zeros((1, 1), device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + "\n", + " # large_N_16x16\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"B\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"C\": torch.zeros((16, 16), device=\"cuda\", dtype=dtype),\n", + " \"N\": 16,\n", + " }\n", + " )\n", + 
"\n", + " # very_small_numbers\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor(\n", + " [[0.000001, 0.0000001], [0.00000001, 0.000000001]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"B\": torch.tensor(\n", + " [[0.000001, 0.0000001], [0.00000001, 0.000000001]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"C\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # large_numbers\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.tensor(\n", + " [[1000000.0, 10000000.0], [-1000000.0, -10000000.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"B\": torch.tensor(\n", + " [[1000000.0, -10000000.0], [-1000000.0, 10000000.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"C\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # non_power_of_two_size_7x7\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.empty((7, 7), device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0),\n", + " \"B\": torch.empty((7, 7), device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0),\n", + " \"C\": torch.zeros((7, 7), device=\"cuda\", dtype=dtype),\n", + " \"N\": 7,\n", + " }\n", + " )\n", + "\n", + " # medium_size_32x32\n", + " test_cases.append(\n", + " {\n", + " \"A\": torch.empty((32, 32), device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"B\": torch.empty((32, 32), device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"C\": torch.zeros((32, 32), device=\"cuda\", dtype=dtype),\n", + " \"N\": 32,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4096\n", + " return {\n", + " \"A\": torch.empty(N, N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"B\": torch.empty(N, N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"C\": torch.zeros(N, N, device=\"cuda\", 
dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/easy/9_1d_convolution.ipynb b/challenges/colab_exports/easy/9_1d_convolution.ipynb new file mode 100644 index 00000000..ad840de1 --- /dev/null +++ b/challenges/colab_exports/easy/9_1d_convolution.ipynb @@ -0,0 +1,520 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program that performs a 1D convolution operation. Given an input array and a kernel (filter), compute the convolved\n output. The convolution should be performed with a \"valid\" boundary condition, meaning the kernel is only applied\n where it fully overlaps with the input.\n

\n\n\n \n \n\n \n input\n\n \n \n \n \n \n \n \n 1\n 2\n 3\n 4\n 5\n\n \n \n\n \n kernel\n\n \n \n \n \n \n 1\n 0\n -1\n\n \n 1×1\n 2×0\n 3×(-1)\n\n \n = 1 + 0 + (-3) = -2\n\n \n \n \n \n \n \n \n\n \n output\n\n \n \n -2\n\n \n \n\n\n

\n The input consists of two arrays:\n

    \n
  • input: A 1D array of 32-bit floating-point numbers.
  • \n
  • kernel: A 1D array of 32-bit floating-point numbers representing the convolution kernel.
  • \n
\nThe output should be written to the output array, which will have a size of input_size - kernel_size + 1.\n

\n\n

\n The convolution operation is defined mathematically as:\n

\n\n$$\noutput[i] = \\sum_{j=0}^{kernel\\_size-1} input[i + j] \\cdot kernel[j]\n$$\n\n

\n where $i$ ranges from 0 to $input\\_size - kernel\\_size$.\n

\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The\n solve function signature must remain unchanged\n
  • \n
  • The final result must be stored in the array\n output\n
  • \n
\n\n

Example 1:

\n
\nInput: input = [1, 2, 3, 4, 5], kernel = [1, 0, -1]\nOutput: [-2, -2, -2]\n
\n\n

Example 2:

\n
\nInput: input = [2, 4, 6, 8], kernel = [0.5, 0.2]\nOutput: [1.8, 3.2, 4.6]\n
\n\n

Constraints

\n\n
    \n
  • 1 ≤ input_size ≤ 1,500,000
  • \n
  • 1 ≤ kernel_size ≤ 2047
  • \n
  • kernel_sizeinput_size
  • \n\n
  • Performance is measured with input_size = 1,500,000, kernel_size = 2,047
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void convolution_1d_kernel(const float* input, const float* kernel, float* output,\n int input_size, int kernel_size) {}\n\n// input, kernel, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* input, const float* kernel, float* output, int input_size,\n int kernel_size) {\n int output_size = input_size - kernel_size + 1;\n int threadsPerBlock = 256;\n int blocksPerGrid = (output_size + threadsPerBlock - 1) / threadsPerBlock;\n\n convolution_1d_kernel<<>>(input, kernel, output, input_size,\n kernel_size);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, kernel, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n kernel: cute.Tensor,\n output: cute.Tensor,\n input_size: cute.Int32,\n kernel_size: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input, kernel are tensors on the GPU\n@jax.jit\ndef solve(input: jax.Array, kernel: jax.Array, input_size: int, kernel_size: int) -> jax.Array:\n # return output tensor directly\n 
pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\ndef convolution_1d_kernel(\n input: UnsafePointer[Float32, MutExternalOrigin],\n kernel: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n input_size: Int32,\n kernel_size: Int32,\n):\n pass\n\n\n# input, kernel, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n kernel: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n input_size: Int32,\n kernel_size: Int32,\n) raises:\n var output_size = input_size - kernel_size + 1\n var threadsPerBlock: Int32 = 256\n var ctx = DeviceContext()\n\n var blocksPerGrid = ceildiv(output_size, threadsPerBlock)\n\n var _kernel = ctx.compile_function[convolution_1d_kernel, convolution_1d_kernel]()\n ctx.enqueue_function(\n _kernel,\n input,\n kernel,\n output,\n input_size,\n kernel_size,\n grid_dim=blocksPerGrid,\n block_dim=threadsPerBlock,\n )\n\n ctx.synchronize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, kernel, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n kernel: torch.Tensor,\n output: torch.Tensor,\n input_size: int,\n kernel_size: int,\n):\n pass\n" + ] + }, + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef conv1d_kernel(input, kernel, output, input_size, kernel_size, BLOCK_SIZE: tl.constexpr):\n pass\n\n\n# input, kernel, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n kernel: torch.Tensor,\n output: torch.Tensor,\n input_size: int,\n kernel_size: int,\n):\n BLOCK_SIZE = 1024\n n_blocks = triton.cdiv(input_size - kernel_size + 1, BLOCK_SIZE)\n grid = (n_blocks,)\n\n conv1d_kernel[grid](input, kernel, output, input_size, kernel_size, BLOCK_SIZE)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> 
List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"1D Convolution\", atol=1e-04, rtol=1e-04, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " kernel: torch.Tensor,\n", + " output: torch.Tensor,\n", + " input_size: int,\n", + " kernel_size: int,\n", + " ):\n", + " assert input.shape == (input_size,)\n", + " assert kernel.shape == (kernel_size,)\n", + " assert output.shape == (input_size - kernel_size + 1,)\n", + " assert input.dtype == kernel.dtype == output.dtype\n", + " assert input.device == kernel.device == output.device\n", + "\n", + " # Create strided view of input for all windows\n", + " windows = input.unfold(0, kernel_size, 1)\n", + "\n", + " # Use einsum for explicit cross-correlation\n", + " # 'ij,j->i' means: for each window i, multiply with kernel j and sum over j\n", + " output.copy_(torch.einsum(\"ij,j->i\", windows, kernel))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": 
(ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"kernel\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"input_size\": (ctypes.c_int, \"in\"),\n", + " \"kernel_size\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_tensor = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], device=\"cuda\", dtype=dtype)\n", + " kernel_tensor = torch.tensor([1.0, 0.0, -1.0], device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input_tensor,\n", + " \"kernel\": kernel_tensor,\n", + " \"output\": output_tensor,\n", + " \"input_size\": 5,\n", + " \"kernel_size\": 3,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_specs = [\n", + " # Basic test cases\n", + " (\"basic_5x3\", [1.0, 2.0, 3.0, 4.0, 5.0], [1.0, 0.0, -1.0]),\n", + " (\"basic_4x2\", [2.0, 4.0, 6.0, 8.0], [0.5, 0.2]),\n", + " (\"identity_kernel\", [1.0, 2.0, 3.0, 4.0], [1.0]),\n", + " (\"edge_detection\", [1.0, 1.0, 1.0, 0.0, 0.0, 0.0], [1.0, -1.0]),\n", + " (\"smoothing\", [1.0, 2.0, 3.0, 4.0, 5.0], [0.25, 0.5, 0.25]),\n", + " ]\n", + "\n", + " test_cases = []\n", + " for _, input_vals, kernel_vals in test_specs:\n", + " input_size = len(input_vals)\n", + " kernel_size = len(kernel_vals)\n", + " output_size = input_size - kernel_size + 1\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(input_vals, device=\"cuda\", dtype=dtype),\n", + " \"kernel\": torch.tensor(kernel_vals, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(output_size, device=\"cuda\", dtype=dtype),\n", + " \"input_size\": input_size,\n", + " \"kernel_size\": kernel_size,\n", + " }\n", + " )\n", + "\n", + " # Random test cases with different sizes\n", + " for _, input_size, kernel_size in [\n", + " 
(\"small_conv\", 10, 3),\n", + " (\"medium_conv\", 100, 7),\n", + " (\"large_conv\", 1000, 15),\n", + " (\"wide_kernel\", 50, 20),\n", + " (\"narrow_kernel\", 200, 2),\n", + " ]:\n", + " output_size = input_size - kernel_size + 1\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty(input_size, device=\"cuda\", dtype=dtype).uniform_(\n", + " -10.0, 10.0\n", + " ),\n", + " \"kernel\": torch.empty(kernel_size, device=\"cuda\", dtype=dtype).uniform_(\n", + " -1.0, 1.0\n", + " ),\n", + " \"output\": torch.empty(output_size, device=\"cuda\", dtype=dtype),\n", + " \"input_size\": input_size,\n", + " \"kernel_size\": kernel_size,\n", + " }\n", + " )\n", + "\n", + " # Edge cases\n", + " for _, input_size, kernel_size in [\n", + " (\"min_input\", 1, 1),\n", + " (\"kernel_equals_input\", 10, 10),\n", + " (\"large_input_small_kernel\", 10000, 3),\n", + " ]:\n", + " output_size = input_size - kernel_size + 1\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty(input_size, device=\"cuda\", dtype=dtype).uniform_(\n", + " -1.0, 1.0\n", + " ),\n", + " \"kernel\": torch.empty(kernel_size, device=\"cuda\", dtype=dtype).uniform_(\n", + " -0.1, 0.1\n", + " ),\n", + " \"output\": torch.empty(output_size, device=\"cuda\", dtype=dtype),\n", + " \"input_size\": input_size,\n", + " \"kernel_size\": kernel_size,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_size, kernel_size = 1500000, 2047 # Large convolution for performance testing\n", + " output_size = input_size - kernel_size + 1\n", + " return {\n", + " \"input\": torch.empty(input_size, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"kernel\": torch.empty(kernel_size, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"output\": torch.empty(output_size, device=\"cuda\", dtype=dtype),\n", + " \"input_size\": input_size,\n", + " \"kernel_size\": kernel_size,\n", + " 
}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/12_multi_head_attention.ipynb b/challenges/colab_exports/hard/12_multi_head_attention.ipynb new file mode 100644 index 00000000..7903d4d7 --- /dev/null +++ b/challenges/colab_exports/hard/12_multi_head_attention.ipynb @@ -0,0 +1,495 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program for multi-head self-attention. Given three input matrices $Q$ (queries), $K$ (keys), and $V$ (values) of size $N \\times d_{\\text{model}}$, compute:\n $$ \\text{MultiHead}(Q,K,V) = \\text{Concat}(\\text{head}_1,\\ldots,\\text{head}_h) $$\n where each head computes:\n $$ \\text{head}_i = \\text{softmax}\\left(\\frac{Q_iK_i^T}{\\sqrt{d_k}}\\right)V_i $$\n with $d_k = d_{\\text{model}}/h$ and $Q_i, K_i, V_i$ being the i-th head's partition of the input matrices.\n

\n\n

Implementation Requirements

\n
    \n
  • Use only native features (external libraries are not permitted)
  • \n
  • The solve function signature must remain unchanged
  • \n
  • The final result must be stored in the output array
  • \n
\n\n

Example 1:

\n

\nInput:\n$$\n\\begin{align*}\nN &= 2, \\quad d_{\\text{model}} = 4, \\quad h = 2 \\\\[1em]\nQ &= \\begin{bmatrix}\n1.0 & 0.0 & 2.0 & 3.0 \\\\\n4.0 & 5.0 & 6.0 & 7.0\n\\end{bmatrix} \\\\[1em]\nK &= \\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n5.0 & 6.0 & 7.0 & 8.0\n\\end{bmatrix} \\\\[1em]\nV &= \\begin{bmatrix}\n0.5 & 1.0 & 1.5 & 2.0 \\\\\n2.5 & 3.0 & 3.5 & 4.0\n\\end{bmatrix}\n\\end{align*}\n$$\n\nOutput:\n$$\n\\begin{bmatrix}\n2.39 & 2.89 & 3.50 & 4.00 \\\\\n2.50 & 3.00 & 3.50 & 4.00\n\\end{bmatrix}\n$$\n

\n\n

Example 2:

\n

\nInput:\n$$\n\\begin{align*}\nN &= 1, \\quad d_{\\text{model}} = 2, \\quad h = 1 \\\\[1em]\nQ &= \\begin{bmatrix} 1.0 & 1.0 \\end{bmatrix} \\\\[1em]\nK &= \\begin{bmatrix} 1.0 & 1.0 \\end{bmatrix} \\\\[1em]\nV &= \\begin{bmatrix} 2.0 & 3.0 \\end{bmatrix}\n\\end{align*}\n$$\n\nOutput:\n$$\n\\begin{bmatrix} 2.0 & 3.0 \\end{bmatrix}\n$$\n

\n\n

Constraints

\n
    \n
  • 1 \u2264 N \u2264 10000
  • \n
  • 2 \u2264 d_model \u2264 1024
  • \n
  • 1 \u2264 h \u2264 d_model
  • \n
  • d_model % h == 0
  • \n
  • -10.0 \u2264 values \u2264 10.0
  • \n\n
  • Performance is measured with N = 1,024, d_model = 1,024
  • \n
\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K, V, output are device pointers\nextern \"C\" void solve(const float* Q, const float* K, const float* V, float* output, int N,\n int d_model, int h) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K, V, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K: cute.Tensor,\n V: cute.Tensor,\n output: cute.Tensor,\n N: cute.Int32,\n d_model: cute.Int32,\n h: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K, V are tensors on the GPU\n@jax.jit\ndef solve(Q: jax.Array, K: jax.Array, V: jax.Array, N: int, d_model: int, h: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom 
std.math import ceildiv\n\n\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K: UnsafePointer[Float32, MutExternalOrigin],\n V: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n d_model: Int32,\n h: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n N: int,\n d_model: int,\n h: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n N: int,\n d_model: int,\n h: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier 
= access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Multi-Head Attention\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K: torch.Tensor,\n", + " V: torch.Tensor,\n", + " output: torch.Tensor,\n", + " N: int,\n", + " d_model: int,\n", + " h: int,\n", + " ):\n", + " assert Q.shape == (N, d_model)\n", + " assert K.shape == (N, d_model)\n", + " assert V.shape == (N, d_model)\n", + " assert output.shape == (N, d_model)\n", + " assert 
Q.dtype == K.dtype == V.dtype == output.dtype\n", + " assert Q.device == K.device == V.device == output.device\n", + " d_k = d_model // h\n", + " result = torch.zeros((N, d_model), dtype=Q.dtype, device=Q.device)\n", + " for head in range(h):\n", + " Q_h = Q[:, head * d_k : (head + 1) * d_k]\n", + " K_h = K[:, head * d_k : (head + 1) * d_k]\n", + " V_h = V[:, head * d_k : (head + 1) * d_k]\n", + " scores = torch.matmul(Q_h, K_h.t()) / (d_k**0.5)\n", + " softmax = torch.softmax(scores, dim=1)\n", + " head_output = torch.matmul(softmax, V_h)\n", + " result[:, head * d_k : (head + 1) * d_k] = head_output\n", + " output.copy_(result)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"V\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"d_model\": (ctypes.c_int, \"in\"),\n", + " \"h\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " Q = torch.tensor([[1.0, 0.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]], device=\"cuda\", dtype=dtype)\n", + " K = torch.tensor([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], device=\"cuda\", dtype=dtype)\n", + " V = torch.tensor([[0.5, 1.0, 1.5, 2.0], [2.5, 3.0, 3.5, 4.0]], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(2, 4, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"Q\": Q,\n", + " \"K\": K,\n", + " \"V\": V,\n", + " \"output\": output,\n", + " \"N\": 2,\n", + " \"d_model\": 4,\n", + " \"h\": 2,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_cases = []\n", + " # basic_example\n", + " Q = torch.tensor([[1.0, 0.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]], device=\"cuda\", dtype=dtype)\n", + " K = 
torch.tensor([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], device=\"cuda\", dtype=dtype)\n", + " V = torch.tensor([[0.5, 1.0, 1.5, 2.0], [2.5, 3.0, 3.5, 4.0]], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(2, 4, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"N\": 2, \"d_model\": 4, \"h\": 2})\n", + " # single_head\n", + " Q = torch.tensor([[1.0, 1.0], [2.0, 2.0]], device=\"cuda\", dtype=dtype)\n", + " K = torch.tensor([[1.0, 1.0], [1.0, 1.0]], device=\"cuda\", dtype=dtype)\n", + " V = torch.tensor([[2.0, 3.0], [4.0, 5.0]], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(2, 2, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"N\": 2, \"d_model\": 2, \"h\": 1})\n", + " # four_heads (random)\n", + " Q = torch.empty(4, 4, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " K = torch.empty(4, 4, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " V = torch.empty(4, 4, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " output = torch.empty(4, 4, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"N\": 4, \"d_model\": 4, \"h\": 4})\n", + " # medium_size (random)\n", + " Q = torch.empty(32, 32, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " K = torch.empty(32, 32, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " V = torch.empty(32, 32, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " output = torch.empty(32, 32, device=\"cuda\", dtype=dtype)\n", + " test_cases.append(\n", + " {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"N\": 32, \"d_model\": 32, \"h\": 8}\n", + " )\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " Q = torch.empty(1024, 1024, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0)\n", + " K = torch.empty(1024, 1024, 
device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0)\n", + " V = torch.empty(1024, 1024, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0)\n", + " output = torch.zeros(1024, 1024, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"Q\": Q,\n", + " \"K\": K,\n", + " \"V\": V,\n", + " \"output\": output,\n", + " \"N\": 1024,\n", + " \"d_model\": 1024,\n", + " \"h\": 16,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/14_multi_agent_sim.ipynb b/challenges/colab_exports/hard/14_multi_agent_sim.ipynb new file mode 100644 index 00000000..02c4e268 --- /dev/null +++ b/challenges/colab_exports/hard/14_multi_agent_sim.ipynb @@ -0,0 +1,483 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

\n Implement a program for a multi-agent flocking simulation (boids). The input consists of:\n

\n
  • An array agents containing N agents, where N is the total number of agents
  • \n
  • Each agent occupies 4 consecutive 32-bit floating point numbers in the array: $[x, y, v_x, v_y]$, where:\n
      \n
    • $(x, y)$ represents the agent's position in 2D space
    • \n
    • $(v_x, v_y)$ represents the agent's velocity vector
    • \n
    \n
  • \n
  • The total array size is 4 * N floats, with agent $i$'s data stored at indices [4i, 4i+1, 4i+2, 4i+3]
  • \n\n\n

    Simulation Rules

    \n
      \n
    1. For each agent $i$, identify all neighbors $j$ (where $i \\neq j$) within radius $r = 5.0$ using:\n $$\n \\sqrt{(x_i - x_j)^2 + (y_i - y_j)^2} < r\n $$\n
    2. \n
    3. Compute average velocity of neighboring agents:\n $$\n \\vec{v}_{avg} = \\begin{cases}\n \\frac{1}{|N_i|} \\sum_{j \\in N_i} \\vec{v}_j & \\text{if } |N_i| > 0 \\\\\n \\vec{v}_i & \\text{if } |N_i| = 0\n \\end{cases}\n $$\n where $N_i$ is the set of neighbors for agent $i$\n
    4. \n
    5. Update velocity:\n $$\n \\vec{v}_{new} = \\vec{v} + \\alpha(\\vec{v}_{avg} - \\vec{v}), \\text{ where } \\alpha = 0.05\n $$\n
    6. \n
    7. Update position:\n $$\n \\vec{p}_{new} = \\vec{p} + \\vec{v}_{new}\n $$\n
    8. \n
    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the agents_next array
    • \n
    \n\n

    Example 1:

    \n
    \nInput: N = 2\nagents = [\n  0.0, 0.0, 1.0, 0.0,    // Agent 0: [x, y, vx, vy]\n  3.0, 4.0, 0.0, -1.0    // Agent 1: [x, y, vx, vy]\n]\n\nOutput:\nagents_next = [\n  1.0, 0.0, 1.0, 0.0,    // Agent 0: [x, y, vx, vy]\n  3.0, 3.0, 0.0, -1.0    // Agent 1: [x, y, vx, vy]\n]\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 100,000
    • \n
    • Each agent's position and velocity components are 32-bit floats
    • \n\n
    • Performance is measured with N = 10,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// agents, agents_next are device pointers\nextern \"C\" void solve(const float* agents, float* agents_next, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# agents, agents_next are tensors on the GPU\n@cute.jit\ndef solve(agents: cute.Tensor, agents_next: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# agents is a tensor on the GPU\n@jax.jit\ndef solve(agents: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n agents: UnsafePointer[Float32, MutExternalOrigin],\n agents_next: UnsafePointer[Float32, 
MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# agents, agents_next are tensors on the GPU\ndef solve(agents: torch.Tensor, agents_next: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# agents, agents_next are tensors on the GPU\ndef solve(agents: torch.Tensor, agents_next: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " 
Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Multi-Agent Simulation\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, agents: torch.Tensor, agents_next: torch.Tensor, N: int):\n", + " assert agents.shape == (4 * N,)\n", + " assert agents_next.shape == (4 * N,)\n", + " assert agents.dtype == agents_next.dtype\n", + " assert agents.device == agents_next.device\n", + " r = 5.0\n", + " r2 = r * r\n", + " alpha = 0.05\n", + " agents_reshaped = agents.view(N, 4)\n", + " agents_next_reshaped = agents_next.view(N, 4)\n", + " positions = agents_reshaped[:, :2]\n", + " velocities = agents_reshaped[:, 2:]\n", + " diff = positions.unsqueeze(1) - positions.unsqueeze(0)\n", + " dist_sq = (diff**2).sum(dim=2)\n", + " dist_sq.fill_diagonal_(r2 + 1)\n", + " neighbor_mask = dist_sq < r2\n", + " sum_velocities = neighbor_mask.float() @ 
velocities\n", + " neighbor_counts = neighbor_mask.sum(dim=1, keepdim=True)\n", + " avg_velocities = torch.empty_like(velocities)\n", + " nonzero_mask = neighbor_counts[:, 0] > 0\n", + " avg_velocities[nonzero_mask] = sum_velocities[nonzero_mask] / neighbor_counts[nonzero_mask]\n", + " avg_velocities[~nonzero_mask] = velocities[~nonzero_mask]\n", + " new_velocities = velocities + alpha * (avg_velocities - velocities)\n", + " new_positions = positions + new_velocities\n", + " agents_next_reshaped[:] = torch.cat([new_positions, new_velocities], dim=1)\n", + " agents_next.copy_(agents_next_reshaped.view(-1))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"agents\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"agents_next\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 2\n", + " agents = torch.tensor([0.0, 0.0, 1.0, 0.0, 5.0, 0.0, 0.0, 1.0], device=\"cuda\", dtype=dtype)\n", + " agents_next = torch.empty(4 * N, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"agents\": agents,\n", + " \"agents_next\": agents_next,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_cases = []\n", + " # basic_example\n", + " agents = torch.tensor([0.0, 0.0, 1.0, 0.0, 3.0, 4.0, 0.0, -1.0], device=\"cuda\", dtype=dtype)\n", + " agents_next = torch.empty(8, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"agents\": agents, \"agents_next\": agents_next, \"N\": 2})\n", + " # single_agent\n", + " agents = torch.tensor([10.0, 15.0, 1.0, -1.0], device=\"cuda\", dtype=dtype)\n", + " agents_next = torch.empty(4, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"agents\": agents, \"agents_next\": agents_next, \"N\": 1})\n", + " # two_agents_interacting\n", + " agents = 
torch.tensor([0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0], device=\"cuda\", dtype=dtype)\n", + " agents_next = torch.empty(8, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"agents\": agents, \"agents_next\": agents_next, \"N\": 2})\n", + " # four_agents\n", + " agents = torch.tensor(\n", + " [0.0, 0.0, 1.0, 0.0, 2.0, 2.0, 0.0, 1.0, 4.0, 4.0, -1.0, 0.0, 6.0, 6.0, 0.0, -1.0],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " agents_next = torch.empty(16, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"agents\": agents, \"agents_next\": agents_next, \"N\": 4})\n", + " # boundary_distance\n", + " agents = torch.tensor(\n", + " [0.0, 0.0, 1.0, 1.0, 3.0, 4.0, -1.0, -1.0], device=\"cuda\", dtype=dtype\n", + " )\n", + " agents_next = torch.empty(8, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"agents\": agents, \"agents_next\": agents_next, \"N\": 2})\n", + " # medium_simulation (random)\n", + " agents = torch.empty(4096, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0)\n", + " agents_next = torch.empty(4096, device=\"cuda\", dtype=dtype)\n", + " test_cases.append({\"agents\": agents, \"agents_next\": agents_next, \"N\": 1024})\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " agents = torch.empty(40000, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0)\n", + " agents_next = torch.empty(40000, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"agents\": agents,\n", + " \"agents_next\": agents_next,\n", + " \"N\": 10000,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/15_sorting.ipynb b/challenges/colab_exports/hard/15_sorting.ipynb new file mode 100644 index 00000000..454fbb9c --- /dev/null +++ b/challenges/colab_exports/hard/15_sorting.ipynb @@ -0,0 +1,456 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a program that sorts an array of 32-bit floating-point numbers in ascending order. You are free to choose any sorting algorithm.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The sorted result must be stored back in the input data array
    • \n
    \n\n

    Example

    \n
    \nInput: data = [5.0, 2.0, 8.0, 1.0, 9.0, 4.0], N = 6\nOutput: data = [1.0, 2.0, 4.0, 5.0, 8.0, 9.0]\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 N \u2264 1,000,000
    • \n\n
    • Performance is measured with N = 1,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// data is device pointer\nextern \"C\" void solve(float* data, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# data are tensors on the GPU\n@cute.jit\ndef solve(data: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# data is a tensor on the GPU\n@jax.jit\ndef solve(data: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(data: UnsafePointer[Float32, MutExternalOrigin], N: Int32) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# data is a tensor on the GPU\ndef solve(data: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# data is a tensor on the GPU\ndef solve(data: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " 
Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Sorting\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, data: torch.Tensor, N: int):\n", + " assert data.shape == (N,)\n", + " data.copy_(data.sort()[0])\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"data\": (ctypes.POINTER(ctypes.c_float), \"inout\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " data = torch.tensor([5.0, 2.0, 8.0, 1.0, 9.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"data\": data,\n", + " \"N\": 6,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # already_sorted\n", + " tests.append(\n", + " {\"data\": torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], device=\"cuda\", dtype=dtype), \"N\": 5}\n", + " )\n", + " # reverse_sorted\n", + " tests.append(\n", + " {\"data\": torch.tensor([5.0, 4.0, 3.0, 2.0, 1.0], device=\"cuda\", dtype=dtype), \"N\": 5}\n", + " )\n", + " # all_same\n", + " 
tests.append({\"data\": torch.tensor([5.0] * 10, device=\"cuda\", dtype=dtype), \"N\": 10})\n", + " # single_element\n", + " tests.append({\"data\": torch.tensor([7.0], device=\"cuda\", dtype=dtype), \"N\": 1})\n", + " # power_of_two\n", + " tests.append(\n", + " {\n", + " \"data\": torch.empty(1024, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"N\": 1024,\n", + " }\n", + " )\n", + " # non_power_of_two\n", + " tests.append(\n", + " {\n", + " \"data\": torch.empty(1000, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"N\": 1000,\n", + " }\n", + " )\n", + " # large_array\n", + " tests.append(\n", + " {\n", + " \"data\": torch.empty(32768, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"N\": 32768,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 1000000\n", + " data = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0)\n", + " return {\n", + " \"data\": data,\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/20_kmeans_clustering.ipynb b/challenges/colab_exports/hard/20_kmeans_clustering.ipynb new file mode 100644 index 00000000..19356391 --- /dev/null +++ b/challenges/colab_exports/hard/20_kmeans_clustering.ipynb @@ -0,0 +1,682 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement the k-means clustering algorithm for 2D points. Given arrays of x and y coordinates for data points, initial centroids, and other parameters, assign each point to the nearest centroid and update the centroids iteratively. The final centroids and labels should be stored in the output arrays.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in labels, final_centroid_x, and final_centroid_y
    • \n
    \n\n

    Example 1:

    \n
    \nInput:\nsample_size = 4, k = 2, max_iterations = 10\ndata_x = [1.0, 2.0, 8.0, 9.0]\ndata_y = [1.0, 2.0, 8.0, 9.0]\ninitial_centroid_x = [1.0, 8.0]\ninitial_centroid_y = [1.0, 8.0]\nOutput: (see reference implementation for expected output)\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 sample_size \u2264 1000000
    • \n
    • 1 \u2264 k \u2264 1000
    • \n
    • All arrays are float32 except labels, which is int32
    • \n\n
    • Performance is measured with k = 5, max_iterations = 30, sample_size = 10,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// data_x, data_y, labels, initial_centroid_x, initial_centroid_y,\n// final_centroid_x, final_centroid_y are device pointers\nextern \"C\" void solve(const float* data_x, const float* data_y, int* labels,\n float* initial_centroid_x, float* initial_centroid_y, float* final_centroid_x,\n float* final_centroid_y, int sample_size, int k, int max_iterations) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# data_x, data_y, labels, initial_centroid_x, initial_centroid_y,\n# final_centroid_x, final_centroid_y are tensors on the GPU\n@cute.jit\ndef solve(\n data_x: cute.Tensor,\n data_y: cute.Tensor,\n labels: cute.Tensor,\n initial_centroid_x: cute.Tensor,\n initial_centroid_y: cute.Tensor,\n final_centroid_x: cute.Tensor,\n final_centroid_y: cute.Tensor,\n sample_size: cute.Int32,\n k: cute.Int32,\n max_iterations: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# data_x, data_y, initial_centroid_x, initial_centroid_y are tensors on the GPU\n@jax.jit\ndef solve(\n data_x: jax.Array,\n data_y: jax.Array,\n initial_centroid_x: jax.Array,\n 
initial_centroid_y: jax.Array,\n sample_size: int,\n k: int,\n max_iterations: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n data_x: UnsafePointer[Float32, MutExternalOrigin],\n data_y: UnsafePointer[Float32, MutExternalOrigin],\n labels: UnsafePointer[Int32, MutExternalOrigin],\n initial_centroid_x: UnsafePointer[Float32, MutExternalOrigin],\n initial_centroid_y: UnsafePointer[Float32, MutExternalOrigin],\n final_centroid_x: UnsafePointer[Float32, MutExternalOrigin],\n final_centroid_y: UnsafePointer[Float32, MutExternalOrigin],\n sample_size: Int32,\n k: Int32,\n max_iterations: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# data_x, data_y, labels, initial_centroid_x,\n# initial_centroid_y, final_centroid_x, final_centroid_y are tensors on the GPU\ndef solve(\n data_x: torch.Tensor,\n data_y: torch.Tensor,\n labels: torch.Tensor,\n initial_centroid_x: torch.Tensor,\n initial_centroid_y: torch.Tensor,\n final_centroid_x: torch.Tensor,\n final_centroid_y: torch.Tensor,\n sample_size: int,\n k: int,\n max_iterations: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# data_x, data_y, labels, initial_centroid_x,\n# initial_centroid_y, final_centroid_x, final_centroid_y are tensors on the GPU\ndef solve(\n data_x: torch.Tensor,\n data_y: torch.Tensor,\n labels: torch.Tensor,\n initial_centroid_x: torch.Tensor,\n initial_centroid_y: torch.Tensor,\n final_centroid_x: torch.Tensor,\n final_centroid_y: torch.Tensor,\n sample_size: int,\n k: int,\n max_iterations: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + 
" pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"K-Means Clustering\", atol=1e-04, rtol=1e-04, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " data_x: torch.Tensor,\n", + " data_y: torch.Tensor,\n", + " labels: torch.Tensor,\n", + " initial_centroid_x: torch.Tensor,\n", + " initial_centroid_y: torch.Tensor,\n", + " final_centroid_x: torch.Tensor,\n", + " final_centroid_y: torch.Tensor,\n", + " sample_size: int,\n", + " k: int,\n", + " max_iterations: int,\n", + " ):\n", + " assert data_x.shape == (sample_size,)\n", + " assert data_y.shape == (sample_size,)\n", + " assert initial_centroid_x.shape == (k,)\n", + " assert initial_centroid_y.shape == (k,)\n", + " assert final_centroid_x.shape == (k,)\n", + " assert final_centroid_y.shape == (k,)\n", + " assert labels.shape == (sample_size,)\n", + " final_centroid_x.copy_(initial_centroid_x)\n", + " final_centroid_y.copy_(initial_centroid_y)\n", + " for _ in range(max_iterations):\n", + " expanded_x = data_x.view(-1, 1) - final_centroid_x.view(1, -1)\n", + " expanded_y = data_y.view(-1, 1) - final_centroid_y.view(1, -1)\n", + " distances = expanded_x**2 + expanded_y**2\n", + " labels.copy_(torch.argmin(distances, dim=1))\n", 
+ " for i in range(k):\n", + " mask = labels == i\n", + " if mask.any():\n", + " final_centroid_x[i] = data_x[mask].mean()\n", + " final_centroid_y[i] = data_y[mask].mean()\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"data_x\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"data_y\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"labels\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"initial_centroid_x\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"initial_centroid_y\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"final_centroid_x\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"final_centroid_y\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"sample_size\": (ctypes.c_int, \"in\"),\n", + " \"k\": (ctypes.c_int, \"in\"),\n", + " \"max_iterations\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " sample_size, k, max_iterations = 4, 2, 10\n", + " data_x = torch.tensor([1.0, 2.0, 8.0, 9.0], device=\"cuda\", dtype=dtype)\n", + " data_y = torch.tensor([1.0, 2.0, 8.0, 9.0], device=\"cuda\", dtype=dtype)\n", + " labels = torch.empty(sample_size, device=\"cuda\", dtype=torch.int32)\n", + " initial_centroid_x = torch.tensor([1.0, 8.0], device=\"cuda\", dtype=dtype)\n", + " initial_centroid_y = torch.tensor([1.0, 8.0], device=\"cuda\", dtype=dtype)\n", + " final_centroid_x = torch.empty(k, device=\"cuda\", dtype=dtype)\n", + " final_centroid_y = torch.empty(k, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"data_x\": data_x,\n", + " \"data_y\": data_y,\n", + " \"labels\": labels,\n", + " \"initial_centroid_x\": initial_centroid_x,\n", + " \"initial_centroid_y\": initial_centroid_y,\n", + " \"final_centroid_x\": final_centroid_x,\n", + " \"final_centroid_y\": final_centroid_y,\n", + " \"sample_size\": sample_size,\n", + " \"k\": k,\n", + " \"max_iterations\": max_iterations,\n", + " 
}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_cases = []\n", + " # basic_clustering\n", + " data_x = torch.tensor(\n", + " [1.0, 1.5, 1.2, 1.3, 1.1, 5.0, 5.2, 5.1, 5.3, 5.4, 10.1, 10.2, 10.0, 10.3, 10.5],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " data_y = torch.tensor(\n", + " [1.0, 1.5, 1.2, 1.3, 1.1, 5.0, 5.2, 5.1, 5.3, 5.4, 10.1, 10.2, 10.0, 10.3, 10.5],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " labels = torch.empty(15, device=\"cuda\", dtype=torch.int32)\n", + " initial_centroid_x = torch.tensor([3.4, 7.1, 8.5], device=\"cuda\", dtype=dtype)\n", + " initial_centroid_y = torch.tensor([3.4, 7.1, 8.5], device=\"cuda\", dtype=dtype)\n", + " final_centroid_x = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " final_centroid_y = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " test_cases.append(\n", + " {\n", + " \"data_x\": data_x,\n", + " \"data_y\": data_y,\n", + " \"labels\": labels,\n", + " \"initial_centroid_x\": initial_centroid_x,\n", + " \"initial_centroid_y\": initial_centroid_y,\n", + " \"final_centroid_x\": final_centroid_x,\n", + " \"final_centroid_y\": final_centroid_y,\n", + " \"sample_size\": 15,\n", + " \"k\": 3,\n", + " \"max_iterations\": 20,\n", + " }\n", + " )\n", + " # single_cluster\n", + " data_x = torch.tensor(\n", + " [1.0, 1.2, 1.1, 1.3, 1.5, 1.4, 1.6, 1.2, 1.3, 1.1], device=\"cuda\", dtype=dtype\n", + " )\n", + " data_y = torch.tensor(\n", + " [1.0, 1.2, 1.1, 1.3, 1.5, 1.4, 1.6, 1.2, 1.3, 1.1], device=\"cuda\", dtype=dtype\n", + " )\n", + " labels = torch.empty(10, device=\"cuda\", dtype=torch.int32)\n", + " initial_centroid_x = torch.tensor([1.0, 5.0, 10.0], device=\"cuda\", dtype=dtype)\n", + " initial_centroid_y = torch.tensor([1.0, 5.0, 10.0], device=\"cuda\", dtype=dtype)\n", + " final_centroid_x = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " final_centroid_y = torch.empty(3, device=\"cuda\", 
dtype=dtype)\n", + " test_cases.append(\n", + " {\n", + " \"data_x\": data_x,\n", + " \"data_y\": data_y,\n", + " \"labels\": labels,\n", + " \"initial_centroid_x\": initial_centroid_x,\n", + " \"initial_centroid_y\": initial_centroid_y,\n", + " \"final_centroid_x\": final_centroid_x,\n", + " \"final_centroid_y\": final_centroid_y,\n", + " \"sample_size\": 10,\n", + " \"k\": 3,\n", + " \"max_iterations\": 10,\n", + " }\n", + " )\n", + " # empty_clusters\n", + " data_x = torch.tensor(\n", + " [\n", + " 1.0,\n", + " 1.5,\n", + " 1.2,\n", + " 1.3,\n", + " 1.1,\n", + " 1.4,\n", + " 1.6,\n", + " 1.2,\n", + " 1.7,\n", + " 1.3,\n", + " 10.0,\n", + " 10.5,\n", + " 10.2,\n", + " 10.3,\n", + " 10.1,\n", + " 10.4,\n", + " 10.6,\n", + " 10.2,\n", + " 10.7,\n", + " 10.3,\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " data_y = torch.tensor(\n", + " [\n", + " 1.0,\n", + " 1.5,\n", + " 1.2,\n", + " 1.3,\n", + " 1.1,\n", + " 1.4,\n", + " 1.6,\n", + " 1.2,\n", + " 1.7,\n", + " 1.3,\n", + " 10.0,\n", + " 10.5,\n", + " 10.2,\n", + " 10.3,\n", + " 10.1,\n", + " 10.4,\n", + " 10.6,\n", + " 10.2,\n", + " 10.7,\n", + " 10.3,\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " labels = torch.empty(20, device=\"cuda\", dtype=torch.int32)\n", + " initial_centroid_x = torch.tensor([1.5, 5.0, 10.5], device=\"cuda\", dtype=dtype)\n", + " initial_centroid_y = torch.tensor([1.5, 5.0, 10.5], device=\"cuda\", dtype=dtype)\n", + " final_centroid_x = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " final_centroid_y = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " test_cases.append(\n", + " {\n", + " \"data_x\": data_x,\n", + " \"data_y\": data_y,\n", + " \"labels\": labels,\n", + " \"initial_centroid_x\": initial_centroid_x,\n", + " \"initial_centroid_y\": initial_centroid_y,\n", + " \"final_centroid_x\": final_centroid_x,\n", + " \"final_centroid_y\": final_centroid_y,\n", + " \"sample_size\": 20,\n", + " \"k\": 3,\n", + " 
\"max_iterations\": 15,\n", + " }\n", + " )\n", + " # max_iterations_limit\n", + " data_x = torch.tensor(\n", + " [1.0, 1.5, 1.2, 1.3, 1.1, 5.0, 5.2, 5.1, 5.3, 5.4, 10.1, 10.2, 10.0, 10.3, 10.5],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " data_y = torch.tensor(\n", + " [1.0, 1.5, 1.2, 1.3, 1.1, 5.0, 5.2, 5.1, 5.3, 5.4, 10.1, 10.2, 10.0, 10.3, 10.5],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " labels = torch.empty(15, device=\"cuda\", dtype=torch.int32)\n", + " initial_centroid_x = torch.tensor([3.4, 7.1, 8.5], device=\"cuda\", dtype=dtype)\n", + " initial_centroid_y = torch.tensor([3.4, 7.1, 8.5], device=\"cuda\", dtype=dtype)\n", + " final_centroid_x = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " final_centroid_y = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " test_cases.append(\n", + " {\n", + " \"data_x\": data_x,\n", + " \"data_y\": data_y,\n", + " \"labels\": labels,\n", + " \"initial_centroid_x\": initial_centroid_x,\n", + " \"initial_centroid_y\": initial_centroid_y,\n", + " \"final_centroid_x\": final_centroid_x,\n", + " \"final_centroid_y\": final_centroid_y,\n", + " \"sample_size\": 15,\n", + " \"k\": 3,\n", + " \"max_iterations\": 5,\n", + " }\n", + " )\n", + " # medium_random\n", + " sample_size = 100\n", + " k = 5\n", + " data_x = torch.empty(sample_size, device=\"cuda\", dtype=dtype).uniform_(0.0, 100.0)\n", + " data_y = torch.empty(sample_size, device=\"cuda\", dtype=dtype).uniform_(0.0, 100.0)\n", + " labels = torch.empty(sample_size, device=\"cuda\", dtype=torch.int32)\n", + " initial_centroid_x = torch.tensor(\n", + " [20.0, 40.0, 60.0, 80.0, 10.0], device=\"cuda\", dtype=dtype\n", + " )\n", + " initial_centroid_y = torch.tensor(\n", + " [20.0, 40.0, 60.0, 80.0, 50.0], device=\"cuda\", dtype=dtype\n", + " )\n", + " final_centroid_x = torch.empty(k, device=\"cuda\", dtype=dtype)\n", + " final_centroid_y = torch.empty(k, device=\"cuda\", dtype=dtype)\n", + " test_cases.append(\n", + " {\n", + " 
\"data_x\": data_x,\n", + " \"data_y\": data_y,\n", + " \"labels\": labels,\n", + " \"initial_centroid_x\": initial_centroid_x,\n", + " \"initial_centroid_y\": initial_centroid_y,\n", + " \"final_centroid_x\": final_centroid_x,\n", + " \"final_centroid_y\": final_centroid_y,\n", + " \"sample_size\": sample_size,\n", + " \"k\": k,\n", + " \"max_iterations\": 30,\n", + " }\n", + " )\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " sample_size = 10000\n", + " k = 5\n", + " data_x = torch.empty(sample_size, device=\"cuda\", dtype=dtype).uniform_(0.0, 1000.0)\n", + " data_y = torch.empty(sample_size, device=\"cuda\", dtype=dtype).uniform_(0.0, 1000.0)\n", + " labels = torch.empty(sample_size, device=\"cuda\", dtype=torch.int32)\n", + " initial_centroid_x = torch.tensor(\n", + " [100.0, 200.0, 300.0, 400.0, 500.0], device=\"cuda\", dtype=dtype\n", + " )\n", + " initial_centroid_y = torch.tensor(\n", + " [100.0, 200.0, 300.0, 400.0, 500.0], device=\"cuda\", dtype=dtype\n", + " )\n", + " final_centroid_x = torch.empty(k, device=\"cuda\", dtype=dtype)\n", + " final_centroid_y = torch.empty(k, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"data_x\": data_x,\n", + " \"data_y\": data_y,\n", + " \"labels\": labels,\n", + " \"initial_centroid_x\": initial_centroid_x,\n", + " \"initial_centroid_y\": initial_centroid_y,\n", + " \"final_centroid_x\": final_centroid_x,\n", + " \"final_centroid_y\": final_centroid_y,\n", + " \"sample_size\": sample_size,\n", + " \"k\": k,\n", + " \"max_iterations\": 30,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def 
eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/36_radix_sort.ipynb b/challenges/colab_exports/hard/36_radix_sort.ipynb new file mode 100644 index 00000000..6b85ae4d --- /dev/null +++ b/challenges/colab_exports/hard/36_radix_sort.ipynb @@ -0,0 +1,514 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a radix sort algorithm that sorts an array of 32-bit unsigned integers on a GPU.\n The program should take an input array of unsigned integers and sort them in ascending order using the radix sort algorithm.\n The input parameter contains the unsorted array, and the sorted result should be stored in the output array.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final sorted result must be stored in the output array
    • \n
    • Use radix sort algorithm (not other sorting algorithms)
    • \n
    • Sort in ascending order
    • \n
    \n\n

    Example 1:

    \n
    \n  Input:  [170, 45, 75, 90, 2, 802, 24, 66]\n  Output: [2, 24, 45, 66, 75, 90, 170, 802]\n  
    \n\n

    Example 2:

    \n
    \n  Input:  [1, 4, 1, 3, 555, 1000, 2]\n  Output: [1, 1, 2, 3, 4, 555, 1000]\n  
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 N \u2264 100,000,000
    • \n
    • 0 \u2264 input[i] \u2264 4,294,967,295 (32-bit unsigned integers)
    • \n\n
    • Performance is measured with N = 50,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const unsigned int* input, unsigned int* output, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[UInt32, MutExternalOrigin],\n output: UnsafePointer[UInt32, MutExternalOrigin],\n N: 
Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef radix_sort_kernel(input, output, N):\n input = input.to(tl.pointer_type(tl.uint32))\n output = output.to(tl.pointer_type(tl.uint32))\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, 
Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Radix Sort\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + "\n", + " assert input.dtype == torch.uint32\n", + " assert output.dtype == torch.uint32\n", + " assert input.shape == output.shape == (N,)\n", + "\n", + " # Convert uint32 to int64 for sorting (since torch.sort doesn't support uint32)\n", + " input_int64 = input.to(torch.int64)\n", + " sorted_tensor = torch.sort(input_int64)[0]\n", + " # Convert back to uint32\n", + " output.copy_(sorted_tensor.to(torch.uint32))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_uint32), \"in\"),\n", + " \"output\": 
(ctypes.POINTER(ctypes.c_uint32), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.uint32\n", + " N = 8\n", + " input_data = torch.tensor([170, 45, 75, 90, 2, 802, 24, 66], device=\"cuda\", dtype=dtype)\n", + " output_data = torch.zeros(N, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input_data,\n", + " \"output\": output_data,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.uint32\n", + " test_cases = []\n", + "\n", + " # Test case 1: basic example\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [170, 45, 75, 90, 2, 802, 24, 66], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.zeros(8, device=\"cuda\", dtype=dtype),\n", + " \"N\": 8,\n", + " }\n", + " )\n", + "\n", + " # Test case 2: duplicate numbers\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([1, 4, 1, 3, 555, 1000, 2], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(7, device=\"cuda\", dtype=dtype),\n", + " \"N\": 7,\n", + " }\n", + " )\n", + "\n", + " # Test case 3: single element\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([42], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + "\n", + " # Test case 4: already sorted\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([1, 2, 3, 4, 5, 6], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(6, device=\"cuda\", dtype=dtype),\n", + " \"N\": 6,\n", + " }\n", + " )\n", + "\n", + " # Test case 5: reverse sorted\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor([6, 5, 4, 3, 2, 1], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(6, device=\"cuda\", dtype=dtype),\n", + " \"N\": 6,\n", + " }\n", + " )\n", + "\n", + 
" # Test case 6: large numbers\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [4294967295, 1000000000, 500000000, 2000000000, 100000000],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " # Test case 7: medium random\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.randint(0, 1000001, (1024,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(1024, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1024,\n", + " }\n", + " )\n", + "\n", + " # Test case 8: large random\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.randint(0, 4294967296, (10000,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(10000, device=\"cuda\", dtype=dtype),\n", + " \"N\": 10000,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.uint32\n", + " N = 50000000\n", + " return {\n", + " \"input\": torch.randint(0, 4294967296, (N,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/39_Fast_Fourier_transform.ipynb b/challenges/colab_exports/hard/39_Fast_Fourier_transform.ipynb new file mode 100644 index 00000000..aac640a1 --- /dev/null +++ b/challenges/colab_exports/hard/39_Fast_Fourier_transform.ipynb @@ -0,0 +1,483 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that computes the Fast Fourier Transform (FFT) of a\n complex-valued 1-D signal. Given an input signal array containing\n N complex numbers stored as interleaved real/imaginary pairs,\n compute the discrete Fourier transform and store the result in the\n spectrum array. The FFT converts a time-domain signal into its\n frequency-domain representation using the formula: $$ X_k = \\sum_{n=0}^{N-1}\n x_n \\cdot e^{-j 2\\pi kn / N} \\quad \\text{for } k = 0, 1, \\ldots, N-1 $$ The\n FFT algorithm reduces the computational complexity from O(N\u00b2) to O(N log N) by\n exploiting symmetries in the twiddle factors.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries (cuFFT etc.) are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the spectrum array
    • \n
    • The kernel must be entirely GPU-resident\u2014no host-side FFT calls
    • \n
    • \n Both input and output use interleaved real/imaginary layout:\n [real\u2080, imag\u2080, real\u2081, imag\u2081, ...]\n
    • \n
    \n\n

    Example 1:

    \n
    \nInput:  N = 4\n        signal = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n        (represents: [1+0j, 0+0j, 0+0j, 0+0j])\n\nOutput: spectrum = [1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0]\n        (represents: [1+0j, 1+0j, 1+0j, 1+0j])\n
    \n\n

    Example 2:

    \n
    \nInput:  N = 2\n        signal = [1.0, 0.0, 1.0, 0.0]\n        (represents: [1+0j, 1+0j])\n\nOutput: spectrum = [2.0, 0.0, 0.0, 0.0]\n        (represents: [2+0j, 0+0j])\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 N \u2264 262,144
    • \n
    • All values are 32-bit floating point numbers
    • \n
    • Absolute error \u2264 1e-3 and relative error \u2264 1e-3
    • \n
    • Input and output arrays have length 2 \u00d7 N
    • \n\n
    • Performance is measured with N = 262,144
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// signal and spectrum are device pointers\nextern \"C\" void solve(const float* signal, float* spectrum, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# signal, spectrum are tensors on the GPU\n@cute.jit\ndef solve(signal: cute.Tensor, spectrum: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# signal is a tensor on GPU\n@jax.jit\ndef solve(signal: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# signal and spectrum are device pointers\n@export\ndef solve(\n signal: UnsafePointer[Float32, MutExternalOrigin],\n spectrum: 
# --- Core Challenge Base ---
from abc import ABC, abstractmethod
from typing import Any, Dict, List


class ChallengeBase(ABC):
    """Common interface for a LeetGPU challenge.

    A concrete challenge supplies a ground-truth implementation, a ctypes
    signature describing the contestant's ``solve`` entry point, and
    generators for example / functional / performance test cases.
    """

    def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):
        self.name = name
        # Tolerances used by the evaluator when comparing outputs.
        self.atol = atol
        self.rtol = rtol
        self.num_gpus = num_gpus
        self.access_tier = access_tier

    @abstractmethod
    def reference_impl(self, *args, **kwargs):
        """
        Reference solution implementation.
        """
        pass

    @abstractmethod
    def get_solve_signature(self) -> Dict[str, Any]:
        """
        Get the function signature for solution.

        Returns:
            Dictionary with argtypes and restype for ctypes
        """
        pass

    @abstractmethod
    def generate_example_test(self) -> Dict[str, Any]:
        # Fixed annotation: implementations return a single test-case dict,
        # not a list (matches the docstring below).
        """
        Generate an example test case for this problem.

        Returns:
            Dictionary with test case parameters
        """
        pass

    @abstractmethod
    def generate_functional_test(self) -> List[Dict[str, Any]]:
        """
        Generate functional test cases for this problem.

        Returns:
            List of test case dictionaries
        """
        pass

    @abstractmethod
    def generate_performance_test(self) -> Dict[str, Any]:
        # Fixed annotation: implementations return a single test-case dict.
        """
        Generate a performance test case for this problem.

        Returns:
            Dictionary with test case parameters
        """
        pass


# --- Challenge Logic ---
import ctypes
from typing import Any, Dict, List

import torch


class Challenge(ChallengeBase):
    """Fast Fourier Transform challenge: complex FFT of N interleaved samples."""

    def __init__(self):
        super().__init__(
            name="Fast Fourier Transform", atol=1e-3, rtol=1e-3, num_gpus=1, access_tier="free"
        )

    def reference_impl(self, signal: torch.Tensor, spectrum: torch.Tensor, N: int):
        """
        Ground-truth implementation using torch.fft. Assumes both tensors are
        on the same device (CPU or CUDA). Works for any N (power-of-two not
        required, but contestants may optimise for radix-2).

        Args
        ----
        signal   : flattened real/imag interleaved input  (len == 2 x N)
        spectrum : flattened real/imag interleaved output (len == 2 x N)
        N        : number of complex samples
        """
        assert signal.shape == (2 * N,)
        assert spectrum.shape == (2 * N,)
        assert signal.dtype == spectrum.dtype
        assert signal.device == spectrum.device

        # View as (N, 2) -> complex tensor.
        sig_ri = signal.view(N, 2)
        sig_c = torch.complex(sig_ri[:, 0], sig_ri[:, 1])

        # Torch reference FFT.
        spec_c = torch.fft.fft(sig_c)

        # Write back as interleaved real/imag.
        spec_ri = torch.stack((spec_c.real, spec_c.imag), dim=1).contiguous()
        spectrum.copy_(spec_ri.view(-1))

    def get_solve_signature(self) -> Dict[str, tuple]:
        # (ctypes type, direction) per argument; "out" entries are compared
        # against the reference by the evaluator.
        return {
            "signal": (ctypes.POINTER(ctypes.c_float), "in"),  # in (2 x N)
            "spectrum": (ctypes.POINTER(ctypes.c_float), "out"),  # out (2 x N)
            "N": (ctypes.c_int, "in"),
        }

    def generate_example_test(self) -> Dict[str, Any]:
        dtype = torch.float32
        N = 4
        # Impulse signal: delta[n] = 1 when n=0 else 0 (expected flat spectrum).
        signal = torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], device="cuda", dtype=dtype)
        spectrum = torch.empty(2 * N, device="cuda", dtype=dtype)
        return {"signal": signal, "spectrum": spectrum, "N": N}

    def generate_functional_test(self) -> List[Dict[str, Any]]:
        dtype = torch.float32
        cases: List[Dict[str, Any]] = []

        # 1. Constant signal (all ones) - DC spike only.
        N = 8
        const_sig = torch.ones(2 * N, device="cuda", dtype=dtype)
        const_spec = torch.empty_like(const_sig)
        cases.append({"signal": const_sig, "spectrum": const_spec, "N": N})

        # 2. Single-frequency sinusoid (real: cos, imag: sin).
        N = 16
        k = 3  # frequency bin
        n = torch.arange(N, device="cuda", dtype=dtype)
        real = torch.cos(2.0 * torch.pi * k * n / N)
        imag = torch.sin(2.0 * torch.pi * k * n / N)
        sinusoid = torch.stack((real, imag), dim=1).contiguous().view(-1)
        sinusoid_spec = torch.empty_like(sinusoid)
        cases.append({"signal": sinusoid, "spectrum": sinusoid_spec, "N": N})

        # 3. Random complex signal, power-of-two length.
        N = 256
        rnd = torch.empty(2 * N, device="cuda", dtype=dtype).uniform_(-1.0, 1.0)
        rnd_spec = torch.empty_like(rnd)
        cases.append({"signal": rnd, "spectrum": rnd_spec, "N": N})

        # 4. Random complex signal, non-power-of-two length.
        N = 250
        rnd_np2 = torch.empty(2 * N, device="cuda", dtype=dtype).uniform_(-1.0, 1.0)
        rnd_np2_spec = torch.empty_like(rnd_np2)
        cases.append({"signal": rnd_np2, "spectrum": rnd_np2_spec, "N": N})

        # 5. Medium-size signal (performance sanity).
        N = 4096
        med = torch.empty(2 * N, device="cuda", dtype=dtype).normal_(0.0, 0.5)
        med_spec = torch.empty_like(med)
        cases.append({"signal": med, "spectrum": med_spec, "N": N})

        return cases

    def generate_performance_test(self) -> Dict[str, Any]:
        dtype = torch.float32
        N = 262_144  # 256 K complex samples (~2 MiB real/imag)
        big_sig = torch.empty(2 * N, device="cuda", dtype=dtype).normal_(0.0, 1.0)
        big_spec = torch.empty_like(big_sig)
        return {"signal": big_sig, "spectrum": big_spec, "N": N}


ch = Challenge()


import os
import time
import ctypes
import torch

class Evaluate:
    """Compiles/loads the contestant's solution and checks it against the
    challenge reference implementation on the functional test cases."""

    @staticmethod
    def eval_cuda(ch):
        # 1. Compile a fresh uniquely named library. A nanosecond timestamp
        #    avoids filename collisions when two compiles happen within the
        #    same wall-clock second (int(time.time()) did not).
        so_filename = f'solution_func_{time.time_ns()}.so'
        status = os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')
        # Fail loudly on a compile error instead of letting ctypes.CDLL raise
        # a confusing "cannot open shared object file" later.
        if status != 0:
            raise RuntimeError('nvcc failed to compile solution.cu')
        lib = ctypes.CDLL(f'./{so_filename}')

        # 2. Extract signature and set argtypes.
        signature = ch.get_solve_signature()
        lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]

        Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))

    @staticmethod
    def eval_python(ch):
        # Import solution.py as a throwaway module so re-runs pick up edits.
        import importlib.util
        import sys

        spec = importlib.util.spec_from_file_location("solution", "solution.py")
        solution = importlib.util.module_from_spec(spec)
        sys.modules["solution"] = solution
        spec.loader.exec_module(solution)

        signature = ch.get_solve_signature()
        Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))

    @staticmethod
    def _run_python(solution, kwargs):
        solution.solve(**kwargs)
        if torch.cuda.is_available():
            torch.cuda.synchronize()

    @staticmethod
    def eval_mojo(ch):
        print("Mojo evaluation is currently executed via a separate runner or wrapper.")
        print("Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,")
        print("or run an external python bridge. This is a stub.")

    @staticmethod
    def _build_cuda_args(kwargs, signature):
        # Tensors become raw device pointers; scalars are wrapped in their
        # declared ctypes type.
        cuda_args = []
        for k, (arg_type, dir_type) in signature.items():
            val = kwargs[k]
            if isinstance(val, torch.Tensor):
                cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))
            else:
                cuda_args.append(arg_type(val))
        return cuda_args

    @staticmethod
    def _run_tests(ch, signature, run_fn):
        """Run every functional case through run_fn and compare the "out"
        arguments against the reference implementation. Returns True when
        all cases pass."""
        print("=== Running Functional Tests ===")
        functional_tests = ch.generate_functional_test()
        all_passed = True

        for i, test in enumerate(functional_tests):
            # Clone tensors so reference and candidate see identical inputs.
            ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}
            test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}

            # Run Reference.
            ch.reference_impl(**ref_kwargs)

            # Run implementation.
            run_fn(test_kwargs)
            if torch.cuda.is_available():
                torch.cuda.synchronize()

            # Verify outputs.
            match = True
            for k, (_, dir_type) in signature.items():
                if dir_type == "out":
                    if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):
                        match = False
                        print(f"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'")
                        break

            if match:
                print(f"\u2705 Test {i+1}/{len(functional_tests)} Passed")
            else:
                all_passed = False
                break

        if all_passed:
            print("\n\U0001f389 All functional tests passed!")
            return True
        else:
            return False
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/46_bfs_shortest_path.ipynb b/challenges/colab_exports/hard/46_bfs_shortest_path.ipynb new file mode 100644 index 00000000..2c688cb9 --- /dev/null +++ b/challenges/colab_exports/hard/46_bfs_shortest_path.ipynb @@ -0,0 +1,627 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a program that finds the shortest path in an unweighted 2D grid using Breadth-First Search (BFS). Given a grid with obstacles and start/end positions, return the minimum number of steps needed to reach the destination.\n

    \n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n #\n #\n #\n #\n #\n #\n #\n \n S\n Start\n \n E\n End\n \n 1\n 2\n 3\n 4\n 5\n 6\n 7\n \n\n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • Return the shortest path length, or -1 if no path exists
    • \n
    • Grid cells with value 0 are free, cells with value 1 are obstacles
    • \n
    • Movement is allowed in 4 directions: up, down, left, right
    • \n
    \n\n

    Example 1:

    \n
    \nInput:\n  grid (4x4) = [\n    [0, 0, 0, 0],\n    [1, 1, 0, 1],\n    [0, 0, 0, 0],\n    [0, 1, 1, 0]\n  ]\n  start_row = 0, start_col = 0\n  end_row = 3, end_col = 3\n\nOutput: 6\n\nExplanation: One possible shortest path:\n(0,0) \u2192 (0,1) \u2192 (0,2) \u2192 (1,2) \u2192 (2,2) \u2192 (2,3) \u2192 (3,3)\n
    \n\n

    Example 2:

    \n
    \nInput:\n  grid (3x3) = [\n    [0, 1, 0],\n    [1, 1, 1],\n    [0, 0, 0]\n  ]\n  start_row = 0, start_col = 0\n  end_row = 0, end_col = 2\n\nOutput: -1\n\nExplanation: No path exists due to obstacles completely blocking the way.\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 rows, cols \u2264 1000
    • \n
    • Grid values are either 0 (free) or 1 (obstacle)
    • \n
    • Start and end positions are guaranteed to be within bounds and on free cells (value 0)
    • \n
    • Start and end positions may be the same (return 0 in this case)
    • \n\n
    • Performance is measured with cols = 500, rows = 500
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// grid, result are device pointers\nextern \"C\" void solve(const int* grid, int* result, int rows, int cols, int start_row,\n int start_col, int end_row, int end_col) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# grid, result are tensors on the GPU\n@cute.jit\ndef solve(\n grid: cute.Tensor,\n result: cute.Tensor,\n rows: cute.Int32,\n cols: cute.Int32,\n start_row: cute.Int32,\n start_col: cute.Int32,\n end_row: cute.Int32,\n end_col: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# grid is a tensor on the GPU\n@jax.jit\ndef solve(\n grid: jax.Array,\n rows: int,\n cols: int,\n start_row: int,\n start_col: int,\n end_row: int,\n end_col: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.memory import 
# --- Core Challenge Base ---
from abc import ABC, abstractmethod
from typing import Any, Dict, List


class ChallengeBase(ABC):
    """Common interface for a LeetGPU challenge.

    A concrete challenge supplies a ground-truth implementation, a ctypes
    signature describing the contestant's ``solve`` entry point, and
    generators for example / functional / performance test cases.
    """

    def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):
        self.name = name
        # Tolerances used by the evaluator when comparing outputs.
        self.atol = atol
        self.rtol = rtol
        self.num_gpus = num_gpus
        self.access_tier = access_tier

    @abstractmethod
    def reference_impl(self, *args, **kwargs):
        """
        Reference solution implementation.
        """
        pass

    @abstractmethod
    def get_solve_signature(self) -> Dict[str, Any]:
        """
        Get the function signature for solution.

        Returns:
            Dictionary with argtypes and restype for ctypes
        """
        pass

    @abstractmethod
    def generate_example_test(self) -> Dict[str, Any]:
        # Fixed annotation: implementations return a single test-case dict,
        # not a list (matches the docstring below).
        """
        Generate an example test case for this problem.

        Returns:
            Dictionary with test case parameters
        """
        pass

    @abstractmethod
    def generate_functional_test(self) -> List[Dict[str, Any]]:
        """
        Generate functional test cases for this problem.

        Returns:
            List of test case dictionaries
        """
        pass

    @abstractmethod
    def generate_performance_test(self) -> Dict[str, Any]:
        # Fixed annotation: implementations return a single test-case dict.
        """
        Generate a performance test case for this problem.

        Returns:
            Dictionary with test case parameters
        """
        pass


# --- Challenge Logic ---
import ctypes
from typing import Any, Dict, List

import torch


class Challenge(ChallengeBase):
    """BFS Shortest Path challenge: minimum steps through an obstacle grid."""

    def __init__(self):
        super().__init__(name="BFS Shortest Path", atol=0, rtol=0, num_gpus=1, access_tier="free")

    def reference_impl(
        self,
        grid: torch.Tensor,
        result: torch.Tensor,
        rows: int,
        cols: int,
        start_row: int,
        start_col: int,
        end_row: int,
        end_col: int,
    ):
        """
        Reference implementation that finds shortest path using BFS.

        Args:
            grid: Flattened 2D grid of size rows*cols (0=free, 1=obstacle)
            result: Single element tensor to store the result
            rows, cols: Grid dimensions
            start_row, start_col: Starting position
            end_row, end_col: Target position
        """
        assert grid.dtype == torch.int32
        assert result.dtype == torch.int32
        assert grid.shape == (rows * cols,)
        assert result.shape == (1,)
        assert 0 <= start_row < rows and 0 <= start_col < cols
        assert 0 <= end_row < rows and 0 <= end_col < cols

        # If start and end are the same, the path length is zero by definition.
        if start_row == end_row and start_col == end_col:
            result[0] = 0
            return

        # Reshape grid for easier 2D indexing.
        grid_2d = grid.view(rows, cols)

        # BFS implementation.
        from collections import deque

        # Directions: up, down, left, right.
        directions = [(-1, 0), (1, 0), (0, -1), (0, 1)]

        # Visited mask prevents re-enqueueing cells.
        visited = torch.zeros((rows, cols), dtype=torch.bool, device=grid.device)

        # BFS queue holds (row, col, distance-from-start).
        queue = deque([(start_row, start_col, 0)])
        visited[start_row, start_col] = True

        while queue:
            row, col, dist = queue.popleft()

            # Check if we reached the target.
            if row == end_row and col == end_col:
                result[0] = dist
                return

            # Explore the four neighbours.
            for dr, dc in directions:
                new_row, new_col = row + dr, col + dc

                # Check bounds.
                if 0 <= new_row < rows and 0 <= new_col < cols:
                    # Check if not visited and not an obstacle.
                    if not visited[new_row, new_col] and grid_2d[new_row, new_col] == 0:
                        visited[new_row, new_col] = True
                        queue.append((new_row, new_col, dist + 1))

        # No path found.
        result[0] = -1

    def get_solve_signature(self) -> Dict[str, tuple]:
        # (ctypes type, direction) per argument; "out" entries are compared
        # against the reference by the evaluator.
        return {
            "grid": (ctypes.POINTER(ctypes.c_int), "in"),
            "result": (ctypes.POINTER(ctypes.c_int), "out"),
            "rows": (ctypes.c_int, "in"),
            "cols": (ctypes.c_int, "in"),
            "start_row": (ctypes.c_int, "in"),
            "start_col": (ctypes.c_int, "in"),
            "end_row": (ctypes.c_int, "in"),
            "end_col": (ctypes.c_int, "in"),
        }

    def generate_example_test(self) -> Dict[str, Any]:
        dtype_int = torch.int32

        # Example from challenge.html
        # Grid: [[0,0,0,0], [1,1,0,1], [0,0,0,0], [0,1,1,0]]
        grid_data = torch.tensor(
            [0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0],  # rows 0-3, flattened
            device="cuda",
            dtype=dtype_int,
        )

        result_data = torch.tensor([0], device="cuda", dtype=dtype_int)

        return {
            "grid": grid_data,
            "result": result_data,
            "rows": 4,
            "cols": 4,
            "start_row": 0,
            "start_col": 0,
            "end_row": 3,
            "end_col": 3,
        }

    def generate_functional_test(self) -> List[Dict[str, Any]]:
        dtype_int = torch.int32
        test_cases = []

        # Test case 1: Simple path exists.
        test_cases.append(
            {
                "grid": torch.tensor([0, 0, 1, 0, 0, 0], device="cuda", dtype=dtype_int),
                "result": torch.tensor([0], device="cuda", dtype=dtype_int),
                "rows": 2,
                "cols": 3,
                "start_row": 0,
                "start_col": 0,
                "end_row": 1,
                "end_col": 2,
            }
        )

        # Test case 2: No path (blocked).
        test_cases.append(
            {
                "grid": torch.tensor([0, 1, 0, 1, 0], device="cuda", dtype=dtype_int),
                "result": torch.tensor([0], device="cuda", dtype=dtype_int),
                "rows": 1,
                "cols": 5,
                "start_row": 0,
                "start_col": 0,
                "end_row": 0,
                "end_col": 4,
            }
        )

        # Test case 3: Same start and end.
        test_cases.append(
            {
                "grid": torch.tensor([0, 1, 0, 0], device="cuda", dtype=dtype_int),
                "result": torch.tensor([0], device="cuda", dtype=dtype_int),
                "rows": 2,
                "cols": 2,
                "start_row": 0,
                "start_col": 0,
                "end_row": 0,
                "end_col": 0,
            }
        )

        # Test case 4: Single cell.
        test_cases.append(
            {
                "grid": torch.tensor([0], device="cuda", dtype=dtype_int),
                "result": torch.tensor([0], device="cuda", dtype=dtype_int),
                "rows": 1,
                "cols": 1,
                "start_row": 0,
                "start_col": 0,
                "end_row": 0,
                "end_col": 0,
            }
        )

        # Test case 5: Larger grid with path.
        large_grid = torch.zeros(25, device="cuda", dtype=dtype_int)  # 5x5 grid
        large_grid[6] = 1  # obstacle at (1,1)
        large_grid[7] = 1  # obstacle at (1,2)
        large_grid[8] = 1  # obstacle at (1,3)
        test_cases.append(
            {
                "grid": large_grid,
                "result": torch.tensor([0], device="cuda", dtype=dtype_int),
                "rows": 5,
                "cols": 5,
                "start_row": 0,
                "start_col": 0,
                "end_row": 4,
                "end_col": 4,
            }
        )

        # Test case 6: Complex maze.
        maze_grid = torch.tensor(
            [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0],
            device="cuda",
            dtype=dtype_int,
        )
        test_cases.append(
            {
                "grid": maze_grid,
                "result": torch.tensor([0], device="cuda", dtype=dtype_int),
                "rows": 5,
                "cols": 5,
                "start_row": 0,
                "start_col": 0,
                "end_row": 4,
                "end_col": 4,
            }
        )

        return test_cases

    def generate_performance_test(self) -> Dict[str, Any]:
        dtype_int = torch.int32
        rows, cols = 500, 500

        # Create a large grid with some random obstacles (seeded for
        # reproducible timing runs).
        torch.manual_seed(42)
        grid = torch.randint(0, 2, (rows * cols,), device="cuda", dtype=dtype_int)

        # Ensure start and end are free.
        grid[0] = 0   # start at (0,0)
        grid[-1] = 0  # end at (rows-1, cols-1)

        # Create some clear paths to avoid always getting -1.
        for i in range(0, rows * cols, cols):
            if i + cols - 1 < rows * cols:
                grid[i : i + min(cols, 10)] = 0  # Clear first 10 cells of each row

        result = torch.tensor([0], device="cuda", dtype=dtype_int)

        return {
            "grid": grid,
            "result": result,
            "rows": rows,
            "cols": cols,
            "start_row": 0,
            "start_col": 0,
            "end_row": rows - 1,
            "end_col": cols - 1,
        }


ch = Challenge()


import os
import time
import ctypes
import torch

class Evaluate:
    """Compiles/loads the contestant's solution and checks it against the
    challenge reference implementation on the functional test cases."""

    @staticmethod
    def eval_cuda(ch):
        # 1. Compile a fresh uniquely named library. A nanosecond timestamp
        #    avoids filename collisions when two compiles happen within the
        #    same wall-clock second (int(time.time()) did not).
        so_filename = f'solution_func_{time.time_ns()}.so'
        status = os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')
        # Fail loudly on a compile error instead of letting ctypes.CDLL raise
        # a confusing "cannot open shared object file" later.
        if status != 0:
            raise RuntimeError('nvcc failed to compile solution.cu')
        lib = ctypes.CDLL(f'./{so_filename}')

        # 2. Extract signature and set argtypes.
        signature = ch.get_solve_signature()
        lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]

        Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))

    @staticmethod
    def eval_python(ch):
        # Import solution.py as a throwaway module so re-runs pick up edits.
        import importlib.util
        import sys

        spec = importlib.util.spec_from_file_location("solution", "solution.py")
        solution = importlib.util.module_from_spec(spec)
        sys.modules["solution"] = solution
        spec.loader.exec_module(solution)

        signature = ch.get_solve_signature()
        Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))

    @staticmethod
    def _run_python(solution, kwargs):
        solution.solve(**kwargs)
        if torch.cuda.is_available():
            torch.cuda.synchronize()

    @staticmethod
    def eval_mojo(ch):
        print("Mojo evaluation is currently executed via a separate runner or wrapper.")
        print("Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,")
        print("or run an external python bridge. This is a stub.")

    @staticmethod
    def _build_cuda_args(kwargs, signature):
        # Tensors become raw device pointers; scalars are wrapped in their
        # declared ctypes type.
        cuda_args = []
        for k, (arg_type, dir_type) in signature.items():
            val = kwargs[k]
            if isinstance(val, torch.Tensor):
                cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))
            else:
                cuda_args.append(arg_type(val))
        return cuda_args

    @staticmethod
    def _run_tests(ch, signature, run_fn):
        """Run every functional case through run_fn and compare the "out"
        arguments against the reference implementation. Returns True when
        all cases pass."""
        print("=== Running Functional Tests ===")
        functional_tests = ch.generate_functional_test()
        all_passed = True

        for i, test in enumerate(functional_tests):
            # Clone tensors so reference and candidate see identical inputs.
            ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}
            test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}

            # Run Reference.
            ch.reference_impl(**ref_kwargs)

            # Run implementation.
            run_fn(test_kwargs)
            if torch.cuda.is_available():
                torch.cuda.synchronize()

            # Verify outputs.
            match = True
            for k, (_, dir_type) in signature.items():
                if dir_type == "out":
                    if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):
                        match = False
                        print(f"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'")
                        break

            if match:
                print(f"\u2705 Test {i+1}/{len(functional_tests)} Passed")
            else:
                all_passed = False
                break

        if all_passed:
            print("\n\U0001f389 All functional tests passed!")
            return True
        else:
            return False
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/53_casual_attention.ipynb b/challenges/colab_exports/hard/53_casual_attention.ipynb new file mode 100644 index 00000000..1911bf61 --- /dev/null +++ b/challenges/colab_exports/hard/53_casual_attention.ipynb @@ -0,0 +1,510 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    Implement Causal (masked) Self-Attention for a given set of matrices.\n Given the query matrix Q of size M\u00d7d, key matrix K of size M\u00d7d, and value matrix\n V of size M\u00d7d, your program should compute the output matrix using the formula:\n $$\\text{Attention}_{\\text{causal}}(Q, K, V) = \\text{softmax}\\Bigl(\\text{masked}\\Bigl( \\frac{QK^T}{\\sqrt{d}} \\Bigr)\\Bigr)V$$\n

    \n\n\n

    \n where mask is a causal mask that sets all positions corresponding to keys after the current query to $-\\infty$.\n $$$$\n i.e., for query i and key j:\n $$\n \\text{masked}(a_{ij}) =\n \\begin{cases}\n a_{ij}, & j \\le i \\\\\n -\\infty, & j > i\n \\end{cases}\n $$\n The softmax function is applied row-wise. Q, K, V, and output are all of data type float32;\n M, and d are of data type int32.\n

    \n\n\n \n\n \n key position →\n query →\n\n \n 0\n 1\n 2\n 3\n 4\n 5\n\n \n 0\n 1\n 2\n 3\n 4\n 5\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n -∞\n -∞\n -∞\n -∞\n -∞\n \n -∞\n -∞\n -∞\n -∞\n \n -∞\n -∞\n -∞\n \n -∞\n -∞\n \n -∞\n\n \n \n \n \n \n \n \n \n \n \n \n\n \n \n attend\n \n masked\n\n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The\n solve function signature must remain unchanged\n
    • \n
    • The final result must be stored in the output matrix\n output\n
    • \n
    \n

    Example 1:

    \n

    \nInput:
    \nQ (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0\n\\end{bmatrix}\n$$\nK (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0\n\\end{bmatrix}\n$$\nV (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n5.0 & 6.0 & 7.0 & 8.0\n\\end{bmatrix}\n$$\n

    \n\n

    \nOutput:
    \noutput (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n3.4898374 & 4.4898374 & 5.4898374 & 6.4898374\n\\end{bmatrix}\n$$\n

    \n\n\n

    Example 2:

    \n

    \nInput:
    \nQ (2\u00d72):\n$$\n\\begin{bmatrix}\n0.0 & 0.0 \\\\\n1.0 & 1.0\n\\end{bmatrix}\n$$\nK (2\u00d72):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 \\\\\n0.0 & 1.0\n\\end{bmatrix}\n$$\nV (2\u00d72):\n$$\n\\begin{bmatrix}\n3.0 & 4.0 \\\\\n5.0 & 6.0\n\\end{bmatrix}\n$$\n

    \n\n

    \nOutput:
    \noutput (2\u00d72):\n$$\n\\begin{bmatrix}\n3.0 & 4.0 \\\\\n5.0 & 6.0\n\\end{bmatrix}\n$$\n

    \n\n\n

    Constraints

    \n
      \n
    • Matrix Q, K, and V are all of size M\u00d7d
    • \n
    • 1 ≤ M ≤ 10000
    • \n
    • 1 ≤ d ≤ 128
    • \n
    • All elements in Q, K, and V are sampled from [-100.0, 100.0]
    • \n
    • Data type for all matrices is float32
    • \n\n
    • Performance is measured with M = 5,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K, V, output are device pointers\nextern \"C\" void solve(const float* Q, const float* K, const float* V, float* output, int M, int d) {\n\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K, V, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K: cute.Tensor,\n V: cute.Tensor,\n output: cute.Tensor,\n M: cute.Int32,\n d: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K, V are tensors on the GPU\n@jax.jit\ndef solve(Q: jax.Array, K: jax.Array, V: jax.Array, M: int, d: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# Q, K, V, 
output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K: UnsafePointer[Float32, MutExternalOrigin],\n V: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n d: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, output: torch.Tensor, M: int, d: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, output: torch.Tensor, M: int, d: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " 
@abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Causal Self-Attention\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K: torch.Tensor,\n", + " V: torch.Tensor,\n", + " output: torch.Tensor,\n", + " M: int,\n", + " d: int,\n", + " ):\n", + " scale = d**0.5\n", + " attn = torch.matmul(Q, K.t()) / scale\n", + "\n", + " # add mask\n", + " mask = torch.triu(torch.ones(M, M, device=attn.device), diagonal=1).bool()\n", + " attn = attn.masked_fill(mask, 
float(\"-inf\"))\n", + " attn = torch.softmax(attn, dim=1)\n", + " torch.matmul(attn, V, out=output)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"V\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"d\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " Q = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype)\n", + " K = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype)\n", + " V = torch.tensor([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(2, 4, device=\"cuda\", dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": 2, \"d\": 4}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(2, 4, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"d\": 4,\n", + " }\n", + " )\n", + "\n", + " # zero_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"K\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"V\": torch.zeros((3, 5), device=\"cuda\", 
dtype=dtype),\n", + " \"output\": torch.empty(3, 5, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"d\": 5,\n", + " }\n", + " )\n", + "\n", + " # mixed_values\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0], [-7.0, 8.0, -9.0], [10.0, -11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[2.0, -1.0, 3.0], [-4.0, 5.0, -6.0], [7.0, -8.0, 9.0], [-10.0, 11.0, -12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 0.5, -0.5], [-1.0, 2.0, 3.0], [4.0, -2.0, 1.0], [0.0, 1.0, -1.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.empty(4, 3, device=\"cuda\", dtype=dtype),\n", + " \"M\": 4,\n", + " \"d\": 3,\n", + " }\n", + " )\n", + "\n", + " # large_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"K\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"V\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"output\": torch.empty(128, 32, device=\"cuda\", dtype=dtype),\n", + " \"M\": 128,\n", + " \"d\": 32,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, d = 5000, 128\n", + " Q = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " K = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " V = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " output = torch.empty(M, d, device=\"cuda\", dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": M, \"d\": d}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": 
"evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/56_linear_attention.ipynb b/challenges/colab_exports/hard/56_linear_attention.ipynb new file mode 100644 index 00000000..5f15d4da --- /dev/null +++ b/challenges/colab_exports/hard/56_linear_attention.ipynb @@ -0,0 +1,530 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement Linear Attention for a given set of matrices, following the method described in\n \n \"Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention\"\n .\n Given the query matrix Q of size M\u00d7d, key matrix K of size M\u00d7d, and value matrix\n V of size M\u00d7d, your program should compute the output matrix using the formula:\n $$\n \\text{LinearAttention}(Q, K, V) = \\frac{\\phi(Q) \\left(\\phi(K)^T V \\right)}{\\phi(Q) \\left(\\sum_j \\phi(K_j) \\right)}\n $$\n

    \n\n

    \n where $ \\phi(x) $ is a feature map applied element-wise, for example:\n $$\n \\phi(x) = \\text{ELU}(x) + 1 =\n \\begin{cases}\n x + 1, & x > 0 \\\\\n e^x, & x \\le 0\n \\end{cases}\n $$\n All matrices Q, K, V, and output are of type float32, and M and d are of type int32.\n

    \n\n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The\n solve function signature must remain unchanged\n
    • \n
    • The final result must be stored in the output matrix\n output\n
    • \n
    \n

    Example 1:

    \n

    \nInput:
    \nQ (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0\n\\end{bmatrix}\n$$\nK (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0\n\\end{bmatrix}\n$$\nV (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n5.0 & 6.0 & 7.0 & 8.0\n\\end{bmatrix}\n$$\n

    \n\n

    \nOutput:
    \noutput (2\u00d74):\n$$\n\\begin{bmatrix}\n2.8461537 & 3.8461537 & 4.8461537 & 5.8461537 \\\\\n3.1538463 & 4.1538463 & 5.1538463 & 6.1538463\n\\end{bmatrix}\n$$\n

    \n\n\n

    Example 2:

    \n

    \nInput:
    \nQ (2\u00d72):\n$$\n\\begin{bmatrix}\n0.0 & 0.0 \\\\\n1.0 & 1.0\n\\end{bmatrix}\n$$\nK (2\u00d72):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 \\\\\n0.0 & 1.0\n\\end{bmatrix}\n$$\nV (2\u00d72):\n$$\n\\begin{bmatrix}\n3.0 & 4.0 \\\\\n5.0 & 6.0\n\\end{bmatrix}\n$$\n

    \n\n

    \nOutput:
    \noutput (2\u00d72):\n$$\n\\begin{bmatrix}\n4.0 & 5.0 \\\\\n4.0 & 5.0\n\\end{bmatrix}\n$$\n

    \n\n\n

    Constraints

    \n
      \n
    • Matrix Q, K, and V are all of size M\u00d7d
    • \n
    • 1 ≤ M ≤ 10000
    • \n
    • 1 ≤ d ≤ 128
    • \n
    • All elements in Q, K, and V are sampled from [-100.0, 100.0]
    • \n
    • Data type for all matrices is float32
    • \n\n
    • Performance is measured with M = 10,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K, V, output are device pointers\nextern \"C\" void solve(const float* Q, const float* K, const float* V, float* output, int M, int d) {\n\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K, V, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K: cute.Tensor,\n V: cute.Tensor,\n output: cute.Tensor,\n M: cute.Int32,\n d: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K, V are tensors on the GPU\n@jax.jit\ndef solve(Q: jax.Array, K: jax.Array, V: jax.Array, M: int, d: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# Q, K, V, 
output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K: UnsafePointer[Float32, MutExternalOrigin],\n V: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n d: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, output: torch.Tensor, M: int, d: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, output: torch.Tensor, M: int, d: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " 
@abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Linear Self-Attention\", atol=1e-04, rtol=1e-04, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K: torch.Tensor,\n", + " V: torch.Tensor,\n", + " output: torch.Tensor,\n", + " M: int,\n", + " d: int,\n", + " ):\n", + " assert Q.shape == K.shape == V.shape == output.shape == (M, d)\n", + " # \u03c6(x) = ELU(x) + 1\n", + " phi_Q = torch.where(Q > 0, Q + 1, torch.exp(Q))\n", + " phi_K = torch.where(K > 0, K + 1, torch.exp(K))\n", + "\n", 
+ " # S = sum_j \u03c6(K_j) V_j^T = \u03c6(K)^T V\n", + " S = phi_K.T @ V # (d,M) @ (M,d) = (d, d)\n", + " # z = sum_j \u03c6(K_j)\n", + " z = phi_K.sum(dim=0) # (d,)\n", + "\n", + " # numerator: \u03c6(Q_i) @ S \u2192 (M,d)\n", + " numerator = phi_Q @ S # (M,d) @ (d,d) = (M,d)\n", + " # denominator: \u03c6(Q_i) @ z \u2192 (scalar)\n", + " denominator = phi_Q @ z # (M,d) @ (d,) = (M,)\n", + "\n", + " output.copy_(numerator / denominator.unsqueeze(-1)) # (M, d)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"V\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"d\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " Q = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype)\n", + " K = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype)\n", + " V = torch.tensor([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(2, 4, device=\"cuda\", dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": 2, \"d\": 4}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": 
torch.empty(2, 4, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"d\": 4,\n", + " }\n", + " )\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor([[0.0, 0.0], [1.0, 1.0]], device=\"cuda\", dtype=dtype),\n", + " \"K\": torch.tensor([[1.0, 0.0], [0.0, 1.0]], device=\"cuda\", dtype=dtype),\n", + " \"V\": torch.tensor([[3.0, 4.0], [5.0, 6.0]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2, 2, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"d\": 2,\n", + " }\n", + " )\n", + "\n", + " # zero_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"K\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"V\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, 5, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"d\": 5,\n", + " }\n", + " )\n", + "\n", + " # mixed_values\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0], [-7.0, 8.0, -9.0], [10.0, -11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[2.0, -1.0, 3.0], [-4.0, 5.0, -6.0], [7.0, -8.0, 9.0], [-10.0, 11.0, -12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 0.5, -0.5], [-1.0, 2.0, 3.0], [4.0, -2.0, 1.0], [0.0, 1.0, -1.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.empty(4, 3, device=\"cuda\", dtype=dtype),\n", + " \"M\": 4,\n", + " \"d\": 3,\n", + " }\n", + " )\n", + "\n", + " # large_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"K\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"V\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"output\": 
torch.empty(128, 32, device=\"cuda\", dtype=dtype),\n", + " \"M\": 128,\n", + " \"d\": 32,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, d = 10000, 128\n", + " Q = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " K = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " V = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " output = torch.empty(M, d, device=\"cuda\", dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": M, \"d\": d}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/59_sliding_window_attn.ipynb b/challenges/colab_exports/hard/59_sliding_window_attn.ipynb new file mode 100644 index 00000000..463c81f8 --- /dev/null +++ b/challenges/colab_exports/hard/59_sliding_window_attn.ipynb @@ -0,0 +1,544 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement Sliding Window Self-Attention for a given set of matrices.\n Before introducing the sliding window version, let's first recall standard Self-Attention.\n

    \n\n

    1. Standard Softmax Attention

    \n

    \n Given query matrix Q, key matrix K, and value matrix V, each position i attends to all positions j using a softmax-weighted sum:\n

    \n\n

    \n $ \\text{score}_{i,j} = \\frac{Q_i \\cdot K_j}{\\sqrt{d}} $\n

    \n\n

    \n $ \\text{output}_i = \\sum_{j=1}^{M} \\text{softmax}(\\text{score}_{i,*})_j \\cdot V_j $\n

    \n\n

    \n In other words, each query computes similarity with all keys, applies a softmax to get attention weights, and then computes a weighted sum of values.\n

    \n\n

    2. Sliding Window Self-Attention

    \n

    \n Sliding Window Attention modifies standard attention by restricting each query to attend only to a local window around its position.\n

    \n\n
      \n
    • For each position i, only consider the keys and values within a window of size window_size around i (positions [i-window_size, ..., i+window_size]).
    • \n
    • Compute similarity scores between Q_i and the keys in this window:
    • \n
    \n\n

    \n $ \\text{score}_{i,j} = \\frac{Q_i \\cdot K_j}{\\sqrt{d}} $\n

    \n\n
      \n
    • Apply softmax over these local scores to obtain attention weights.
    • \n
    • Use the weights to compute a weighted average of the values in the same window:
    • \n
    \n\n

    \n $ \\text{output}_i = \\sum_{j \\in [i-\\text{window_size}, \\, i+\\text{window_size}]} \\text{softmax}(\\text{score}_{i,*})_j \\cdot V_j $\n

    \n\n

    \n In short, each query only attends to its nearby neighbors.\n

    \n\n\n \n \n \n\n \n key position →\n query position\n\n \n \n 0\n 1\n 2\n 3\n 4\n 5\n 6\n 7\n \n\n \n \n 0\n 1\n 2\n 3\n 4\n 5\n 6\n 7\n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n\n \n window_size = 2\n\n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The\n solve function signature must remain unchanged\n
    • \n
    • The final result must be stored in the output matrix\n output\n
    • \n
    \n

    Example 1:

    \n

    \nInput:
    \nQ (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0\n\\end{bmatrix}\n$$\nK (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0\n\\end{bmatrix}\n$$\nV (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n5.0 & 6.0 & 7.0 & 8.0\n\\end{bmatrix}\n$$\nwindow_size: 1\n

    \n\n

    \nOutput:
    \noutput (2\u00d74):\n$$\n\\begin{bmatrix}\n2.5101628 & 3.5101628 & 4.510163 & 5.510163 \\\\\n3.4898374 & 4.4898376 & 5.4898376 & 6.489837\n\\end{bmatrix}\n$$\n

    \n\n\n

    Example 2:

    \n

    \n Input:
    \n Q (2\u00d73):\n $$\n \\begin{bmatrix}\n 0.0 & 0.0 & 0.0 \\\\\n 0.0 & 1.0 & 0.0\n \\end{bmatrix}\n $$\n K (2\u00d73):\n $$\n \\begin{bmatrix}\n 1.0 & 0.0 & 0.0 \\\\\n 0.0 & 1.0 & 0.0\n \\end{bmatrix}\n $$\n V (2\u00d73):\n $$\n \\begin{bmatrix}\n 1.0 & 2.0 & 3.0 \\\\\n 5.0 & 6.0 & 7.0\n \\end{bmatrix}\n $$\n window_size: 1\n

    \n\n

    \n Output:
    \n output (2\u00d73):\n $$\n \\begin{bmatrix}\n 3.0 & 4.0 & 5.0 \\\\\n 3.5618298 & 4.56183 & 5.5618296\n \\end{bmatrix}\n $$\n

    \n\n\n\n

    Constraints

    \n
      \n
    • Matrix Q, K, and V are all of size M\u00d7d
    • \n
    • 1 ≤ M ≤ 10000
    • \n
    • 1 ≤ d ≤ 128
    • \n
    • 1 ≤ window_size ≤ 32
    • \n
    • All elements in Q, K, and V are sampled from [-100.0, 100.0]
    • \n
    • Data type for all matrices is float32
    • \n\n
    • Performance is measured with M = 5,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K, V, output are device pointers\nextern \"C\" void solve(const float* Q, const float* K, const float* V, float* output, int M, int d,\n int window_size) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K, V, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K: cute.Tensor,\n V: cute.Tensor,\n output: cute.Tensor,\n M: int,\n d: int,\n window_size: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K, V are tensors on the GPU\n@jax.jit\ndef solve(Q: jax.Array, K: jax.Array, V: jax.Array, M: int, d: int, window_size: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom 
std.math import ceildiv\n\n\n# Q, K, V, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K: UnsafePointer[Float32, MutExternalOrigin],\n V: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n d: Int32,\n window_size: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n M: int,\n d: int,\n window_size: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n M: int,\n d: int,\n window_size: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = 
atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Sliding Window Self-Attention\",\n", + " atol=1e-05,\n", + " rtol=1e-05,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K: torch.Tensor,\n", + " V: torch.Tensor,\n", + " output: torch.Tensor,\n", + " M: int,\n", + " d: int,\n", + " window_size: int,\n", + " ):\n", + " assert Q.shape == K.shape == 
V.shape == output.shape == (M, d)\n", + "\n", + " scores = (Q @ K.T) / (d**0.5)\n", + "\n", + " idxs = torch.arange(M)\n", + " mask = (idxs[None, :] - idxs[:, None]).abs() > window_size\n", + " mask = mask.to(Q.device)\n", + " scores.masked_fill_(mask, float(\"-inf\"))\n", + " attn = torch.softmax(scores, dim=1)\n", + "\n", + " torch.matmul(attn, V, out=output)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"V\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"d\": (ctypes.c_int, \"in\"),\n", + " \"window_size\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " Q = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype)\n", + " K = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype)\n", + " V = torch.tensor([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(2, 4, device=\"cuda\", dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": 2, \"d\": 4, \"window_size\": 1}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(2, 4, 
device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"d\": 4,\n", + " \"window_size\": 1,\n", + " }\n", + " )\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0]], device=\"cuda\", dtype=dtype),\n", + " \"K\": torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], device=\"cuda\", dtype=dtype),\n", + " \"V\": torch.tensor([[1.0, 2.0, 3.0], [5.0, 6.0, 7.0]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2, 3, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"d\": 3,\n", + " \"window_size\": 1,\n", + " }\n", + " )\n", + "\n", + " # zero_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"K\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"V\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, 5, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"d\": 5,\n", + " \"window_size\": 2,\n", + " }\n", + " )\n", + "\n", + " # mixed_values\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0], [-7.0, 8.0, -9.0], [10.0, -11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[2.0, -1.0, 3.0], [-4.0, 5.0, -6.0], [7.0, -8.0, 9.0], [-10.0, 11.0, -12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 0.5, -0.5], [-1.0, 2.0, 3.0], [4.0, -2.0, 1.0], [0.0, 1.0, -1.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.empty(4, 3, device=\"cuda\", dtype=dtype),\n", + " \"M\": 4,\n", + " \"d\": 3,\n", + " \"window_size\": 2,\n", + " }\n", + " )\n", + "\n", + " # large_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"K\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 
0.1),\n", + " \"V\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"output\": torch.empty(128, 32, device=\"cuda\", dtype=dtype),\n", + " \"M\": 128,\n", + " \"d\": 32,\n", + " \"window_size\": 8,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, d, window_size = 5000, 64, 16\n", + " Q = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " K = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " V = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-100, 100)\n", + " output = torch.empty(M, d, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"Q\": Q,\n", + " \"K\": K,\n", + " \"V\": V,\n", + " \"output\": output,\n", + " \"M\": M,\n", + " \"d\": d,\n", + " \"window_size\": window_size,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/73_all_pairs_shortest_paths.ipynb b/challenges/colab_exports/hard/73_all_pairs_shortest_paths.ipynb new file mode 100644 index 00000000..e6f6af06 --- /dev/null +++ b/challenges/colab_exports/hard/73_all_pairs_shortest_paths.ipynb @@ -0,0 +1,606 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Given a weighted directed graph of N vertices represented as an\n N × N distance matrix, compute the shortest path distance between\n every pair of vertices using the Floyd-Warshall algorithm. The matrix is stored as a flat array in\n row-major order: dist[i * N + j] is the weight of the directed edge from vertex\n i to vertex j. A value of +infinity means no direct edge\n exists. The diagonal is always zero. For each intermediate vertex k from 0 to N - 1\n (in order), update all pairs:\n

    \n

    \n $$\n \\text{output}[i][j] = \\min\\!\\bigl(\\text{output}[i][j],\\;\n \\text{output}[i][k] + \\text{output}[k][j]\\bigr)\n \\quad \\forall\\, i, j\n $$\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in output
    • \n
    \n\n

    Example:

    \n
    \nInput: N = 4\ndist = [\n  0,   5, inf,  10,   // row 0: edges from vertex 0\n  inf, 0,   3, inf,   // row 1: edges from vertex 1\n  inf, inf, 0,   1,   // row 2: edges from vertex 2\n  inf, inf, inf, 0    // row 3: edges from vertex 3\n]\n\nOutput:\noutput = [\n  0,   5,   8,   9,   // shortest paths from vertex 0\n  inf, 0,   3,   4,   // shortest paths from vertex 1\n  inf, inf, 0,   1,   // shortest paths from vertex 2\n  inf, inf, inf, 0    // shortest paths from vertex 3\n]\n\nExplanation:\n- output[0][2] = 8   (path 0 -> 1 -> 2, cost 5 + 3 = 8)\n- output[0][3] = 9   (path 0 -> 1 -> 2 -> 3, cost 5 + 3 + 1 = 9, beats direct 0 -> 3 = 10)\n- output[1][3] = 4   (path 1 -> 2 -> 3, cost 3 + 1 = 4)\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 4,096
    • \n
    • Edge weights are finite float32 values or +infinity (no edge)
    • \n
    • The input contains no negative cycles
    • \n
    • The diagonal satisfies dist[i * N + i] = 0 for all i
    • \n
    • dist and output are flat arrays of N × N floats in row-major order
    • \n
    • Performance is measured with N = 2,048
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// dist, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* dist, float* output, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# dist, output are tensors on the GPU\n@cute.jit\ndef solve(dist: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# dist is a tensor on the GPU\n@jax.jit\ndef solve(dist: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# dist, output are device pointers (i.e. 
pointers to memory on the GPU)\n@export\ndef solve(\n dist: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# dist, output are tensors on the GPU\ndef solve(dist: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# dist, output are tensors on the GPU\ndef solve(dist: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, 
Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "def _make_graph(N: int, density: float = 0.5, max_weight: float = 10.0, seed: int = None):\n", + " \"\"\"Create a random non-negative weighted directed graph as a flat float32 CUDA tensor.\"\"\"\n", + " if seed is not None:\n", + " torch.manual_seed(seed)\n", + " d = torch.full((N * N,), float(\"inf\"), device=\"cuda\", dtype=torch.float32)\n", + " d_view = d.view(N, N)\n", + " d_view.fill_diagonal_(0.0)\n", + " if N > 1:\n", + " mask = torch.rand(N, N, device=\"cuda\") < density\n", + " mask.fill_diagonal_(False)\n", + " weights = torch.rand(N, N, device=\"cuda\") * max_weight + 0.1\n", + " d_view[mask] = weights[mask]\n", + " return d\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"All-Pairs Shortest Paths\",\n", + " atol=1e-02,\n", + " rtol=1e-02,\n", + " 
num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(self, dist: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert dist.shape == (N * N,)\n", + " assert output.shape == (N * N,)\n", + " assert dist.dtype == output.dtype == torch.float32\n", + " assert dist.device == output.device\n", + " assert dist.device.type == \"cuda\"\n", + " d = dist.view(N, N).clone()\n", + " for k in range(N):\n", + " d = torch.minimum(d, d[:, k : k + 1] + d[k : k + 1, :])\n", + " output.copy_(d.view(-1))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"dist\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " # 4-node directed graph: 0->1:5, 0->3:10, 1->2:3, 2->3:1\n", + " # Shortest paths: 0->2 = 8 (via 1), 0->3 = 9 (via 1->2->3)\n", + " inf = float(\"inf\")\n", + " dist = torch.tensor(\n", + " [0.0, 5.0, inf, 10.0, inf, 0.0, 3.0, inf, inf, inf, 0.0, 1.0, inf, inf, inf, 0.0],\n", + " device=\"cuda\",\n", + " dtype=torch.float32,\n", + " )\n", + " return {\n", + " \"dist\": dist,\n", + " \"output\": torch.empty(16, device=\"cuda\", dtype=torch.float32),\n", + " \"N\": 4,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " tests = []\n", + " inf = float(\"inf\")\n", + "\n", + " def make_output(N):\n", + " return torch.empty(N * N, device=\"cuda\", dtype=torch.float32)\n", + "\n", + " # --- Edge cases ---\n", + "\n", + " # N=1: single vertex\n", + " tests.append(\n", + " {\n", + " \"dist\": torch.tensor([0.0], device=\"cuda\", dtype=torch.float32),\n", + " \"output\": make_output(1),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + "\n", + " # N=2: disconnected graph (no edges between vertices)\n", + " tests.append(\n", + " {\n", + " \"dist\": torch.tensor([0.0, inf, inf, 0.0], 
device=\"cuda\", dtype=torch.float32),\n", + " \"output\": make_output(2),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # N=2: bidirectional edges\n", + " tests.append(\n", + " {\n", + " \"dist\": torch.tensor([0.0, 3.0, 7.0, 0.0], device=\"cuda\", dtype=torch.float32),\n", + " \"output\": make_output(2),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # N=3: chain 0->1->2; shortest path 0->2 = 2+3 = 5\n", + " tests.append(\n", + " {\n", + " \"dist\": torch.tensor(\n", + " [0.0, 2.0, inf, inf, 0.0, 3.0, inf, inf, 0.0],\n", + " device=\"cuda\",\n", + " dtype=torch.float32,\n", + " ),\n", + " \"output\": make_output(3),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + "\n", + " # N=4: graph with shortcut (same as example test)\n", + " tests.append(\n", + " {\n", + " \"dist\": torch.tensor(\n", + " [\n", + " 0.0,\n", + " 5.0,\n", + " inf,\n", + " 10.0,\n", + " inf,\n", + " 0.0,\n", + " 3.0,\n", + " inf,\n", + " inf,\n", + " inf,\n", + " 0.0,\n", + " 1.0,\n", + " inf,\n", + " inf,\n", + " inf,\n", + " 0.0,\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=torch.float32,\n", + " ),\n", + " \"output\": make_output(4),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # N=4: negative edge weights, no negative cycles (DAG: 0->1->2->3)\n", + " # 0->1: -1, 1->2: 2, 2->3: -3, 0->3: 10\n", + " # Shortest 0->2 = 1, 0->3 = -2, 1->3 = -1\n", + " tests.append(\n", + " {\n", + " \"dist\": torch.tensor(\n", + " [\n", + " 0.0,\n", + " -1.0,\n", + " inf,\n", + " 10.0,\n", + " inf,\n", + " 0.0,\n", + " 2.0,\n", + " inf,\n", + " inf,\n", + " inf,\n", + " 0.0,\n", + " -3.0,\n", + " inf,\n", + " inf,\n", + " inf,\n", + " 0.0,\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=torch.float32,\n", + " ),\n", + " \"output\": make_output(4),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # --- Power-of-2 sizes ---\n", + " for N, seed in [(16, 1), (32, 2), (64, 3), (128, 4)]:\n", + " tests.append(\n", + " {\n", + " \"dist\": _make_graph(N, density=0.5, seed=seed),\n", + " 
\"output\": make_output(N),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # --- Non-power-of-2 sizes ---\n", + " for N, seed in [(30, 5), (100, 6), (255, 7)]:\n", + " tests.append(\n", + " {\n", + " \"dist\": _make_graph(N, density=0.4, seed=seed),\n", + " \"output\": make_output(N),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # --- Realistic sizes ---\n", + " for N, seed in [(512, 8)]:\n", + " tests.append(\n", + " {\n", + " \"dist\": _make_graph(N, density=0.3, seed=seed),\n", + " \"output\": make_output(N),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " # --- Special: all zero-weight edges (any path has cost 0) ---\n", + " N = 8\n", + " tests.append(\n", + " {\n", + " \"dist\": torch.zeros(N * N, device=\"cuda\", dtype=torch.float32),\n", + " \"output\": make_output(N),\n", + " \"N\": N,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " N = 2048\n", + " return {\n", + " \"dist\": _make_graph(N, density=0.3, seed=42),\n", + " \"output\": torch.empty(N * N, device=\"cuda\", dtype=torch.float32),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/74_gpt2_block.ipynb b/challenges/colab_exports/hard/74_gpt2_block.ipynb new file mode 100644 index 00000000..53b679e1 --- /dev/null +++ b/challenges/colab_exports/hard/74_gpt2_block.ipynb @@ -0,0 +1,565 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a single GPT-2 transformer decoder block. Given an input tensor\n $x$ of shape (seq_len, 768) and a packed weight buffer containing\n all block parameters, compute the output using pre-norm architecture with\n multi-head self-attention and a feed-forward network with GELU activation.\n

    \n\n\n \n \n \n \n \n\n \n x (seq_len, 768)\n\n \n \n\n \n \n \n \n residual\n\n \n \n LayerNorm 1\n \n\n \n \n QKV Projection\n \n\n \n \n Multi-Head Attention\n \n\n \n \n Output Projection\n \n\n \n \n +\n \n\n \n \n \n \n residual\n\n \n \n LayerNorm 2\n \n\n \n \n Linear (768 \u2192 3072)\n \n\n \n \n GELU\n \n\n \n \n Linear (3072 \u2192 768)\n \n\n \n \n +\n \n\n \n output (seq_len, 768)\n\n\n

    The block uses GPT-2's pre-norm architecture: LayerNorm is applied\nbefore each sub-layer (attention and feed-forward), not after. At a high level:

    \n\n$$\n\\begin{aligned}\nx' &= x + \\text{MultiHeadAttn}\\!\\left(\\text{LN}_1(x)\\right) \\\\[4pt]\n\\text{output} &= x' + \\text{FeedForward}\\!\\left(\\text{LN}_2(x')\\right)\n\\end{aligned}\n$$\n\n

    where the sub-layers are defined as:

    \n\n$$\n\\begin{aligned}\n\\text{LN}(z) &= \\frac{z - \\mu}{\\sqrt{\\sigma^2 + \\epsilon}} \\odot \\gamma + \\beta, \\quad \\mu = \\frac{1}{d}\\sum_i z_i, \\quad \\sigma^2 = \\frac{1}{d}\\sum_i (z_i - \\mu)^2 \\\\[8pt]\n[Q \\mid K \\mid V] &= \\text{LN}_1(x) \\cdot W_{qkv} + b_{qkv} \\\\[4pt]\n\\text{head}_i &= \\text{softmax}\\!\\left(\\frac{Q_i K_i^\\top}{\\sqrt{d_k}}\\right) V_i, \\quad d_k = 64 \\\\[4pt]\n\\text{MultiHeadAttn}(z) &= \\text{Concat}(\\text{head}_1, \\ldots, \\text{head}_{12}) \\cdot W_{\\text{attn}} + b_{\\text{attn}} \\\\[8pt]\n\\text{FeedForward}(z) &= \\text{GELU}\\!\\left(z \\cdot W_{fc} + b_{fc}\\right) \\cdot W_{\\text{proj}} + b_{\\text{proj}}\n\\end{aligned}\n$$\n\n

    Expanding into individual steps:

    \n\n
      \n
    1. Layer Norm 1: $x_{\\text{norm}} = \\text{LN}_1(x)$ with parameters $\\gamma_1, \\beta_1$
    2. \n
    3. QKV Projection: $QKV = x_{\\text{norm}} \\cdot W_{qkv} + b_{qkv}$, split into $Q, K, V$ each of shape (seq_len, 768)
    4. \n
    5. Multi-Head Attention: Reshape $Q, K, V$ into 12 heads of dimension 64, compute per-head scaled dot-product attention (no causal mask), then concatenate heads into $A$
    6. \n
    7. Output Projection: $P = A \\cdot W_{\\text{attn}} + b_{\\text{attn}}$
    8. \n
    9. Residual 1: $x' = x + P$
    10. \n
    11. Layer Norm 2: $h_{\\text{norm}} = \\text{LN}_2(x')$ with parameters $\\gamma_2, \\beta_2$
    12. \n
    13. Feed-Forward: $F = \\text{GELU}(h_{\\text{norm}} \\cdot W_{fc} + b_{fc}) \\cdot W_{\\text{proj}} + b_{\\text{proj}}$
    14. \n
    15. Residual 2: $\\text{output} = x' + F$
    16. \n
    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output tensor
    • \n
    • LayerNorm uses $\\epsilon = 10^{-5}$
    • \n
    • Use the GELU tanh approximation: $\\text{GELU}(x) = 0.5\\,x\\!\\left(1 + \\tanh\\!\\left(\\sqrt{\\tfrac{2}{\\pi}}\\left(x + 0.044715\\,x^3\\right)\\right)\\right)$
    • \n
    \n\n

    Weight Layout

    \n

    All block parameters are packed into a single contiguous weights buffer\n(7,087,872 floats) in the following order. Index into the buffer using the offsets below\n(e.g. $W_{qkv}[i][j]$ is at weights[1536 + i * 2304 + j]).\nAll 2D matrices are stored in row-major order.

    \n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    ParameterShapeSizeOffset
    $\\gamma_1$ (LN1 weight)(768,)7680
    $\\beta_1$ (LN1 bias)(768,)768768
    $W_{qkv}$(768, 2304)1,769,4721,536
    $b_{qkv}$(2304,)2,3041,771,008
    $W_{\\text{attn}}$(768, 768)589,8241,773,312
    $b_{\\text{attn}}$(768,)7682,363,136
    $\\gamma_2$ (LN2 weight)(768,)7682,363,904
    $\\beta_2$ (LN2 bias)(768,)7682,364,672
    $W_{fc}$(768, 3072)2,359,2962,365,440
    $b_{fc}$(3072,)3,0724,724,736
    $W_{\\text{proj}}$(3072, 768)2,359,2964,727,808
    $b_{\\text{proj}}$(768,)7687,087,104
    \n\n

    Example

    \n

    With seq_len = 4, x uniformly drawn from [\u22121, 1], and weights randomly initialized\n(see Weight Layout for the packing structure):

    \n
    \nInput:  x.shape       = (4, 768)       # 4 token embeddings\n        weights.shape = (7,087,872,)   # packed weight buffer\n        seq_len       = 4\nOutput: output.shape  = (4, 768)       # transformed token embeddings\n
    \n\n

    Constraints

    \n
      \n
    • d_model = 768, n_heads = 12, ffn_dim = 3,072 (GPT-2 124M architecture)
    • \n
    • 1 ≤ seq_len ≤ 4,096
    • \n
    • All tensors use 32-bit floating point
    • \n
    • Performance is measured with seq_len = 1,024
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// x, output, weights are device pointers\nextern \"C\" void solve(const float* x, float* output, const float* weights, int seq_len) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# x, output, weights are tensors on the GPU\n@cute.jit\ndef solve(\n x: cute.Tensor,\n output: cute.Tensor,\n weights: cute.Tensor,\n seq_len: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# x, weights are tensors on GPU\n@jax.jit\ndef solve(x: jax.Array, weights: jax.Array, seq_len: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# x, output, weights are device pointers\n@export\ndef 
solve(\n x: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n weights: UnsafePointer[Float32, MutExternalOrigin],\n seq_len: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# x, output, weights are tensors on the GPU\ndef solve(x: torch.Tensor, output: torch.Tensor, weights: torch.Tensor, seq_len: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# x, output, weights are tensors on the GPU\ndef solve(x: torch.Tensor, output: torch.Tensor, weights: torch.Tensor, seq_len: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + 
"\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "import math\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "import torch.nn.functional as F\n", + "\n", + "# GPT-2 124M fixed dimensions\n", + "D = 768\n", + "H = 12\n", + "DH = D // H # 64\n", + "FFN = 3072\n", + "\n", + "# Weight layout offsets in the packed buffer\n", + "O_LN1_W = 0\n", + "O_LN1_B = O_LN1_W + D\n", + "O_WQKV = O_LN1_B + D\n", + "O_BQKV = O_WQKV + D * 3 * D\n", + "O_WAPROJ = O_BQKV + 3 * D\n", + "O_BAPROJ = O_WAPROJ + D * D\n", + "O_LN2_W = O_BAPROJ + D\n", + "O_LN2_B = O_LN2_W + D\n", + "O_WFC = O_LN2_B + D\n", + "O_BFC = O_WFC + D * FFN\n", + "O_WPROJ = O_BFC + FFN\n", + "O_BPROJ = O_WPROJ + FFN * D\n", + "TOTAL_WEIGHTS = O_BPROJ + D\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"GPT-2 Transformer Block\",\n", + " 
atol=1e-03,\n", + " rtol=1e-03,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " x: torch.Tensor,\n", + " output: torch.Tensor,\n", + " weights: torch.Tensor,\n", + " seq_len: int,\n", + " ):\n", + " assert x.shape == (seq_len, D)\n", + " assert output.shape == (seq_len, D)\n", + " assert weights.shape == (TOTAL_WEIGHTS,)\n", + " assert x.dtype == output.dtype == weights.dtype\n", + " assert x.device.type == \"cuda\"\n", + " assert output.device.type == \"cuda\"\n", + " assert weights.device.type == \"cuda\"\n", + "\n", + " # unpack weights\n", + " ln1_w = weights[O_LN1_W:O_LN1_B]\n", + " ln1_b = weights[O_LN1_B:O_WQKV]\n", + " W_qkv = weights[O_WQKV:O_BQKV].view(D, 3 * D)\n", + " b_qkv = weights[O_BQKV:O_WAPROJ]\n", + " W_attn = weights[O_WAPROJ:O_BAPROJ].view(D, D)\n", + " b_attn = weights[O_BAPROJ:O_LN2_W]\n", + " ln2_w = weights[O_LN2_W:O_LN2_B]\n", + " ln2_b = weights[O_LN2_B:O_WFC]\n", + " W_fc = weights[O_WFC:O_BFC].view(D, FFN)\n", + " b_fc = weights[O_BFC:O_WPROJ]\n", + " W_proj = weights[O_WPROJ:O_BPROJ].view(FFN, D)\n", + " b_proj = weights[O_BPROJ : O_BPROJ + D]\n", + "\n", + " # layer norm 1\n", + " x_norm = F.layer_norm(x, [D], ln1_w, ln1_b, eps=1e-5)\n", + "\n", + " # qkv projection\n", + " qkv = x_norm @ W_qkv + b_qkv\n", + " q, k, v = qkv.split(D, dim=-1)\n", + "\n", + " # reshape for multi-head attention: (H, seq_len, DH)\n", + " q = q.view(seq_len, H, DH).transpose(0, 1)\n", + " k = k.view(seq_len, H, DH).transpose(0, 1)\n", + " v = v.view(seq_len, H, DH).transpose(0, 1)\n", + "\n", + " # scaled dot-product attention\n", + " scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(DH)\n", + " attn_weights = torch.softmax(scores, dim=-1)\n", + " attn_out = torch.matmul(attn_weights, v)\n", + "\n", + " # concat heads and project\n", + " attn_out = attn_out.transpose(0, 1).contiguous().view(seq_len, D)\n", + " attn_proj = attn_out @ W_attn + b_attn\n", + "\n", + " # residual 
connection 1\n", + " hidden = x + attn_proj\n", + "\n", + " # layer norm 2\n", + " h_norm = F.layer_norm(hidden, [D], ln2_w, ln2_b, eps=1e-5)\n", + "\n", + " # ffn: linear -> gelu (tanh approx) -> linear\n", + " fc = h_norm @ W_fc + b_fc\n", + " fc = F.gelu(fc, approximate=\"tanh\")\n", + " proj = fc @ W_proj + b_proj\n", + "\n", + " # residual connection 2\n", + " output.copy_(hidden + proj)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"x\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"weights\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"seq_len\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def _make_weights(self, device, dtype):\n", + " scale = 0.02\n", + " ln1_w = torch.empty(D, device=device, dtype=dtype).uniform_(0.8, 1.2)\n", + " ln1_b = torch.empty(D, device=device, dtype=dtype).uniform_(-0.1, 0.1)\n", + " W_qkv = torch.empty(D, 3 * D, device=device, dtype=dtype).normal_(0, scale)\n", + " b_qkv = torch.zeros(3 * D, device=device, dtype=dtype)\n", + " W_attn = torch.empty(D, D, device=device, dtype=dtype).normal_(0, scale)\n", + " b_attn = torch.zeros(D, device=device, dtype=dtype)\n", + " ln2_w = torch.empty(D, device=device, dtype=dtype).uniform_(0.8, 1.2)\n", + " ln2_b = torch.empty(D, device=device, dtype=dtype).uniform_(-0.1, 0.1)\n", + " W_fc = torch.empty(D, FFN, device=device, dtype=dtype).normal_(0, scale)\n", + " b_fc = torch.zeros(FFN, device=device, dtype=dtype)\n", + " W_proj = torch.empty(FFN, D, device=device, dtype=dtype).normal_(0, scale)\n", + " b_proj = torch.zeros(D, device=device, dtype=dtype)\n", + " return torch.cat(\n", + " [\n", + " ln1_w,\n", + " ln1_b,\n", + " W_qkv.flatten(),\n", + " b_qkv,\n", + " W_attn.flatten(),\n", + " b_attn,\n", + " ln2_w,\n", + " ln2_b,\n", + " W_fc.flatten(),\n", + " b_fc,\n", + " W_proj.flatten(),\n", + " b_proj,\n", + " ]\n", + " )\n", + "\n", + " def 
_make_test_case(self, seq_len, zero_x=False):\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " weights = self._make_weights(device, dtype)\n", + " if zero_x:\n", + " x = torch.zeros(seq_len, D, device=device, dtype=dtype)\n", + " else:\n", + " x = torch.empty(seq_len, D, device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " return {\n", + " \"x\": x,\n", + " \"output\": torch.empty(seq_len, D, device=device, dtype=dtype),\n", + " \"weights\": weights,\n", + " \"seq_len\": seq_len,\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " torch.manual_seed(0)\n", + " return self._make_test_case(4)\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " tests = []\n", + " # single token\n", + " tests.append(self._make_test_case(1))\n", + " # zero input\n", + " tests.append(self._make_test_case(4, zero_x=True))\n", + " # small edge cases\n", + " tests.append(self._make_test_case(2))\n", + " tests.append(self._make_test_case(4))\n", + " # power-of-2\n", + " tests.append(self._make_test_case(16))\n", + " tests.append(self._make_test_case(64))\n", + " # non-power-of-2\n", + " tests.append(self._make_test_case(30))\n", + " tests.append(self._make_test_case(100))\n", + " # realistic\n", + " tests.append(self._make_test_case(128))\n", + " tests.append(self._make_test_case(256))\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " return self._make_test_case(1024)\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/hard/93_llama_transformer_block.ipynb b/challenges/colab_exports/hard/93_llama_transformer_block.ipynb new file mode 100644 index 00000000..d7191c9a --- /dev/null +++ b/challenges/colab_exports/hard/93_llama_transformer_block.ipynb @@ -0,0 +1,600 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a single Llama-style transformer decoder block. Given an input tensor $x$ of shape\n (seq_len, 512), a packed weight buffer, and precomputed RoPE tables, compute the\n output using pre-norm architecture with Grouped Query Attention (GQA), Rotary Position Embeddings\n (RoPE), and a SwiGLU feed-forward network.\n

    \n\n\n \n \n \n \n \n \n\n \n x (seq_len, 512)\n\n \n \n\n \n \n \n \n residual\n\n \n \n RMSNorm 1\n \n\n \n \n QKV Projection (GQA)\n \n\n \n \n RoPE (Q and K)\n \n\n \n \n Causal Attention\n \n\n \n \n Output Projection\n \n\n \n \n +\n \n\n \n \n \n \n residual\n\n \n \n RMSNorm 2\n \n\n \n \n Gate & Up Proj (512\u21921408)\n \n\n \n \n SiLU(gate) ⊙ up\n \n\n \n \n Down Proj (1408\u2192512)\n \n\n \n \n +\n \n\n \n output (seq_len, 512)\n\n\n

    \n The block follows Llama's pre-norm architecture. Unlike GPT-2, it uses\n RMSNorm (no mean subtraction, no additive bias), Grouped Query\n Attention with 8 query heads and 2 key/value heads, Rotary Position\n Embeddings applied to Q and K, and a SwiGLU feed-forward network.\n None of the linear projections have bias terms.\n

    \n\n$$\n\\begin{aligned}\nx' &= x + \\text{Attn}\\!\\left(\\text{RMSNorm}_1(x),\\; \\cos,\\; \\sin\\right) \\$$4pt]\n\\text{output} &= x' + \\text{FFN}\\!\\left(\\text{RMSNorm}_2(x')\\right)\n\\end{aligned}\n$$\n\n

    The sub-operations in detail:

    \n\n$$\n\\begin{aligned}\n\\text{RMSNorm}(z, w) &= \\frac{z}{\\sqrt{\\frac{1}{d}\\sum_i z_i^2 + \\varepsilon}} \\odot w, \\quad \\varepsilon = 10^{-5} \\$$8pt]\nQ &= \\text{RMSNorm}_1(x)\\, W_Q^\\top \\in \\mathbb{R}^{T \\times 512}, \\quad \\text{reshape to } (T, 8, 64) \\$$4pt]\nK &= \\text{RMSNorm}_1(x)\\, W_K^\\top \\in \\mathbb{R}^{T \\times 128}, \\quad \\text{reshape to } (T, 2, 64) \\$$4pt]\nV &= \\text{RMSNorm}_1(x)\\, W_V^\\top \\in \\mathbb{R}^{T \\times 128}, \\quad \\text{reshape to } (T, 2, 64) \\$$8pt]\n\\text{RoPE}(q, \\cos, \\sin) &: \\quad [q_1 \\mid q_2] \\mapsto [q_1 \\odot \\cos - q_2 \\odot \\sin \\mid q_1 \\odot \\sin + q_2 \\odot \\cos] \\$$4pt]\n&\\quad q_1 = q[\\ldots, {:}32],\\; q_2 = q[\\ldots, {32:}] \\$$8pt]\n\\text{GQA} &: \\text{repeat } K,V \\text{ along head dim } 4\\times \\text{ to match 8 Q heads} \\$$4pt]\n\\text{head}_i &= \\text{softmax}\\!\\left(\\frac{Q_i K_i^\\top}{\\sqrt{64}} + M_{\\text{causal}}\\right) V_i \\$$8pt]\n\\text{Attn}(x) &= \\text{Concat}(\\text{head}_1, \\ldots, \\text{head}_8)\\; W_O^\\top \\$$8pt]\n\\text{FFN}(z) &= \\bigl(\\text{SiLU}(z\\, W_{\\text{gate}}^\\top) \\odot z\\, W_{\\text{up}}^\\top\\bigr)\\; W_{\\text{down}}^\\top\n\\end{aligned}\n$$\n\n

    where $M_{\\text{causal}}$ is the upper-triangular causal mask ($-\\infty$ above the diagonal)\nand $\\text{SiLU}(x) = x \\cdot \\sigma(x)$.

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output tensor
    • \n
    • RMSNorm uses $\\varepsilon = 10^{-5}$, no additive bias
    • \n
    • Apply causal masking: position $i$ attends only to positions $\\le i$
    • \n
    • Repeat K and V heads $4\\times$ (GQA groups) before computing attention
    • \n
    • cos and sin have shape (seq_len, 32) \u2014 apply\n them to both Q and K heads independently
    • \n
    \n\n

    Weight Layout

    \n

    All parameters are packed into a single contiguous weights buffer\n(2,819,072 floats) in the order below. All 2-D matrices are stored row-major\nwith shape (out_dim, in_dim). There are no bias terms.

    \n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    ParameterShapeSizeOffset
    $w_1$ (RMSNorm 1 scale)(512,)5120
    $W_Q$(512, 512)262,144512
    $W_K$(128, 512)65,536262,656
    $W_V$(128, 512)65,536328,192
    $W_O$(512, 512)262,144393,728
    $w_2$ (RMSNorm 2 scale)(512,)512655,872
    $W_{\\text{gate}}$(1408, 512)720,896656,384
    $W_{\\text{up}}$(1408, 512)720,8961,377,280
    $W_{\\text{down}}$(512, 1408)720,8962,098,176
    \n\n

    Example

    \n

    With seq_len = 4, x drawn uniformly from [−1, 1], and randomly\ninitialized weights:

    \n
    \nInput:  x.shape       = (4, 512)       # 4 token hidden states\n        weights.shape = (2,819,072,)   # packed weight buffer\n        cos.shape     = (4, 32)        # precomputed RoPE cosines\n        sin.shape     = (4, 32)        # precomputed RoPE sines\n        seq_len       = 4\nOutput: output.shape  = (4, 512)       # transformed token hidden states\n
    \n\n

    Constraints

    \n
      \n
    • d_model = 512, n_q_heads = 8, n_kv_heads = 2,\n head_dim = 64, ffn_hidden = 1,408
    • \n
    • 1 ≤ seq_len ≤ 4,096
    • \n
    • All tensors use 32-bit floating point
    • \n
    • Performance is measured with seq_len = 2,048
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// x, output, weights, cos, sin are device pointers\nextern \"C\" void solve(const float* x, float* output, const float* weights, const float* cos,\n const float* sin, int seq_len) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# x, output, weights, cos, sin are tensors on the GPU\n@cute.jit\ndef solve(\n x: cute.Tensor,\n output: cute.Tensor,\n weights: cute.Tensor,\n cos: cute.Tensor,\n sin: cute.Tensor,\n seq_len: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# x, weights, cos, sin are tensors on GPU\n@jax.jit\ndef solve(\n x: jax.Array,\n weights: jax.Array,\n cos: jax.Array,\n sin: jax.Array,\n seq_len: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, 
block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# x, output, weights, cos, sin are device pointers\n@export\ndef solve(\n x: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n weights: UnsafePointer[Float32, MutExternalOrigin],\n cos: UnsafePointer[Float32, MutExternalOrigin],\n sin: UnsafePointer[Float32, MutExternalOrigin],\n seq_len: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# x, output, weights, cos, sin are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n output: torch.Tensor,\n weights: torch.Tensor,\n cos: torch.Tensor,\n sin: torch.Tensor,\n seq_len: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# x, output, weights, cos, sin are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n output: torch.Tensor,\n weights: torch.Tensor,\n cos: torch.Tensor,\n sin: torch.Tensor,\n seq_len: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: 
str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "import math\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "import torch.nn.functional as F\n", + "\n", + "# Llama-style architecture constants\n", + "D = 512 # model dimension\n", + "NUM_Q_HEADS = 8 # number of query heads\n", + "NUM_KV_HEADS = 2 # number of key/value heads (grouped query attention)\n", + "HEAD_DIM = D // NUM_Q_HEADS # = 64\n", + "Q_DIM = NUM_Q_HEADS * HEAD_DIM # = 512\n", + "KV_DIM = NUM_KV_HEADS * HEAD_DIM # = 128\n", + 
"GQA_GROUPS = NUM_Q_HEADS // NUM_KV_HEADS # = 4\n", + "FFN_HIDDEN = 1408 # SwiGLU intermediate dimension\n", + "\n", + "# Weight buffer layout offsets (all projections stored as (out_dim, in_dim))\n", + "O_RMS1_W = 0\n", + "O_WQ = O_RMS1_W + D # Q projection: Q_DIM x D\n", + "O_WK = O_WQ + Q_DIM * D # K projection: KV_DIM x D\n", + "O_WV = O_WK + KV_DIM * D # V projection: KV_DIM x D\n", + "O_WO = O_WV + KV_DIM * D # output projection: D x D\n", + "O_RMS2_W = O_WO + D * D # RMS norm 2 weights: D\n", + "O_WGATE = O_RMS2_W + D # gate projection: FFN_HIDDEN x D\n", + "O_WUP = O_WGATE + FFN_HIDDEN * D # up projection: FFN_HIDDEN x D\n", + "O_WDOWN = O_WUP + FFN_HIDDEN * D # down projection: D x FFN_HIDDEN\n", + "TOTAL_WEIGHTS = O_WDOWN + D * FFN_HIDDEN\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Llama Transformer Block\",\n", + " atol=1e-03,\n", + " rtol=1e-03,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " x: torch.Tensor,\n", + " output: torch.Tensor,\n", + " weights: torch.Tensor,\n", + " cos: torch.Tensor,\n", + " sin: torch.Tensor,\n", + " seq_len: int,\n", + " ):\n", + " assert x.shape == (seq_len, D)\n", + " assert output.shape == (seq_len, D)\n", + " assert weights.shape == (TOTAL_WEIGHTS,)\n", + " assert cos.shape == (seq_len, HEAD_DIM // 2)\n", + " assert sin.shape == (seq_len, HEAD_DIM // 2)\n", + " assert x.dtype == output.dtype == weights.dtype == cos.dtype == sin.dtype\n", + " assert x.device.type == \"cuda\"\n", + " assert output.device.type == \"cuda\"\n", + " assert weights.device.type == \"cuda\"\n", + " assert cos.device.type == \"cuda\"\n", + " assert sin.device.type == \"cuda\"\n", + "\n", + " def rms_norm(z, w):\n", + " return z * torch.rsqrt(z.pow(2).mean(-1, keepdim=True) + 1e-5) * w\n", + "\n", + " def apply_rope(qk, c, s):\n", + " # qk: (seq_len, num_heads, head_dim)\n", + " q1, q2 = qk[..., : 
HEAD_DIM // 2], qk[..., HEAD_DIM // 2 :]\n", + " c = c.unsqueeze(1) # (seq_len, 1, head_dim//2)\n", + " s = s.unsqueeze(1)\n", + " return torch.cat([q1 * c - q2 * s, q1 * s + q2 * c], dim=-1)\n", + "\n", + " # unpack weights\n", + " rms1_w = weights[O_RMS1_W:O_WQ]\n", + " W_Q = weights[O_WQ:O_WK].view(Q_DIM, D)\n", + " W_K = weights[O_WK:O_WV].view(KV_DIM, D)\n", + " W_V = weights[O_WV:O_WO].view(KV_DIM, D)\n", + " W_O = weights[O_WO:O_RMS2_W].view(D, D)\n", + " rms2_w = weights[O_RMS2_W:O_WGATE]\n", + " W_gate = weights[O_WGATE:O_WUP].view(FFN_HIDDEN, D)\n", + " W_up = weights[O_WUP:O_WDOWN].view(FFN_HIDDEN, D)\n", + " W_down = weights[O_WDOWN:TOTAL_WEIGHTS].view(D, FFN_HIDDEN)\n", + "\n", + " # --- Attention sub-block ---\n", + " x_norm = rms_norm(x, rms1_w)\n", + "\n", + " # QKV projections\n", + " q = (x_norm @ W_Q.T).view(seq_len, NUM_Q_HEADS, HEAD_DIM)\n", + " k = (x_norm @ W_K.T).view(seq_len, NUM_KV_HEADS, HEAD_DIM)\n", + " v = (x_norm @ W_V.T).view(seq_len, NUM_KV_HEADS, HEAD_DIM)\n", + "\n", + " # Apply RoPE to Q and K\n", + " q = apply_rope(q, cos, sin)\n", + " k = apply_rope(k, cos, sin)\n", + "\n", + " # Reshape for batched matmul: (num_heads, seq_len, head_dim)\n", + " q = q.transpose(0, 1) # (NUM_Q_HEADS, seq_len, HEAD_DIM)\n", + " k = k.transpose(0, 1) # (NUM_KV_HEADS, seq_len, HEAD_DIM)\n", + " v = v.transpose(0, 1) # (NUM_KV_HEADS, seq_len, HEAD_DIM)\n", + "\n", + " # GQA: broadcast K and V to match Q heads\n", + " k = k.repeat_interleave(GQA_GROUPS, dim=0) # (NUM_Q_HEADS, seq_len, HEAD_DIM)\n", + " v = v.repeat_interleave(GQA_GROUPS, dim=0)\n", + "\n", + " # Causal scaled dot-product attention\n", + " scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(HEAD_DIM)\n", + " causal_mask = torch.triu(\n", + " torch.full((seq_len, seq_len), float(\"-inf\"), device=x.device, dtype=x.dtype),\n", + " diagonal=1,\n", + " )\n", + " scores = scores + causal_mask\n", + " attn_weights = torch.softmax(scores, dim=-1)\n", + " attn_out = 
torch.matmul(attn_weights, v) # (NUM_Q_HEADS, seq_len, HEAD_DIM)\n", + "\n", + " # Merge heads and project\n", + " attn_out = attn_out.transpose(0, 1).contiguous().view(seq_len, D)\n", + " attn_proj = attn_out @ W_O.T\n", + "\n", + " # Residual 1\n", + " hidden = x + attn_proj\n", + "\n", + " # --- FFN sub-block ---\n", + " h_norm = rms_norm(hidden, rms2_w)\n", + "\n", + " # SwiGLU: gate * up, then project down\n", + " gate = F.silu(h_norm @ W_gate.T)\n", + " up = h_norm @ W_up.T\n", + " ffn_out = (gate * up) @ W_down.T\n", + "\n", + " # Residual 2\n", + " output.copy_(hidden + ffn_out)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"x\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"weights\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"cos\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"sin\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"seq_len\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def _make_rope_tables(self, seq_len, device, dtype, theta=10000.0):\n", + " positions = torch.arange(seq_len, device=device, dtype=dtype)\n", + " freqs = 1.0 / (\n", + " theta ** (torch.arange(0, HEAD_DIM, 2, device=device, dtype=dtype) / HEAD_DIM)\n", + " )\n", + " angles = torch.outer(positions, freqs) # (seq_len, HEAD_DIM//2)\n", + " return angles.cos(), angles.sin()\n", + "\n", + " def _make_weights(self, device, dtype):\n", + " scale = 0.02\n", + " rms1_w = torch.empty(D, device=device, dtype=dtype).uniform_(0.8, 1.2)\n", + " W_Q = torch.empty(Q_DIM, D, device=device, dtype=dtype).normal_(0, scale)\n", + " W_K = torch.empty(KV_DIM, D, device=device, dtype=dtype).normal_(0, scale)\n", + " W_V = torch.empty(KV_DIM, D, device=device, dtype=dtype).normal_(0, scale)\n", + " W_O = torch.empty(D, D, device=device, dtype=dtype).normal_(0, scale)\n", + " rms2_w = torch.empty(D, device=device, dtype=dtype).uniform_(0.8, 1.2)\n", + " W_gate 
= torch.empty(FFN_HIDDEN, D, device=device, dtype=dtype).normal_(0, scale)\n", + " W_up = torch.empty(FFN_HIDDEN, D, device=device, dtype=dtype).normal_(0, scale)\n", + " W_down = torch.empty(D, FFN_HIDDEN, device=device, dtype=dtype).normal_(0, scale)\n", + " return torch.cat(\n", + " [\n", + " rms1_w,\n", + " W_Q.flatten(),\n", + " W_K.flatten(),\n", + " W_V.flatten(),\n", + " W_O.flatten(),\n", + " rms2_w,\n", + " W_gate.flatten(),\n", + " W_up.flatten(),\n", + " W_down.flatten(),\n", + " ]\n", + " )\n", + "\n", + " def _make_test_case(self, seq_len, zero_x=False):\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " weights = self._make_weights(device, dtype)\n", + " cos, sin = self._make_rope_tables(seq_len, device, dtype)\n", + " if zero_x:\n", + " x = torch.zeros(seq_len, D, device=device, dtype=dtype)\n", + " else:\n", + " x = torch.empty(seq_len, D, device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " return {\n", + " \"x\": x,\n", + " \"output\": torch.empty(seq_len, D, device=device, dtype=dtype),\n", + " \"weights\": weights,\n", + " \"cos\": cos,\n", + " \"sin\": sin,\n", + " \"seq_len\": seq_len,\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " torch.manual_seed(0)\n", + " return self._make_test_case(4)\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " tests = []\n", + " # single token (decode phase)\n", + " tests.append(self._make_test_case(1))\n", + " # zero input\n", + " tests.append(self._make_test_case(4, zero_x=True))\n", + " # small edge cases\n", + " tests.append(self._make_test_case(2))\n", + " tests.append(self._make_test_case(4))\n", + " # power-of-2\n", + " tests.append(self._make_test_case(16))\n", + " tests.append(self._make_test_case(64))\n", + " # non-power-of-2\n", + " tests.append(self._make_test_case(30))\n", + " tests.append(self._make_test_case(100))\n", + " # realistic inference lengths\n", + " tests.append(self._make_test_case(128))\n", + " 
tests.append(self._make_test_case(256))\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " return self._make_test_case(2048)\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 
'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/10_2d_convolution.ipynb b/challenges/colab_exports/medium/10_2d_convolution.ipynb new file mode 100644 index 00000000..1b0767c5 --- /dev/null +++ b/challenges/colab_exports/medium/10_2d_convolution.ipynb @@ -0,0 +1,561 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a program that performs a 2D convolution operation on the GPU. Given an input matrix and a kernel (filter), compute the convolved\n output. The convolution should be performed with a \"valid\" boundary condition, meaning the kernel is only applied\n where it fully overlaps with the input.\n

    \n\n\n \n\n \n Input (4x4)\n\n \n \n \n \n 1\n \n 2\n \n 3\n \n 4\n \n \n 5\n \n 6\n \n 7\n \n 8\n \n \n 9\n \n 10\n \n 11\n \n 12\n \n \n 13\n \n 14\n \n 15\n \n 16\n\n \n \n \n\n \n Kernel (2x2)\n \n \n 0\n \n 1\n \n 1\n \n 0\n \n\n \n \n \n \n \n \n \n\n \n Output[0][0]\n \n 7\n\n \n 1\u00d70 + 2\u00d71 + 5\u00d71 + 6\u00d70 = 0 + 2 + 5 + 0 =\n 7\n\n \n \n kernel position\n \n output cell\n\n\n

    \n The input consists of:\n

      \n
    • input: A 2D matrix of 32-bit floating-point numbers, represented as a 1D array in row-major order.\n
    • \n
    • kernel: A 2D kernel (filter) of 32-bit floating-point numbers, also represented as a 1D array in\n row-major order.
    • \n
    \n

    \n\n

    \n The output should be written to the output matrix (also a 1D array in row-major order). The output matrix will have dimensions:\n

      \n
    • output_rows = input_rows - kernel_rows + 1
    • \n
    • output_cols = input_cols - kernel_cols + 1
    • \n
    \n

    \n\n

    \n The convolution operation is defined as:\n

    \n

    \n $output[i][j] = \\sum_{m=0}^{kernel\\_rows-1} \\sum_{n=0}^{kernel\\_cols-1} input[i+m][j+n] * kernel[m][n]$\n

    \n\n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The\n solve function signature must remain unchanged\n
    • \n
    • The final result must be stored in the array\n output\n
    • \n
    \n\n

    Example 1:

    \n

    \nInput:
    \ninput (3\u00d73):\n$$\n\\begin{bmatrix}\n1 & 2 & 3 \\\\\n4 & 5 & 6 \\\\\n7 & 8 & 9\n\\end{bmatrix}\n$$\nkernel (2\u00d72):\n$$\n\\begin{bmatrix}\n0 & 1 \\\\\n1 & 0\n\\end{bmatrix}\n$$\ninput_rows = 3
    \ninput_cols = 3
    \nkernel_rows = 2
    \nkernel_cols = 2\n

    \n\n

    \nOutput:
    \noutput (2\u00d72):\n$$\n\\begin{bmatrix}\n6 & 8 \\\\\n12 & 14\n\\end{bmatrix}\n$$\n

    \n\n

    Example 2:

    \n

    \nInput:
    \ninput (4\u00d74):\n$$\n\\begin{bmatrix}\n1 & 1 & 1 & 1 \\\\\n1 & 2 & 3 & 1 \\\\\n1 & 4 & 5 & 1 \\\\\n1 & 1 & 1 & 1\n\\end{bmatrix}\n$$\nkernel (1\u00d73):\n$$\n\\begin{bmatrix}\n1 & 0 & 1\n\\end{bmatrix}\n$$\ninput_rows = 4
    \ninput_cols = 4
    \nkernel_rows = 1
    \nkernel_cols = 3\n

    \n\n

    \nOutput:
    \noutput (4\u00d72):\n$$\n\\begin{bmatrix}\n2 & 2 \\\\\n4 & 3 \\\\\n6 & 5 \\\\\n2 & 2\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 input_rows, input_cols \u2264 3072
    • \n
    • 1 \u2264 kernel_rows, kernel_cols \u2264 31
    • \n
    • kernel_rows \u2264 input_rows
    • \n
    • kernel_cols \u2264 input_cols
    • \n\n
    • Performance is measured with input_cols = 3,072, input_rows = 3,072, kernel_cols = 15, kernel_rows = 15
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, kernel, output are device pointers\nextern \"C\" void solve(const float* input, const float* kernel, float* output, int input_rows,\n int input_cols, int kernel_rows, int kernel_cols) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, kernel, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n kernel: cute.Tensor,\n output: cute.Tensor,\n input_rows: cute.Int32,\n input_cols: cute.Int32,\n kernel_rows: cute.Int32,\n kernel_cols: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input, kernel are tensors on the GPU\n@jax.jit\ndef solve(\n input: jax.Array,\n kernel: jax.Array,\n input_rows: int,\n input_cols: int,\n kernel_rows: int,\n kernel_cols: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile 
solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n kernel: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n input_rows: Int32,\n input_cols: Int32,\n kernel_rows: Int32,\n kernel_cols: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, kernel, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n kernel: torch.Tensor,\n output: torch.Tensor,\n input_rows: int,\n input_cols: int,\n kernel_rows: int,\n kernel_cols: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, kernel, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n kernel: torch.Tensor,\n output: torch.Tensor,\n input_rows: int,\n input_cols: int,\n kernel_rows: int,\n kernel_cols: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class 
ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"2D Convolution\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " kernel: torch.Tensor,\n", + " output: torch.Tensor,\n", + " 
input_rows: int,\n", + " input_cols: int,\n", + " kernel_rows: int,\n", + " kernel_cols: int,\n", + " ):\n", + " # Reshape flattened arrays to 2D matrices\n", + " input_2d = input.view(input_rows, input_cols)\n", + " kernel_2d = kernel.view(kernel_rows, kernel_cols)\n", + " # Prepare tensors for conv2d (add batch and channel dimensions)\n", + " kernel_prepared = kernel_2d.unsqueeze(0).unsqueeze(0)\n", + " input_prepared = input_2d.unsqueeze(0).unsqueeze(0)\n", + " # Perform cross-correlation using PyTorch's F.conv2d\n", + " # (which does cross-correlation by default)\n", + " result = torch.nn.functional.conv2d(input_prepared, kernel_prepared, padding=0)\n", + " # Copy result to output tensor (removing the extra dimensions and flattening)\n", + " output.copy_(result.view(-1))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"kernel\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"input_rows\": (ctypes.c_int, \"in\"),\n", + " \"input_cols\": (ctypes.c_int, \"in\"),\n", + " \"kernel_rows\": (ctypes.c_int, \"in\"),\n", + " \"kernel_cols\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input = torch.tensor(\n", + " [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], device=\"cuda\", dtype=dtype\n", + " )\n", + " kernel = torch.tensor([0.0, 1.0, 1.0, 0.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(4, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"kernel\": kernel,\n", + " \"output\": output,\n", + " \"input_rows\": 3,\n", + " \"input_cols\": 3,\n", + " \"kernel_rows\": 2,\n", + " \"kernel_cols\": 2,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # basic_example\n", + 
" tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"kernel\": torch.tensor([0.0, 1.0, 1.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"input_rows\": 3,\n", + " \"input_cols\": 3,\n", + " \"kernel_rows\": 2,\n", + " \"kernel_cols\": 2,\n", + " }\n", + " )\n", + " # rectangular_input\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device=\"cuda\", dtype=dtype),\n", + " \"kernel\": torch.tensor([1.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"input_rows\": 2,\n", + " \"input_cols\": 3,\n", + " \"kernel_rows\": 1,\n", + " \"kernel_cols\": 2,\n", + " }\n", + " )\n", + " # negative_kernel\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"kernel\": torch.tensor([-1.0, 1.0, 0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"input_rows\": 3,\n", + " \"input_cols\": 3,\n", + " \"kernel_rows\": 2,\n", + " \"kernel_cols\": 2,\n", + " }\n", + " )\n", + " # single_element_kernel\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"kernel\": torch.tensor([2.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(9, device=\"cuda\", dtype=dtype),\n", + " \"input_rows\": 3,\n", + " \"input_cols\": 3,\n", + " \"kernel_rows\": 1,\n", + " \"kernel_cols\": 1,\n", + " }\n", + " )\n", + " # medium_matrix_small_kernel\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(64 * 64, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"kernel\": torch.empty(3 * 3, 
device=\"cuda\", dtype=dtype).uniform_(-0.5, 0.5),\n", + " \"output\": torch.empty(62 * 62, device=\"cuda\", dtype=dtype),\n", + " \"input_rows\": 64,\n", + " \"input_cols\": 64,\n", + " \"kernel_rows\": 3,\n", + " \"kernel_cols\": 3,\n", + " }\n", + " )\n", + " # large_matrix_medium_kernel\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(128 * 128, device=\"cuda\", dtype=dtype).uniform_(-2.0, 2.0),\n", + " \"kernel\": torch.empty(7 * 7, device=\"cuda\", dtype=dtype).uniform_(-0.2, 0.2),\n", + " \"output\": torch.empty(122 * 122, device=\"cuda\", dtype=dtype),\n", + " \"input_rows\": 128,\n", + " \"input_cols\": 128,\n", + " \"kernel_rows\": 7,\n", + " \"kernel_cols\": 7,\n", + " }\n", + " )\n", + " # rectangular_large_matrix\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(128 * 256, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"kernel\": torch.empty(5 * 5, device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"output\": torch.empty(124 * 252, device=\"cuda\", dtype=dtype),\n", + " \"input_rows\": 128,\n", + " \"input_cols\": 256,\n", + " \"kernel_rows\": 5,\n", + " \"kernel_cols\": 5,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_rows = 3072\n", + " input_cols = 3072\n", + " kernel_rows = 15\n", + " kernel_cols = 15\n", + " input = torch.empty(input_rows * input_cols, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " kernel = torch.empty(kernel_rows * kernel_cols, device=\"cuda\", dtype=dtype).uniform_(\n", + " -1.0, 1.0\n", + " )\n", + " output_rows = input_rows - kernel_rows + 1\n", + " output_cols = input_cols - kernel_cols + 1\n", + " output = torch.empty(output_rows * output_cols, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"kernel\": kernel,\n", + " \"output\": output,\n", + " \"input_rows\": input_rows,\n", + " \"input_cols\": input_cols,\n", + " 
\"kernel_rows\": kernel_rows,\n", + " \"kernel_cols\": kernel_cols,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/11_3d_convolution.ipynb b/challenges/colab_exports/medium/11_3d_convolution.ipynb new file mode 100644 index 00000000..d7c1f449 --- /dev/null +++ b/challenges/colab_exports/medium/11_3d_convolution.ipynb @@ -0,0 +1,651 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a program that performs a 3D convolution operation. Given a 3D input volume and a 3D kernel (filter), compute the convolved\n output. The convolution should use a \"valid\" boundary condition (no padding).\n

    \n\n

    \n For a 3D convolution, the output at position $(i,j,k)$ is given by:\n

    \n\n

    \n $$\n output(i,j,k) = \\sum_{d=0}^{K_d-1} \\sum_{r=0}^{K_r-1} \\sum_{c=0}^{K_c-1} input(i+d,j+r,k+c) \\cdot kernel(d,r,c)\n $$\n

    \n\n

    \n The input consists of:\n

      \n
    • \n input: A 3D volume of 32-bit floats, as a 1D array (row-major, then depth).\n
    • \n
    • \n kernel: A 3D kernel of 32-bit floats, as a 1D array (row-major, then depth).\n
    • \n
    • \n input_depth,\n input_rows,\n input_cols: Dimensions of the input.\n
    • \n
    • \n kernel_depth,\n kernel_rows,\n kernel_cols: Dimensions of the kernel.\n
    • \n
    \n\nOutput:\n
      \n
    • \n output: A 1D array (row-major, then depth) storing the result.\n
    • \n
    \n\nOutput dimensions:\n
      \n
    • \n output_depth = input_depth - kernel_depth + 1\n
    • \n
    • \n output_rows = input_rows - kernel_rows + 1\n
    • \n
    • \n output_cols = input_cols - kernel_cols + 1\n
    • \n
    \n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in output
    • \n
    \n\n

    Examples

    \n\n

    Example 1:

    \n

    \nInput volume $V \\in \\mathbb{R}^{3 \\times 3 \\times 3}$:\n$$\n\\begin{aligned}\nV_{d=0} &= \\begin{bmatrix}\n1 & 2 & 3 \\\\\n4 & 5 & 6 \\\\\n7 & 8 & 9\n\\end{bmatrix} \\\\\nV_{d=1} &= \\begin{bmatrix}\n10 & 11 & 12 \\\\\n13 & 14 & 15 \\\\\n16 & 17 & 18\n\\end{bmatrix} \\\\\nV_{d=2} &= \\begin{bmatrix}\n19 & 20 & 21 \\\\\n22 & 23 & 24 \\\\\n25 & 26 & 27\n\\end{bmatrix}\n\\end{aligned}\n$$\n\nKernel $K \\in \\mathbb{R}^{2 \\times 3 \\times 3}$:\n$$\n\\begin{aligned}\nK_{d=0} &= \\begin{bmatrix}\n1 & 0 & 0 \\\\\n1 & 1 & 1 \\\\\n0 & 0 & 0\n\\end{bmatrix} \\\\\nK_{d=1} &= \\begin{bmatrix}\n1 & 1 & 0 \\\\\n1 & 1 & 0 \\\\\n0 & 0 & 1\n\\end{bmatrix}\n\\end{aligned}\n$$\n\nOutput $O \\in \\mathbb{R}^{2 \\times 1 \\times 1}$:\n$$\n[82, 163]\n$$\n

    \n\n

    Example 2:

    \n

    \nInput volume $V \\in \\mathbb{R}^{2 \\times 2 \\times 2}$:\n$$\n\\begin{aligned}\nV_{d=0} &= \\begin{bmatrix}\n1 & 2 \\\\\n3 & 4\n\\end{bmatrix} \\\\\nV_{d=1} &= \\begin{bmatrix}\n5 & 6 \\\\\n7 & 8\n\\end{bmatrix}\n\\end{aligned}\n$$\n\nKernel $K \\in \\mathbb{R}^{2 \\times 2 \\times 2}$:\n$$\n\\begin{aligned}\nK_{d=0} &= \\begin{bmatrix}\n1 & 1 \\\\\n1 & 1\n\\end{bmatrix} \\\\\nK_{d=1} &= \\begin{bmatrix}\n1 & 1 \\\\\n1 & 1\n\\end{bmatrix}\n\\end{aligned}\n$$\n\nOutput $O \\in \\mathbb{R}^{1 \\times 1 \\times 1}$:\n$$\n[36]\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 \u2264\n input_depth,\n input_rows,\n input_cols \u2264 256\n
    • \n
    • 1 \u2264\n kernel_depth,\n kernel_rows,\n kernel_cols \u2264 5\n
    • \n
    • \n kernel_depth \u2264\n input_depth\n
    • \n
    • \n kernel_rows \u2264\n input_rows\n
    • \n
    • \n kernel_cols \u2264\n input_cols\n
    • \n\n
    • Performance is measured with input_depth = 256, input_rows = 128, input_cols = 128, kernel_depth = 5, kernel_rows = 5, kernel_cols = 5
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, kernel, output are device pointers\nextern \"C\" void solve(const float* input, const float* kernel, float* output, int input_depth,\n int input_rows, int input_cols, int kernel_depth, int kernel_rows,\n int kernel_cols) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, kernel, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n kernel: cute.Tensor,\n output: cute.Tensor,\n input_depth: cute.Int32,\n input_rows: cute.Int32,\n input_cols: cute.Int32,\n kernel_depth: cute.Int32,\n kernel_rows: cute.Int32,\n kernel_cols: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input, kernel are tensors on the GPU\n@jax.jit\ndef solve(\n input: jax.Array,\n kernel: jax.Array,\n input_depth: int,\n input_rows: int,\n input_cols: int,\n kernel_depth: int,\n kernel_rows: int,\n kernel_cols: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": 
{ + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n kernel: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n input_depth: Int32,\n input_rows: Int32,\n input_cols: Int32,\n kernel_depth: Int32,\n kernel_rows: Int32,\n kernel_cols: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, kernel, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n kernel: torch.Tensor,\n output: torch.Tensor,\n input_depth: int,\n input_rows: int,\n input_cols: int,\n kernel_depth: int,\n kernel_rows: int,\n kernel_cols: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, kernel, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n kernel: torch.Tensor,\n output: torch.Tensor,\n input_depth: int,\n input_rows: int,\n input_cols: int,\n kernel_depth: int,\n kernel_rows: int,\n kernel_cols: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " 
super().__init__(\n", + " name=\"3D Convolution\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " kernel: torch.Tensor,\n", + " output: torch.Tensor,\n", + " input_depth: int,\n", + " input_rows: int,\n", + " input_cols: int,\n", + " kernel_depth: int,\n", + " kernel_rows: int,\n", + " kernel_cols: int,\n", + " ):\n", + " assert input.shape == (input_depth, input_rows, input_cols)\n", + " assert kernel.shape == (kernel_depth, kernel_rows, kernel_cols)\n", + " assert output.shape == (\n", + " input_depth - kernel_depth + 1,\n", + " input_rows - kernel_rows + 1,\n", + " input_cols - kernel_cols + 1,\n", + " )\n", + " assert input.dtype == kernel.dtype == output.dtype\n", + " assert input.device == kernel.device == output.device\n", + "\n", + " input_expanded = input.unsqueeze(0).unsqueeze(0)\n", + " kernel_expanded = kernel.unsqueeze(0).unsqueeze(0)\n", + "\n", + " result = torch.nn.functional.conv3d(\n", + " input_expanded, kernel_expanded, bias=None, stride=1, padding=0\n", + " )\n", + "\n", + " output.copy_(result.squeeze(0).squeeze(0))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"kernel\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"input_depth\": (ctypes.c_int, \"in\"),\n", + " \"input_rows\": (ctypes.c_int, \"in\"),\n", + " \"input_cols\": (ctypes.c_int, \"in\"),\n", + " \"kernel_depth\": (ctypes.c_int, \"in\"),\n", + " \"kernel_rows\": (ctypes.c_int, \"in\"),\n", + " \"kernel_cols\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_tensor = torch.tensor(\n", + " [\n", + " [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n", + " [[10, 11, 12], [13, 14, 15], [16, 17, 18]],\n", + " [[19, 20, 21], 
[22, 23, 24], [25, 26, 27]],\n", + " ],\n", + " dtype=dtype,\n", + " device=\"cuda\",\n", + " )\n", + " kernel_tensor = torch.tensor(\n", + " [[[1, 0, 0], [1, 1, 1], [0, 0, 0]], [[1, 1, 0], [1, 1, 0], [0, 0, 1]]],\n", + " dtype=dtype,\n", + " device=\"cuda\",\n", + " )\n", + " output_tensor = torch.empty((2, 1, 1), device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input_tensor,\n", + " \"kernel\": kernel_tensor,\n", + " \"output\": output_tensor,\n", + " \"input_depth\": 3,\n", + " \"input_rows\": 3,\n", + " \"input_cols\": 3,\n", + " \"kernel_depth\": 2,\n", + " \"kernel_rows\": 3,\n", + " \"kernel_cols\": 3,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [\n", + " [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n", + " [[10, 11, 12], [13, 14, 15], [16, 17, 18]],\n", + " [[19, 20, 21], [22, 23, 24], [25, 26, 27]],\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"kernel\": torch.tensor(\n", + " [[[1, 0, 0], [1, 1, 1], [0, 0, 0]], [[1, 1, 0], [1, 1, 0], [0, 0, 1]]],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"output\": torch.zeros((2, 1, 1), dtype=dtype, device=device),\n", + " \"input_depth\": 3,\n", + " \"input_rows\": 3,\n", + " \"input_cols\": 3,\n", + " \"kernel_depth\": 2,\n", + " \"kernel_rows\": 3,\n", + " \"kernel_cols\": 3,\n", + " }\n", + " )\n", + "\n", + " # small_dimensions\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtype, device=device\n", + " ),\n", + " \"kernel\": torch.tensor(\n", + " [[[1, 1], [1, 1]], [[1, 1], [1, 1]]], dtype=dtype, device=device\n", + " ),\n", + " \"output\": torch.zeros((1, 1, 1), dtype=dtype, device=device),\n", + " \"input_depth\": 2,\n", + " \"input_rows\": 2,\n", + " \"input_cols\": 
2,\n", + " \"kernel_depth\": 2,\n", + " \"kernel_rows\": 2,\n", + " \"kernel_cols\": 2,\n", + " }\n", + " )\n", + "\n", + " # unit_kernel\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtype, device=device\n", + " ),\n", + " \"kernel\": torch.tensor([[[2]]], dtype=dtype, device=device),\n", + " \"output\": torch.zeros((2, 2, 2), dtype=dtype, device=device),\n", + " \"input_depth\": 2,\n", + " \"input_rows\": 2,\n", + " \"input_cols\": 2,\n", + " \"kernel_depth\": 1,\n", + " \"kernel_rows\": 1,\n", + " \"kernel_cols\": 1,\n", + " }\n", + " )\n", + "\n", + " # zero_kernel\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=dtype, device=device\n", + " ),\n", + " \"kernel\": torch.zeros((2, 2, 2), dtype=dtype, device=device),\n", + " \"output\": torch.zeros((1, 1, 1), dtype=dtype, device=device),\n", + " \"input_depth\": 2,\n", + " \"input_rows\": 2,\n", + " \"input_cols\": 2,\n", + " \"kernel_depth\": 2,\n", + " \"kernel_rows\": 2,\n", + " \"kernel_cols\": 2,\n", + " }\n", + " )\n", + "\n", + " # negative_values\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[[-1, -2], [3, -4]], [[5, -6], [7, -8]]], dtype=dtype, device=device\n", + " ),\n", + " \"kernel\": torch.tensor([[[-1, 1], [-1, 1]]], dtype=dtype, device=device),\n", + " \"output\": torch.zeros((2, 1, 1), dtype=dtype, device=device),\n", + " \"input_depth\": 2,\n", + " \"input_rows\": 2,\n", + " \"input_cols\": 2,\n", + " \"kernel_depth\": 1,\n", + " \"kernel_rows\": 2,\n", + " \"kernel_cols\": 2,\n", + " }\n", + " )\n", + "\n", + " # rectangular_dimensions\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [\n", + " [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],\n", + " [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"kernel\": torch.tensor([[[1, 
1, 1], [1, 1, 1]]], dtype=dtype, device=device),\n", + " \"output\": torch.zeros((2, 2, 2), dtype=dtype, device=device),\n", + " \"input_depth\": 2,\n", + " \"input_rows\": 3,\n", + " \"input_cols\": 4,\n", + " \"kernel_depth\": 1,\n", + " \"kernel_rows\": 2,\n", + " \"kernel_cols\": 3,\n", + " }\n", + " )\n", + "\n", + " # power_of_two_dimensions\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(4, 4, 4, device=device, dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"kernel\": torch.empty(3, 3, 3, device=device, dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"output\": torch.zeros((2, 2, 2), dtype=dtype, device=device),\n", + " \"input_depth\": 4,\n", + " \"input_rows\": 4,\n", + " \"input_cols\": 4,\n", + " \"kernel_depth\": 3,\n", + " \"kernel_rows\": 3,\n", + " \"kernel_cols\": 3,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(10, 10, 10, device=device, dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"kernel\": torch.empty(3, 4, 5, device=device, dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"output\": torch.zeros((8, 7, 6), dtype=dtype, device=device),\n", + " \"input_depth\": 10,\n", + " \"input_rows\": 10,\n", + " \"input_cols\": 10,\n", + " \"kernel_depth\": 3,\n", + " \"kernel_rows\": 4,\n", + " \"kernel_cols\": 5,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_depth, input_rows, input_cols = 256, 128, 128\n", + " kernel_depth, kernel_rows, kernel_cols = 5, 5, 5\n", + " input_tensor = torch.empty(\n", + " input_depth, input_rows, input_cols, device=\"cuda\", dtype=dtype\n", + " ).uniform_(-1.0, 1.0)\n", + " kernel_tensor = torch.empty(\n", + " kernel_depth, kernel_rows, kernel_cols, device=\"cuda\", dtype=dtype\n", + " ).uniform_(-1.0, 1.0)\n", + " output_tensor = torch.zeros(\n", + " input_depth - kernel_depth + 1,\n", + " input_rows - kernel_rows + 1,\n", + " input_cols - 
kernel_cols + 1,\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " return {\n", + " \"input\": input_tensor,\n", + " \"kernel\": kernel_tensor,\n", + " \"output\": output_tensor,\n", + " \"input_depth\": input_depth,\n", + " \"input_rows\": input_rows,\n", + " \"input_cols\": input_cols,\n", + " \"kernel_depth\": kernel_depth,\n", + " \"kernel_rows\": kernel_rows,\n", + " \"kernel_cols\": kernel_cols,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/13_histogramming.ipynb b/challenges/colab_exports/medium/13_histogramming.ipynb new file mode 100644 index 00000000..94a45202 --- /dev/null +++ b/challenges/colab_exports/medium/13_histogramming.ipynb @@ -0,0 +1,493 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a GPU program that computes the histogram of an array of 32-bit integers.\n The histogram should count the number of occurrences of each integer value in the range [0, num_bins).\n You are given an input array input of length N and the number of bins num_bins.\n

    \n\n

    \n The result should be an array of integers of length num_bins, where each element represents the count of occurrences of its corresponding index in the input array.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged\n
    • \n
    • The final result must be stored in the histogram array.\n
    • \n
    \n\n

    Examples

    \n\n
    \nInput: input = [0, 1, 2, 1, 0],  N = 5, num_bins = 3\nOutput: [2, 2, 1]\n\nInput: input = [3, 3, 3, 3], N = 4, num_bins = 5\nOutput: [0, 0, 0, 4, 0]\n
    \n\n

    Constraints

    \n\n
      \n
    • 1 ≤ N ≤ 100,000,000
    • \n
    • 0 ≤ input[i] < num_bins
    • \n
    • 1 ≤ num_bins ≤ 1024
    • \n\n
    • Performance is measured with N = 50,000,000, num_bins = 256
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, histogram are device pointers\nextern \"C\" void solve(const int* input, int* histogram, int N, int num_bins) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, histogram are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, histogram: cute.Tensor, N: cute.Int32, num_bins: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int, num_bins: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n input: UnsafePointer[Int32, MutExternalOrigin],\n histogram: 
UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n num_bins: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, histogram are tensors on the GPU\ndef solve(input: torch.Tensor, histogram: torch.Tensor, N: int, num_bins: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, histogram are tensors on the GPU\ndef solve(input: torch.Tensor, histogram: torch.Tensor, N: int, num_bins: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function 
signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Histogramming\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, histogram: torch.Tensor, N: int, num_bins: int):\n", + " # Validate input types and shapes\n", + " assert input.dtype == torch.int32\n", + " assert histogram.dtype == torch.int32\n", + " assert input.numel() == N\n", + " assert histogram.numel() == num_bins\n", + " # Zero out the histogram\n", + " histogram.zero_()\n", + " # Only count valid input values\n", + " valid_mask = (input >= 0) & (input < num_bins)\n", + " valid_input = input[valid_mask]\n", + " counts = torch.bincount(valid_input, minlength=num_bins)\n", + " histogram.copy_(counts)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": 
(ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"histogram\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"num_bins\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.tensor([0, 1, 2, 1, 0], device=\"cuda\", dtype=dtype)\n", + " histogram = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"histogram\": histogram,\n", + " \"N\": 5,\n", + " \"num_bins\": 3,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([0, 1, 2, 1, 0], device=\"cuda\", dtype=dtype),\n", + " \"histogram\": torch.zeros(3, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " \"num_bins\": 3,\n", + " }\n", + " )\n", + "\n", + " # all_same_value\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([2] * 16, device=\"cuda\", dtype=dtype),\n", + " \"histogram\": torch.zeros(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 16,\n", + " \"num_bins\": 5,\n", + " }\n", + " )\n", + "\n", + " # increasing_sequence\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(0, 4, (32,), device=\"cuda\", dtype=dtype),\n", + " \"histogram\": torch.zeros(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 32,\n", + " \"num_bins\": 4,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(0, 10, (1000,), device=\"cuda\", dtype=dtype),\n", + " \"histogram\": torch.zeros(10, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1000,\n", + " \"num_bins\": 10,\n", + " }\n", + " )\n", + "\n", + " # large_multi_block\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(0, 128, (10000,), device=\"cuda\", dtype=dtype),\n", + " \"histogram\": torch.zeros(128, 
device=\"cuda\", dtype=dtype),\n", + " \"N\": 10000,\n", + " \"num_bins\": 128,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.randint(0, 256, (50000000,), device=\"cuda\", dtype=dtype)\n", + " histogram = torch.zeros(256, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"histogram\": histogram,\n", + " \"N\": 50000000,\n", + " \"num_bins\": 256,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/16_prefix_sum.ipynb b/challenges/colab_exports/medium/16_prefix_sum.ipynb new file mode 100644 index 00000000..532c31e9 --- /dev/null +++ b/challenges/colab_exports/medium/16_prefix_sum.ipynb @@ -0,0 +1,480 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a GPU program that computes the prefix sum (cumulative sum) of an array of 32-bit floating point numbers.\n For an input array [a, b, c, d, ...], the prefix sum is [a, a+b, a+b+c, a+b+c+d, ...].\n

    \n\n Example: for input [1, 2, 3, 4, 5, 6, 7, 8] the prefix sum is [1, 3, 6, 10, 15, 21, 28, 36].\n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The result must be stored in the output array
    • \n
    \n\n

    Example 1:

    \n
    \nInput: [1.0, 2.0, 3.0, 4.0]\nOutput: [1.0, 3.0, 6.0, 10.0]\n
    \n\n

    Example 2:

    \n
    \nInput: [5.0, -2.0, 3.0, 1.0, -4.0]\nOutput: [5.0, 3.0, 6.0, 7.0, 3.0]\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 100,000,000
    • \n
    • -1000.0 ≤ input[i] ≤ 1000.0
    • \n
    • The largest value in the output array will fit within a 32-bit float
    • \n\n
    • Performance is measured with N = 250,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n 
pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# data and output are tensors on the GPU\ndef solve(data: torch.Tensor, output: torch.Tensor, n: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " 
pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Prefix Sum\", atol=1e-02, rtol=1e-02, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert input.shape == (N,)\n", + " assert output.shape == (N,)\n", + " result = torch.cumsum(input, dim=0)\n", + " output.copy_(result)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(4, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 4,\n", + " }\n", + "\n", + " def 
generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # mixed_signs\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([5.0, -2.0, 3.0, 1.0, -4.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(5, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + " # single_element\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([42.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + " # power_of_two\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(8, device=\"cuda\", dtype=dtype),\n", + " \"N\": 8,\n", + " }\n", + " )\n", + " # all_zeros\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(1024, device=\"cuda\", dtype=dtype).zero_(),\n", + " \"output\": torch.empty(1024, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1024,\n", + " }\n", + " )\n", + " # random_large\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(2025, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"output\": torch.empty(2025, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2025,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 250000\n", + " input = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0)\n", + " output = torch.empty(N, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": N,\n", + " }\n", + 
"\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/17_dot_product.ipynb b/challenges/colab_exports/medium/17_dot_product.ipynb new file mode 100644 index 00000000..b300ed80 --- /dev/null +++ b/challenges/colab_exports/medium/17_dot_product.ipynb @@ -0,0 +1,498 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that computes the dot product of two vectors containing 32-bit floating point numbers.\n The dot product is the sum of the products of the corresponding elements of two vectors.\n

    \n

    \n Mathematically, the dot product of two vectors $A$ and $B$ of length $n$ is defined as:\n $$\n A \\cdot B = \\sum_{i=0}^{n-1} A_i \\cdot B_i = A_0 \\cdot B_0 + A_1 \\cdot B_1 + \\ldots + A_{n-1} \\cdot B_{n-1}\n $$\n

    \n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n

    Example 1:

    \n
    Input:  A = [1.0, 2.0, 3.0, 4.0]\n               B = [5.0, 6.0, 7.0, 8.0]\n       Output: result = 70.0  (1.0*5.0 + 2.0*6.0 + 3.0*7.0 + 4.0*8.0)
    \n

    Example 2:

    \n
    Input:  A = [0.5, 1.5, 2.5]\n               B = [2.0, 3.0, 4.0]\n       Output: result = 15.5  (0.5*2.0 + 1.5*3.0 + 2.5*4.0)
    \n

    Constraints

    \n
      \n
    • A and B have identical lengths
    • \n
    • 1 \u2264 N \u2264 100,000,000
    • \n\n
    • Performance is measured with N = 5
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// A, B, result are device pointers\nextern \"C\" void solve(const float* A, const float* B, float* result, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, result are tensors on the GPU\n@cute.jit\ndef solve(A: cute.Tensor, B: cute.Tensor, result: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on the GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n 
result: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, result are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, result: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# a, b, result are tensors on the GPU\ndef solve(a: torch.Tensor, b: torch.Tensor, result: torch.Tensor, n: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + 
"\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Dot Product\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, A: torch.Tensor, B: torch.Tensor, result: torch.Tensor, N: int):\n", + " assert A.shape == (N,)\n", + " assert B.shape == (N,)\n", + " assert result.shape == (1,)\n", + " result[0] = torch.dot(A, B)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"result\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " B = torch.tensor([5.0, 6.0, 7.0, 
8.0], device=\"cuda\", dtype=dtype)\n", + " result = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"result\": result,\n", + " \"N\": 4,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # basic_small\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([5.0, 6.0, 7.0, 8.0], device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # all_zeros\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([0.0] * 16, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([0.0] * 16, device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 16,\n", + " }\n", + " )\n", + " # negative_numbers\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([-1.0, -2.0, -3.0, -4.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([-5.0, -6.0, -7.0, -8.0], device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # mixed_positive_negative\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1.0, -2.0, 3.0, -4.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([-1.0, 2.0, -3.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # orthogonal_vectors\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1.0, 0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([0.0, 1.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + " # medium_sized_vector\n", + " tests.append(\n", + " 
{\n", + " \"A\": torch.empty(1000, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"B\": torch.empty(1000, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1000,\n", + " }\n", + " )\n", + " # large_vector\n", + " tests.append(\n", + " {\n", + " \"A\": torch.empty(10000, device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"B\": torch.empty(10000, device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 10000,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 5\n", + " A = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " B = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " result = torch.zeros(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"result\": result,\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/18_sparse_matrix_vector_multiplication.ipynb b/challenges/colab_exports/medium/18_sparse_matrix_vector_multiplication.ipynb new file mode 100644 index 00000000..ad2f90a3 --- /dev/null +++ b/challenges/colab_exports/medium/18_sparse_matrix_vector_multiplication.ipynb @@ -0,0 +1,650 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that performs sparse matrix-vector multiplication.\n Given a sparse matrix $A$ of dimensions $M \\times N$ and a dense vector $x$ of length $N$,\n compute the product vector $y = A \\times x$, which will have length $M$. A is stored in row-major order.\n nnz is the number of non-zero elements in A.\n

    \n\n

    \n Mathematically, the operation is defined as:\n $$\n y_i = \\sum_{j=0}^{N-1} A_{ij} \\cdot x_j \\quad \\text{for} \\quad i = 0, 1, \\ldots, M-1\n $$\n

    \n\n

    \n The matrix $A$ is approximately 60 - 70% sparse.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in vector y
    • \n
    \n\n

    Example:

    \n

    \nInput:
    \nMatrix $A$ ($3 \\times 4$):\n$$\n\\begin{bmatrix}\n5.0 & 0.0 & 0.0 & 1.0 \\\\\n0.0 & 2.0 & 3.0 & 0.0 \\\\\n0.0 & 0.0 & 0.0 & 4.0\n\\end{bmatrix}\n$$\nVector $x$:\n$$\n\\begin{bmatrix}\n1.0 \\\\\n2.0 \\\\\n3.0 \\\\\n4.0\n\\end{bmatrix}\n$$\nOutput:
    \nVector $y$:\n$$\n\\begin{bmatrix}\n9.0 \\\\\n13.0 \\\\\n16.0\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ M, N ≤ 10,000
    • \n
    • The matrix $A$ is approximately 60-70% sparse (i.e., 60-70% of elements are zero)
    • \n\n
    • Performance is measured with M = 1,000, N = 10,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// A, x, y are device pointers\nextern \"C\" void solve(const float* A, const float* x, float* y, int M, int N, int nnz) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, x, y are tensors on the GPU\n@cute.jit\ndef solve(\n A: cute.Tensor, x: cute.Tensor, y: cute.Tensor, M: cute.Int32, N: cute.Int32, nnz: cute.Int32\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, x are tensors on the GPU\n@jax.jit\ndef solve(A: jax.Array, x: jax.Array, M: int, N: int, nnz: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n 
x: UnsafePointer[Float32, MutExternalOrigin],\n y: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n nnz: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, x, y are tensors on the GPU\ndef solve(A: torch.Tensor, x: torch.Tensor, y: torch.Tensor, M: int, N: int, nnz: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# A, x, y are tensors on the GPU\ndef solve(A: torch.Tensor, x: torch.Tensor, y: torch.Tensor, M: int, N: int, nnz: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> 
Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Sparse Matrix-Vector Multiplication\",\n", + " atol=1e-03,\n", + " rtol=1e-03,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, A: torch.Tensor, x: torch.Tensor, y: torch.Tensor, M: int, N: int, nnz: int\n", + " ):\n", + " # Accept A as either flattened (M*N,) or 2D (M, N)\n", + " if A.shape == (M * N,):\n", + " A_matrix = A.view(M, N)\n", + " elif A.shape == (M, N):\n", + " A_matrix = A\n", + " else:\n", + " raise AssertionError(f\"A.shape {A.shape} does not match expected {(M*N,)} or {(M, N)}\")\n", + " assert x.shape == (N,)\n", + " assert y.shape == (M,)\n", + " result = torch.matmul(A_matrix, x)\n", + " y.copy_(result)\n", + "\n", + " def get_solve_signature(self) -> 
Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"x\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"y\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"nnz\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A = torch.tensor(\n", + " [5.0, 0.0, 0.0, 1.0, 0.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0, 4.0], device=\"cuda\", dtype=dtype\n", + " )\n", + " x = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " y = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"x\": x,\n", + " \"y\": y,\n", + " \"M\": 3,\n", + " \"N\": 4,\n", + " \"nnz\": 5,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # small_test\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([[1.0, 2.0], [3.0, 4.0]], device=\"cuda\", dtype=dtype),\n", + " \"x\": torch.tensor([1.0, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"y\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"N\": 2,\n", + " \"nnz\": 4,\n", + " }\n", + " )\n", + " # identity_test\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor(\n", + " [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"x\": torch.tensor([1.0, 2.0, 3.0], device=\"cuda\", dtype=dtype),\n", + " \"y\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"N\": 3,\n", + " \"nnz\": 3,\n", + " }\n", + " )\n", + " # zero_test\n", + " tests.append(\n", + " {\n", + " \"A\": torch.zeros((2, 3), device=\"cuda\", dtype=dtype),\n", + " \"x\": torch.tensor([1.0, 2.0, 3.0], device=\"cuda\", dtype=dtype),\n", + " \"y\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"N\": 
3,\n", + " \"nnz\": 0,\n", + " }\n", + " )\n", + " # single_element_per_row\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 3.0, 0.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"x\": torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"y\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"N\": 4,\n", + " \"nnz\": 3,\n", + " }\n", + " )\n", + " # negative_values\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor(\n", + " [[-1.0, -2.0, -3.0], [-4.0, -5.0, -6.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"x\": torch.tensor([-1.0, -2.0, -3.0], device=\"cuda\", dtype=dtype),\n", + " \"y\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"N\": 3,\n", + " \"nnz\": 6,\n", + " }\n", + " )\n", + " # medium_matrix\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor(\n", + " [\n", + " 1.0,\n", + " 0.0,\n", + " 2.0,\n", + " 0.0,\n", + " 0.0,\n", + " 3.0,\n", + " 0.0,\n", + " 4.0,\n", + " 0.0,\n", + " 5.0,\n", + " 0.0,\n", + " 6.0,\n", + " 0.0,\n", + " 0.0,\n", + " 7.0,\n", + " 0.0,\n", + " 8.0,\n", + " 0.0,\n", + " 9.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 1.0,\n", + " 2.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 3.0,\n", + " 0.0,\n", + " 0.0,\n", + " 4.0,\n", + " 5.0,\n", + " 0.0,\n", + " 0.0,\n", + " 6.0,\n", + " 0.0,\n", + " 0.0,\n", + " 7.0,\n", + " 0.0,\n", + " 8.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 9.0,\n", + " 0.0,\n", + " 1.0,\n", + " 0.0,\n", + " 2.0,\n", + " 0.0,\n", + " 3.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 4.0,\n", + " 5.0,\n", + " 6.0,\n", + " 0.0,\n", + " 7.0,\n", + " 8.0,\n", + " 0.0,\n", + " 0.0,\n", + " 0.0,\n", + " 9.0,\n", + " 0.0,\n", + " 1.0,\n", + " 0.0,\n", + " 2.0,\n", + " 3.0,\n", + " 0.0,\n", + " 0.0,\n", + " 
0.0,\n", + " 4.0,\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"x\": torch.tensor(\n", + " [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"y\": torch.empty(10, device=\"cuda\", dtype=dtype),\n", + " \"M\": 10,\n", + " \"N\": 8,\n", + " \"nnz\": 35,\n", + " }\n", + " )\n", + "\n", + " # random_sparse_matrix\n", + " M_sparse = 20\n", + " N_sparse = 20\n", + " sparsity = 0.65\n", + "\n", + " # Generate random sparse matrix\n", + " A_dense = torch.empty((M_sparse, N_sparse), device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0)\n", + " mask = torch.rand((M_sparse, N_sparse), device=\"cuda\") > sparsity\n", + " A_sparse = A_dense * mask\n", + " nnz_sparse = int(mask.sum().item())\n", + "\n", + " tests.append(\n", + " {\n", + " \"A\": A_sparse,\n", + " \"x\": torch.empty(N_sparse, device=\"cuda\", dtype=dtype).uniform_(-2.0, 2.0),\n", + " \"y\": torch.zeros(M_sparse, device=\"cuda\", dtype=dtype),\n", + " \"M\": M_sparse,\n", + " \"N\": N_sparse,\n", + " \"nnz\": nnz_sparse,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M = 1000\n", + " N = 10000\n", + " nnz = 3500000\n", + " A = torch.zeros((M, N), device=\"cuda\", dtype=dtype)\n", + " total_elements = M * N\n", + " flat_indices = torch.randperm(total_elements, device=\"cuda\")[:nnz]\n", + " values = torch.empty(nnz, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0)\n", + " A.view(-1)[flat_indices] = values\n", + "\n", + " # Create a mask: 35% entries will be kept, 65% set to zero\n", + " x = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0)\n", + " y = torch.empty(M, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"x\": x,\n", + " \"y\": y,\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"nnz\": nnz,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/22_gemm.ipynb b/challenges/colab_exports/medium/22_gemm.ipynb new file mode 100644 index 00000000..53664f58 --- /dev/null +++ b/challenges/colab_exports/medium/22_gemm.ipynb @@ -0,0 +1,541 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a basic General Matrix Multiplication (GEMM). Given matrix $A$ of dimensions $M \\times K$, matrix $B$ of dimensions $K \\times N$, input/output matrix $C$ of dimensions $M \\times N$, and scalar multipliers $ \\alpha $ and $ \\beta $, compute the operation:\n $$ C = \\alpha \\cdot (A \\times B) + \\beta \\cdot C_{initial} $$\n

    \n

    \n The input matrices $A$, $B$, and the initial state of $C$ contain 16-bit floating-point numbers (FP16/half). All matrices are stored in row-major order. The scalars $ \\alpha $ and $ \\beta $ are 32-bit floats.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries other than WMMA are not permitted).
    • \n
    • The solve function signature must remain unchanged.
    • \n
    • Accumulation during multiplication should use FP32 for better precision before converting the final result to FP16.
    • \n
    • The final result must be stored back into matrix C as half.
    • \n
    \n\n

    Example:

    \n

    \nInput:
    \n(Note: Input matrices A, B, C_initial are FP16 type for the problem)
    \nMatrix $A$ ($M=2, K=3$):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 \\\\\n4.0 & 5.0 & 6.0\n\\end{bmatrix}\n$$\nMatrix $B$ ($K=3, N=2$):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 \\\\\n3.0 & 4.0 \\\\\n5.0 & 6.0\n\\end{bmatrix}\n$$\nMatrix $C_{initial}$ ($M=2, N=2$):\n$$\n\\begin{bmatrix}\n1.0 & 1.0 \\\\\n1.0 & 1.0\n\\end{bmatrix}\n$$\n$$\\alpha = 1.0 \\text{ (FP32)}$$\n$$\\beta = 0.0 \\text{ (FP32)}$$\n\nOutput (FP16):
    \nMatrix $C$ ($M=2, N=2$):\n$$\n\\begin{bmatrix}\n22.0 & 28.0 \\\\\n49.0 & 64.0\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 16 ≤ M, N, K ≤ 4096
    • \n\n
    • Performance is measured with K = 1,024, M = 1,024, N = 1,024
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n#include \n\n// A, B, and C are device pointers\nextern \"C\" void solve(const half* A, const half* B, half* C, int M, int N, int K, float alpha,\n float beta) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(\n A: cute.Tensor,\n B: cute.Tensor,\n C: cute.Tensor,\n M: cute.Int32,\n N: cute.Int32,\n K: cute.Int32,\n alpha: cute.Float32,\n beta: cute.Float32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on the GPU\n@jax.jit\ndef solve(\n A: jax.Array, B: jax.Array, M: int, N: int, K: int, alpha: float, beta: float\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory 
import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n A: UnsafePointer[Float16, MutExternalOrigin],\n B: UnsafePointer[Float16, MutExternalOrigin],\n C: UnsafePointer[Float16, MutExternalOrigin],\n M: Int32,\n N: Int32,\n K: Int32,\n alpha: Float32,\n beta: Float32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(\n A: torch.Tensor,\n B: torch.Tensor,\n C: torch.Tensor,\n M: int,\n N: int,\n K: int,\n alpha: float,\n beta: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# a, b, c are tensors on the GPU\ndef solve(\n a: torch.Tensor,\n b: torch.Tensor,\n c: torch.Tensor,\n M: int,\n N: int,\n K: int,\n alpha: float,\n beta: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = 
access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"General Matrix Multiplication (GEMM)\",\n", + " atol=5e-2,\n", + " rtol=5e-2,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " A: torch.Tensor,\n", + " B: torch.Tensor,\n", + " C: torch.Tensor,\n", + " M: int,\n", + " N: int,\n", + " K: int,\n", + " alpha: float,\n", + " beta: float,\n", + " ):\n", + " assert A.shape == (M, K)\n", + " assert B.shape == (K, N)\n", + " assert C.shape == (M, N)\n", + " A_f32 = A.view(M, 
K).to(torch.float32)\n", + " B_f32 = B.view(K, N).to(torch.float32)\n", + " C_f32 = C.view(M, N).to(torch.float32)\n", + " matmul_result = torch.matmul(A_f32, B_f32)\n", + " final_result = alpha * matmul_result + beta * C_f32\n", + " C.copy_(final_result.to(torch.float16))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_uint16), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_uint16), \"in\"),\n", + " \"C\": (ctypes.POINTER(ctypes.c_uint16), \"inout\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " \"alpha\": (ctypes.c_float, \"in\"),\n", + " \"beta\": (ctypes.c_float, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float16\n", + " A = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], device=\"cuda\", dtype=dtype)\n", + " B = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=\"cuda\", dtype=dtype)\n", + " C = torch.tensor([[1.0, 1.0], [1.0, 1.0]], device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"M\": 2,\n", + " \"N\": 2,\n", + " \"K\": 3,\n", + " \"alpha\": 1.0,\n", + " \"beta\": 0.0,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float16\n", + " tests = []\n", + "\n", + " # 16x16x16_a1_b0\n", + " tests.append(\n", + " {\n", + " \"A\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"B\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.zeros((16, 16), device=\"cuda\", dtype=dtype),\n", + " \"M\": 16,\n", + " \"N\": 16,\n", + " \"K\": 16,\n", + " \"alpha\": 1.0,\n", + " \"beta\": 0.0,\n", + " }\n", + " )\n", + "\n", + " # 16x16x16_a1_b1\n", + " tests.append(\n", + " {\n", + " \"A\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-0.5, 
0.5),\n", + " \"B\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-0.5, 0.5),\n", + " \"C\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-0.5, 0.5),\n", + " \"M\": 16,\n", + " \"N\": 16,\n", + " \"K\": 16,\n", + " \"alpha\": 1.0,\n", + " \"beta\": 1.0,\n", + " }\n", + " )\n", + "\n", + " # 32x16x16_a0.5_b0.5\n", + " tests.append(\n", + " {\n", + " \"A\": torch.empty((32, 16), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"B\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.empty((32, 16), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"M\": 32,\n", + " \"N\": 16,\n", + " \"K\": 16,\n", + " \"alpha\": 0.5,\n", + " \"beta\": 0.5,\n", + " }\n", + " )\n", + "\n", + " # 16x32x16_a1_b1\n", + " tests.append(\n", + " {\n", + " \"A\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"B\": torch.empty((16, 32), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.empty((16, 32), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"M\": 16,\n", + " \"N\": 32,\n", + " \"K\": 16,\n", + " \"alpha\": 1.0,\n", + " \"beta\": 1.0,\n", + " }\n", + " )\n", + "\n", + " # 16x16x32_a0_b1\n", + " tests.append(\n", + " {\n", + " \"A\": torch.empty((16, 32), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"B\": torch.empty((32, 16), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.empty((16, 16), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"M\": 16,\n", + " \"N\": 16,\n", + " \"K\": 32,\n", + " \"alpha\": 0.0,\n", + " \"beta\": 1.0,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float16\n", + " M = 1024\n", + " N = 1024\n", + " K = 1024\n", + " A = torch.empty((M, K), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " B = torch.empty((K, N), device=\"cuda\", 
dtype=dtype).uniform_(-1.0, 1.0)\n", + " C = torch.empty((M, N), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"alpha\": 1.0,\n", + " \"beta\": 1.0,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is 
currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + 
"metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/25_categorical_cross_entropy_loss.ipynb b/challenges/colab_exports/medium/25_categorical_cross_entropy_loss.ipynb new file mode 100644 index 00000000..0ce4e775 --- /dev/null +++ b/challenges/colab_exports/medium/25_categorical_cross_entropy_loss.ipynb @@ -0,0 +1,547 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program to calculate the categorical cross-entropy loss for a batch of predictions.\n Given a matrix of predicted logits $Z$ of size $N \\times C$ and a vector of true class labels true_labels of size $N$, compute the average cross-entropy loss over the batch.\n The loss for a single sample $j$ with logits $z_j = [z_{j1}, \\ldots, z_{jC}]$ and true label $y_j$ is calculated using the numerically stable formula:\n $$ \\text{Loss}_j = \\log\\left(\\sum_{k=1}^{C} e^{z_{jk}}\\right) - z_{j, y_j} $$\n The final output stored in the loss variable should be the average loss over the $N$ samples:\n $$ L = \\frac{1}{N} \\sum_{j=1}^{N} \\text{Loss}_j $$\n The input parameters are logits, true_labels, N (number of samples), and C (number of classes). The result should be stored in loss (a pointer to a single float).\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result (average loss) must be stored in loss
    • \n
    \n\n

    Example 1:

    \n
    Input:  N = 2, C = 3\n        logits = [[1.0, 2.0, 0.5], [0.1, 3.0, 1.5]]\n        true_labels = [1, 1]\nOutput: loss = [0.3548926]
    \n\n\n

    Example 2:

    \n
    Input:  N = 3, C = 4\n        logits = [[-0.5, 1.5, 0.0, 1.0], [2.0, -1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 0.0]]\n        true_labels = [3, 0, 1]\nOutput: loss = [0.98820376]
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 10,000
    • \n
    • 2 ≤ C ≤ 1,000
    • \n
    • -10.0 ≤ logits[i, j] ≤ 10.0
    • \n
    • 0 ≤ true_labels[i] < C
    • \n\n
    • Performance is measured with N = 10,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// logits, true_labels, loss are device pointers\nextern \"C\" void solve(const float* logits, const int* true_labels, float* loss, int N, int C) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# logits, true_labels, loss are tensors on the GPU\n@cute.jit\ndef solve(\n logits: cute.Tensor, true_labels: cute.Tensor, loss: cute.Tensor, N: cute.Int32, C: cute.Int32\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# logits, true_labels are tensors on the GPU\n@jax.jit\ndef solve(logits: jax.Array, true_labels: jax.Array, N: int, C: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import 
ceildiv\n\n\n@export\ndef solve(\n logits: UnsafePointer[Float32, MutExternalOrigin],\n true_labels: UnsafePointer[Int32, MutExternalOrigin],\n loss: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n C: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# logits, true_labels, loss are tensors on the GPU\ndef solve(logits: torch.Tensor, true_labels: torch.Tensor, loss: torch.Tensor, N: int, C: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# logits, true_labels, loss are tensors on the GPU\ndef solve(logits: torch.Tensor, true_labels: torch.Tensor, loss: torch.Tensor, N: int, C: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " 
Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Categorical Cross Entropy Loss\",\n", + " atol=1e-05,\n", + " rtol=1e-05,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, logits: torch.Tensor, true_labels: torch.Tensor, loss: torch.Tensor, N: int, C: int\n", + " ):\n", + " assert logits.dtype == torch.float32\n", + " assert true_labels.dtype == torch.int32\n", + " assert loss.dtype == torch.float32\n", + " assert logits.shape == (N, C)\n", + " assert true_labels.shape == (N,)\n", + " assert loss.shape == (1,)\n", + " assert N > 0 and C > 0\n", + " total_loss = 0.0\n", + " for i in 
range(N):\n", + " log_probs = logits[i]\n", + " true_label = true_labels[i].item()\n", + " assert 0 <= true_label < C\n", + " max_logit = torch.max(log_probs)\n", + " log_sum_exp = max_logit + torch.log(torch.sum(torch.exp(log_probs - max_logit)))\n", + " loss_i = log_sum_exp - log_probs[true_label]\n", + " total_loss += loss_i.item()\n", + " loss[0] = total_loss / N\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"logits\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"true_labels\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"loss\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"C\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype_logits = torch.float32\n", + " dtype_labels = torch.int32\n", + " logits = torch.tensor([[1.0, 2.0, 0.5], [0.1, 3.0, 1.5]], device=\"cuda\", dtype=dtype_logits)\n", + " true_labels = torch.tensor([1, 1], device=\"cuda\", dtype=dtype_labels)\n", + " loss = torch.zeros(1, device=\"cuda\", dtype=dtype_logits)\n", + " return {\n", + " \"logits\": logits,\n", + " \"true_labels\": true_labels,\n", + " \"loss\": loss,\n", + " \"N\": 2,\n", + " \"C\": 3,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype_logits = torch.float32\n", + " dtype_labels = torch.int32\n", + " tests = []\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[1.0, 2.0, 0.5], [0.1, 3.0, 1.5]], device=\"cuda\", dtype=dtype_logits\n", + " ),\n", + " \"true_labels\": torch.tensor([1, 1], device=\"cuda\", dtype=dtype_labels),\n", + " \"loss\": torch.zeros(1, device=\"cuda\", dtype=dtype_logits),\n", + " \"N\": 2,\n", + " \"C\": 3,\n", + " }\n", + " )\n", + " # example_2\n", + " tests.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[-0.5, 1.5, 0.0, 1.0], [2.0, -1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 0.0]],\n", 
+ " device=\"cuda\",\n", + " dtype=dtype_logits,\n", + " ),\n", + " \"true_labels\": torch.tensor([3, 0, 1], device=\"cuda\", dtype=dtype_labels),\n", + " \"loss\": torch.zeros(1, device=\"cuda\", dtype=dtype_logits),\n", + " \"N\": 3,\n", + " \"C\": 4,\n", + " }\n", + " )\n", + " # single_sample\n", + " tests.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[0.1, 0.2, 0.3, 0.4, 0.5]], device=\"cuda\", dtype=dtype_logits\n", + " ),\n", + " \"true_labels\": torch.tensor([4], device=\"cuda\", dtype=dtype_labels),\n", + " \"loss\": torch.zeros(1, device=\"cuda\", dtype=dtype_logits),\n", + " \"N\": 1,\n", + " \"C\": 5,\n", + " }\n", + " )\n", + " # uniform_logits_correct_label\n", + " tests.append(\n", + " {\n", + " \"logits\": torch.tensor([[1.0] * 5, [1.0] * 5], device=\"cuda\", dtype=dtype_logits),\n", + " \"true_labels\": torch.tensor([0, 0], device=\"cuda\", dtype=dtype_labels),\n", + " \"loss\": torch.zeros(1, device=\"cuda\", dtype=dtype_logits),\n", + " \"N\": 2,\n", + " \"C\": 5,\n", + " }\n", + " )\n", + " # high_confidence_correct\n", + " tests.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[-5.0, -5.0, 10.0, -5.0], [10.0, -5.0, -5.0, -5.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype_logits,\n", + " ),\n", + " \"true_labels\": torch.tensor([2, 0], device=\"cuda\", dtype=dtype_labels),\n", + " \"loss\": torch.zeros(1, device=\"cuda\", dtype=dtype_logits),\n", + " \"N\": 2,\n", + " \"C\": 4,\n", + " }\n", + " )\n", + " # high_confidence_incorrect\n", + " tests.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[10.0, -5.0, -5.0], [-5.0, 10.0, -5.0]], device=\"cuda\", dtype=dtype_logits\n", + " ),\n", + " \"true_labels\": torch.tensor([1, 2], device=\"cuda\", dtype=dtype_labels),\n", + " \"loss\": torch.zeros(1, device=\"cuda\", dtype=dtype_logits),\n", + " \"N\": 2,\n", + " \"C\": 3,\n", + " }\n", + " )\n", + " # larger_batch_random\n", + " tests.append(\n", + " {\n", + " \"logits\": torch.empty(100, 5, device=\"cuda\", 
dtype=dtype_logits).uniform_(\n", + " -5.0, 5.0\n", + " ),\n", + " \"true_labels\": torch.randint(0, 5, (100,), device=\"cuda\", dtype=dtype_labels),\n", + " \"loss\": torch.zeros(1, device=\"cuda\", dtype=dtype_logits),\n", + " \"N\": 100,\n", + " \"C\": 5,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype_logits = torch.float32\n", + " dtype_labels = torch.int32\n", + " logits = torch.empty(10000, 1000, device=\"cuda\", dtype=dtype_logits).uniform_(-10.0, 10.0)\n", + " true_labels = torch.randint(0, 1000, (10000,), device=\"cuda\", dtype=dtype_labels)\n", + " loss = torch.zeros(1, device=\"cuda\", dtype=dtype_logits)\n", + " return {\n", + " \"logits\": logits,\n", + " \"true_labels\": true_labels,\n", + " \"loss\": loss,\n", + " \"N\": 10000,\n", + " \"C\": 1000,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/27_mean_squared_error.ipynb b/challenges/colab_exports/medium/27_mean_squared_error.ipynb new file mode 100644 index 00000000..507c3fe5 --- /dev/null +++ b/challenges/colab_exports/medium/27_mean_squared_error.ipynb @@ -0,0 +1,497 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program to calculate the Mean Squared Error (MSE) between\n predicted values and target values. Given two arrays of equal length,\n predictions and targets, compute: $$ \\text{MSE} =\n \\frac{1}{N} \\sum_{i=1}^{N} (predictions_i - targets_i)^2 $$ where N is the\n number of elements in each array.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted.
    • \n
    • The solve function signature must remain unchanged.
    • \n
    • The final result must be stored in the mse variable.
    • \n
    \n\n

    Example 1:

    \n
    \n  Input:  predictions = [1.0, 2.0, 3.0, 4.0]\n          targets = [1.5, 2.5, 3.5, 4.5]\n  Output: mse = 0.25\n
    \n\n

    Example 2:

    \n
    \n  Input:  predictions = [10.0, 20.0, 30.0]\n          targets = [12.0, 18.0, 33.0]\n  Output: mse = 5.67\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 100,000,000
    • \n
    • \n -1000.0 ≤ predictions[i], targets[i] ≤\n 1000.0\n
    • \n\n
    • Performance is measured with N = 50,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// predictions, targets, mse are device pointers\nextern \"C\" void solve(const float* predictions, const float* targets, float* mse, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# predictions, targets, mse are tensors on the GPU\n@cute.jit\ndef solve(predictions: cute.Tensor, targets: cute.Tensor, mse: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# predictions, targets are tensors on the GPU\n@jax.jit\ndef solve(predictions: jax.Array, targets: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n predictions: 
UnsafePointer[Float32, MutExternalOrigin],\n targets: UnsafePointer[Float32, MutExternalOrigin],\n mse: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# predictions, targets, mse are tensors on the GPU\ndef solve(predictions: torch.Tensor, targets: torch.Tensor, mse: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# predictions, targets, mse are tensors on the GPU\ndef solve(predictions: torch.Tensor, targets: torch.Tensor, mse: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + 
" @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Mean Squared Error\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, predictions: torch.Tensor, targets: torch.Tensor, mse: torch.Tensor, N: int\n", + " ):\n", + " # predictions, targets, mse are tensors on the GPU\n", + " squared_diffs = torch.square(predictions - targets)\n", + " mean_squared_error = torch.mean(squared_diffs)\n", + " mse[0] = mean_squared_error\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"predictions\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"targets\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"mse\": 
(ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " predictions = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " targets = torch.tensor([1.5, 2.5, 3.5, 4.5], device=\"cuda\", dtype=dtype)\n", + " mse = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " N = 4\n", + " return {\n", + " \"predictions\": predictions,\n", + " \"targets\": targets,\n", + " \"mse\": mse,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # Test 1: basic_example\n", + " tests.append(\n", + " {\n", + " \"predictions\": torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"targets\": torch.tensor([1.5, 2.5, 3.5, 4.5], device=\"cuda\", dtype=dtype),\n", + " \"mse\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # Test 2: second_example\n", + " tests.append(\n", + " {\n", + " \"predictions\": torch.tensor([10.0, 20.0, 30.0], device=\"cuda\", dtype=dtype),\n", + " \"targets\": torch.tensor([12.0, 18.0, 33.0], device=\"cuda\", dtype=dtype),\n", + " \"mse\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + " # Test 3: zero_error\n", + " tests.append(\n", + " {\n", + " \"predictions\": torch.tensor([1.5, 2.5, 3.5, 4.5, 5.5], device=\"cuda\", dtype=dtype),\n", + " \"targets\": torch.tensor([1.5, 2.5, 3.5, 4.5, 5.5], device=\"cuda\", dtype=dtype),\n", + " \"mse\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + " # Test 4: negative_values\n", + " tests.append(\n", + " {\n", + " \"predictions\": torch.tensor([-2.5, -1.0, 0.0, 1.5], device=\"cuda\", dtype=dtype),\n", + " \"targets\": torch.tensor([-1.5, -2.0, 0.5, 2.0], device=\"cuda\", dtype=dtype),\n", + " \"mse\": 
torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # Test 5: large_difference\n", + " tests.append(\n", + " {\n", + " \"predictions\": torch.tensor([100.0, 200.0, 300.0], device=\"cuda\", dtype=dtype),\n", + " \"targets\": torch.tensor([150.0, 250.0, 350.0], device=\"cuda\", dtype=dtype),\n", + " \"mse\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + " # Test 6: medium_size\n", + " N = 1024\n", + " predictions = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0)\n", + " targets = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0)\n", + " mse = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " tests.append({\"predictions\": predictions, \"targets\": targets, \"mse\": mse, \"N\": N})\n", + " # Test 7: large_size\n", + " N = 10000\n", + " predictions = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0)\n", + " targets = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0)\n", + " mse = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " tests.append({\"predictions\": predictions, \"targets\": targets, \"mse\": mse, \"N\": N})\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 50000000\n", + " predictions = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0)\n", + " targets = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0)\n", + " mse = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"predictions\": predictions,\n", + " \"targets\": targets,\n", + " \"mse\": mse,\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", 
+ "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/28_gaussian_blur.ipynb b/challenges/colab_exports/medium/28_gaussian_blur.ipynb new file mode 100644 index 00000000..12c6b1a8 --- /dev/null +++ b/challenges/colab_exports/medium/28_gaussian_blur.ipynb @@ -0,0 +1,585 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a program that applies a Gaussian blur filter to a 2D image. Given an input image represented as a floating-point array and a Gaussian kernel, the program should compute the convolution of the image with the kernel.\n All inputs and outputs are stored in row-major order.\n

    \n\n

    \n The Gaussian blur is performed by convolving each pixel with a weighted average of its neighbors, where the weights are determined by the Gaussian kernel. For each output pixel at position (i, j), the value is calculated as:\n\n $$ output[i, j] = \\sum_{m=-k_h/2}^{k_h/2} \\sum_{n=-k_w/2}^{k_w/2} input[i+m, j+n] \\times kernel[m+k_h/2, n+k_w/2] $$\n\n where $k_h$ and $k_w$ are the kernel height and width.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output array
    • \n
    • Handle boundary conditions by using zero-padding (treat values outside the image boundary as zeros)
    • \n
    \n\n

    Example 1:

    \n
    \nInput:\n  image (5, 5) = [\n    [1.0, 2.0, 3.0, 4.0, 5.0],\n    [6.0, 7.0, 8.0, 9.0, 10.0],\n    [11.0, 12.0, 13.0, 14.0, 15.0],\n    [16.0, 17.0, 18.0, 19.0, 20.0],\n    [21.0, 22.0, 23.0, 24.0, 25.0]\n  ]\n\n  kernel (3, 3) = [\n    [0.0625, 0.125, 0.0625],\n    [0.125, 0.25, 0.125],\n    [0.0625, 0.125, 0.0625]\n  ]\n\nOutput:\n  output (5, 5) = [\n    [1.6875, 2.75, 3.5, 4.25, 3.5625],\n    [4.75, 7.0, 8.0, 9.0, 7.25],\n    [8.5, 12.0, 13.0, 14.0, 11.0],\n    [12.25, 17.0, 18.0, 19.0, 14.75],\n    [11.0625, 15.25, 16.0, 16.75, 12.9375]\n  ]\n\n
    \n\n

    Example 2:

    \n
    \nInput:\n  image (3, 3) = [\n    [10.0, 20.0, 30.0],\n    [40.0, 50.0, 60.0],\n    [70.0, 80.0, 90.0]\n  ]\n\n  kernel (3, 3) = [\n    [0.1, 0.1, 0.1],\n    [0.1, 0.2, 0.1],\n    [0.1, 0.1, 0.1]\n  ]\n\nOutput:\n  output (3, 3) = [\n    [13.0, 23.0, 19.0],\n    [31.0, 50.0, 39.0],\n    [31.0, 47.0, 37.0]\n  ]\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 input_rows, input_cols \u2264 4096
    • \n
    • 3 \u2264 kernel_rows, kernel_cols \u2264 21
    • \n
    • Both kernel_rows and kernel_cols will be odd numbers
    • \n
    • All kernel values will be non-negative and sum to 1.0 (normalized)
    • \n\n
    • Performance is measured with input_cols = 512, input_rows = 512, kernel_cols = 7, kernel_rows = 7
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, kernel, output are device pointers\nextern \"C\" void solve(const float* input, const float* kernel, float* output, int input_rows,\n int input_cols, int kernel_rows, int kernel_cols) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, kernel, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n kernel: cute.Tensor,\n output: cute.Tensor,\n input_rows: cute.Int32,\n input_cols: cute.Int32,\n kernel_rows: cute.Int32,\n kernel_cols: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input, kernel are tensors on the GPU\n@jax.jit\ndef solve(\n input: jax.Array,\n kernel: jax.Array,\n input_rows: int,\n input_cols: int,\n kernel_rows: int,\n kernel_cols: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile 
solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n kernel: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n input_rows: Int32,\n input_cols: Int32,\n kernel_rows: Int32,\n kernel_cols: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, kernel, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n kernel: torch.Tensor,\n output: torch.Tensor,\n input_rows: int,\n input_cols: int,\n kernel_rows: int,\n kernel_cols: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, kernel, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n kernel: torch.Tensor,\n output: torch.Tensor,\n input_rows: int,\n input_cols: int,\n kernel_rows: int,\n kernel_cols: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class 
ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Gaussian Blur\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " kernel: torch.Tensor,\n", + " output: torch.Tensor,\n", + " 
input_rows: int,\n", + " input_cols: int,\n", + " kernel_rows: int,\n", + " kernel_cols: int,\n", + " ):\n", + " input_2d = input.view(1, 1, input_rows, input_cols)\n", + " kernel_2d = kernel.view(1, 1, kernel_rows, kernel_cols)\n", + " pad_h = kernel_rows // 2\n", + " pad_w = kernel_cols // 2\n", + " result = torch.nn.functional.conv2d(input_2d, kernel_2d, padding=(pad_h, pad_w))\n", + " output[:] = result.view(-1)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"kernel\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"input_rows\": (ctypes.c_int, \"in\"),\n", + " \"input_cols\": (ctypes.c_int, \"in\"),\n", + " \"kernel_rows\": (ctypes.c_int, \"in\"),\n", + " \"kernel_cols\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_rows, input_cols = 5, 5\n", + " kernel_rows, kernel_cols = 3, 3\n", + " input = torch.tensor(\n", + " [\n", + " 1.0,\n", + " 2.0,\n", + " 3.0,\n", + " 4.0,\n", + " 5.0,\n", + " 6.0,\n", + " 7.0,\n", + " 8.0,\n", + " 9.0,\n", + " 10.0,\n", + " 11.0,\n", + " 12.0,\n", + " 13.0,\n", + " 14.0,\n", + " 15.0,\n", + " 16.0,\n", + " 17.0,\n", + " 18.0,\n", + " 19.0,\n", + " 20.0,\n", + " 21.0,\n", + " 22.0,\n", + " 23.0,\n", + " 24.0,\n", + " 25.0,\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " kernel = torch.tensor(\n", + " [0.0625, 0.125, 0.0625, 0.125, 0.25, 0.125, 0.0625, 0.125, 0.0625],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " output = torch.empty(input_rows * input_cols, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"kernel\": kernel,\n", + " \"output\": output,\n", + " \"input_rows\": input_rows,\n", + " \"input_cols\": input_cols,\n", + " \"kernel_rows\": kernel_rows,\n", + " \"kernel_cols\": 
kernel_cols,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [\n", + " [1.0, 2.0, 3.0, 4.0, 5.0],\n", + " [6.0, 7.0, 8.0, 9.0, 10.0],\n", + " [11.0, 12.0, 13.0, 14.0, 15.0],\n", + " [16.0, 17.0, 18.0, 19.0, 20.0],\n", + " [21.0, 22.0, 23.0, 24.0, 25.0],\n", + " ],\n", + " device=device,\n", + " dtype=dtype,\n", + " ).flatten(),\n", + " \"kernel\": torch.tensor(\n", + " [[0.0625, 0.125, 0.0625], [0.125, 0.25, 0.125], [0.0625, 0.125, 0.0625]],\n", + " device=device,\n", + " dtype=dtype,\n", + " ).flatten(),\n", + " \"output\": torch.zeros((5, 5), device=device, dtype=dtype).flatten(),\n", + " \"input_rows\": 5,\n", + " \"input_cols\": 5,\n", + " \"kernel_rows\": 3,\n", + " \"kernel_cols\": 3,\n", + " }\n", + " )\n", + "\n", + " # identity_kernel\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], device=device, dtype=dtype\n", + " ).flatten(),\n", + " \"kernel\": torch.tensor(\n", + " [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], device=device, dtype=dtype\n", + " ).flatten(),\n", + " \"output\": torch.zeros((3, 3), device=device, dtype=dtype).flatten(),\n", + " \"input_rows\": 3,\n", + " \"input_cols\": 3,\n", + " \"kernel_rows\": 3,\n", + " \"kernel_cols\": 3,\n", + " }\n", + " )\n", + "\n", + " # all_ones_input\n", + " tests.append(\n", + " {\n", + " \"input\": torch.ones((4, 4), device=device, dtype=dtype).flatten(),\n", + " \"kernel\": torch.full((3, 3), 0.111111, device=device, dtype=dtype).flatten(),\n", + " \"output\": torch.zeros((4, 4), device=device, dtype=dtype).flatten(),\n", + " \"input_rows\": 4,\n", + " \"input_cols\": 4,\n", + " \"kernel_rows\": 3,\n", + " \"kernel_cols\": 3,\n", + " }\n", + " )\n", + "\n", + " # single_pixel\n", + " tests.append(\n", + " 
{\n", + " \"input\": torch.tensor([[42.0]], device=device, dtype=dtype).flatten(),\n", + " \"kernel\": torch.tensor([[1.0]], device=device, dtype=dtype).flatten(),\n", + " \"output\": torch.zeros((1, 1), device=device, dtype=dtype).flatten(),\n", + " \"input_rows\": 1,\n", + " \"input_cols\": 1,\n", + " \"kernel_rows\": 1,\n", + " \"kernel_cols\": 1,\n", + " }\n", + " )\n", + "\n", + " # large_random\n", + " input_large = torch.empty((32, 32), device=device, dtype=dtype).uniform_(-10.0, 10.0)\n", + " kernel_large = torch.empty((5, 5), device=device, dtype=dtype).uniform_(0.0, 1.0)\n", + " tests.append(\n", + " {\n", + " \"input\": input_large.flatten(),\n", + " \"kernel\": kernel_large.flatten(),\n", + " \"output\": torch.zeros((32, 32), device=device, dtype=dtype).flatten(),\n", + " \"input_rows\": 32,\n", + " \"input_cols\": 32,\n", + " \"kernel_rows\": 5,\n", + " \"kernel_cols\": 5,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input_rows, input_cols = 512, 512\n", + " kernel_rows, kernel_cols = 7, 7\n", + " input = torch.empty(input_rows * input_cols, device=\"cuda\", dtype=dtype).uniform_(\n", + " 0.0, 255.0\n", + " )\n", + " kernel = torch.empty(kernel_rows * kernel_cols, device=\"cuda\", dtype=dtype).uniform_(\n", + " 0.0001, 0.02\n", + " )\n", + " output = torch.empty(input_rows * input_cols, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"kernel\": kernel,\n", + " \"output\": output,\n", + " \"input_rows\": input_rows,\n", + " \"input_cols\": input_cols,\n", + " \"kernel_rows\": kernel_rows,\n", + " \"kernel_cols\": kernel_cols,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import 
ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/29_top_k_selection.ipynb b/challenges/colab_exports/medium/29_top_k_selection.ipynb new file mode 100644 index 00000000..7eed7f24 --- /dev/null +++ b/challenges/colab_exports/medium/29_top_k_selection.ipynb @@ -0,0 +1,493 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that, given a 1D array input of 32-bit floating point numbers of length N, selects the k largest elements and writes them in descending order to the output array of length k.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output array
    • \n
    \n\n

    Example 1:

    \n
    \n  Input:\n  input = [1.0, 5.0, 3.0, 2.0, 4.0]\n  N = 5\n  k = 3\n\n  Output:\n  output = [5.0, 4.0, 3.0]\n  
    \n\n

    Example 2:

    \n
    \n  Input:\n  input = [7.2, -1.0, 3.3, 8.8, 2.2]\n  N = 5\n  k = 2\n\n  Output:\n  output = [8.8, 7.2]\n  
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 N \u2264 100,000,000
    • \n
    • 1 \u2264 k \u2264 N
    • \n
    • All values in input are 32-bit floats
    • \n\n
    • Performance is measured with N = 50,000,000, k = 100
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int N, int k) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32, k: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int, k: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, 
MutExternalOrigin],\n N: Int32,\n k: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, k: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, k: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " 
Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Top K Selection\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int, k: int):\n", + " assert input.shape == (N,)\n", + " assert output.shape == (k,)\n", + " assert input.dtype == output.dtype == torch.float32\n", + " assert input.device == output.device\n", + " topk = torch.topk(input, k, largest=True).values\n", + " output.copy_(topk)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"k\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input = 
torch.tensor([1.0, 5.0, 3.0, 2.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 5,\n", + " \"k\": 3,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, 5.0, 3.0, 2.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " \"k\": 3,\n", + " }\n", + " )\n", + " # negative_numbers\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [-2.0, -1.0, -3.0, -4.0, -5.0, -6.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"N\": 6,\n", + " \"k\": 2,\n", + " }\n", + " )\n", + " # all_equal\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([7.0, 7.0, 7.0, 7.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " \"k\": 3,\n", + " }\n", + " )\n", + " # single_element\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([42.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " \"k\": 1,\n", + " }\n", + " )\n", + " # reverse_sorted\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([5.0, 4.0, 3.0, 2.0, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " \"k\": 2,\n", + " }\n", + " )\n", + " # large_random (simulated; actual is random in runner)\n", + " N, k = 1000, 10\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"output\": torch.empty(k, device=\"cuda\", dtype=dtype),\n", 
+ " \"N\": N,\n", + " \"k\": k,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 50000000\n", + " k = 100\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1e6, 1e6),\n", + " \"output\": torch.empty(k, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"k\": k,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/30_batched_matrix_multiplication.ipynb b/challenges/colab_exports/medium/30_batched_matrix_multiplication.ipynb new file mode 100644 index 00000000..3f40a472 --- /dev/null +++ b/challenges/colab_exports/medium/30_batched_matrix_multiplication.ipynb @@ -0,0 +1,499 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a batched matrix multiplication in FP32. Given a batch of matrices A of shape [B, M, K] and a batch of matrices B of shape [B, K, N], compute the output batch C of shape [B, M, N] such that for each batch index b:\n $$\n C_b = A_b \\times B_b\n $$\n All matrices are stored in row-major order and use 32-bit floating point numbers (FP32).\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the C array
    • \n
    \n\n

    Example 1:

    \n
    \nInput:\nB = 2, M = 2, K = 3, N = 2\nA = [\n  [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],\n  [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]\n]\nB = [\n  [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],\n  [[6.0, 5.0], [4.0, 3.0], [2.0, 1.0]]\n]\nOutput:\nC = [\n  [[22.0, 28.0], [49.0, 64.0]],\n  [[92.0, 68.0], [128.0, 95.0]]\n]\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ B ≤ 128
    • \n
    • 1 ≤ M, N, K ≤ 1024
    • \n\n
    • Performance is measured with K = 256, M = 256, N = 256
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// A, B, C are device pointers\nextern \"C\" void solve(const float* A, const float* B, float* C, int BATCH, int M, int N, int K) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(\n A: cute.Tensor,\n B: cute.Tensor,\n C: cute.Tensor,\n BATCH: cute.Int32,\n M: cute.Int32,\n N: cute.Int32,\n K: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on the GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, BATCH: int, M: int, N: int, K: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n 
A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n BATCH: Int32,\n M: Int32,\n N: Int32,\n K: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, BATCH: int, M: int, N: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# a, b, c are tensors on the GPU\ndef solve(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, BATCH: int, M: int, N: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " 
pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Batched Matrix Multiplication\",\n", + " atol=1e-5,\n", + " rtol=1e-5,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, BATCH: int, M: int, N: int, K: int\n", + " ):\n", + " # A: (BATCH, M, K), B: (BATCH, K, N), C: (BATCH, M, N)\n", + " A = A.view(BATCH, M, K)\n", + " B = B.view(BATCH, K, N)\n", + " C.copy_(torch.bmm(A, B))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"C\": 
(ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"BATCH\": (ctypes.c_int, \"in\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " BATCH, M, K, N = 2, 2, 3, 2\n", + " A = torch.tensor(\n", + " [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " B = torch.tensor(\n", + " [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[6.0, 5.0], [4.0, 3.0], [2.0, 1.0]]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " C = torch.empty(BATCH, M, N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"B\": B, \"C\": C, \"BATCH\": BATCH, \"M\": M, \"N\": N, \"K\": K}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " tests = []\n", + "\n", + " # 1. basic_example\n", + " A1 = torch.tensor(\n", + " [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]],\n", + " device=device,\n", + " dtype=dtype,\n", + " ).flatten()\n", + " B1 = torch.tensor(\n", + " [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[6.0, 5.0], [4.0, 3.0], [2.0, 1.0]]],\n", + " device=device,\n", + " dtype=dtype,\n", + " ).flatten()\n", + " C1 = torch.empty((2, 2, 2), device=device, dtype=dtype)\n", + " tests.append({\"A\": A1, \"B\": B1, \"C\": C1, \"BATCH\": 2, \"M\": 2, \"N\": 2, \"K\": 3})\n", + "\n", + " # 2. 
single_batch\n", + " A2 = torch.tensor(\n", + " [[[1.0, 0.0, 2.0], [0.0, 1.0, 2.0], [2.0, 1.0, 0.0]]], device=device, dtype=dtype\n", + " ).flatten()\n", + " B2 = torch.tensor(\n", + " [[[2.0, 1.0, 0.0], [1.0, 2.0, 0.0], [0.0, 1.0, 2.0]]], device=device, dtype=dtype\n", + " ).flatten()\n", + " C2 = torch.empty((1, 3, 3), device=device, dtype=dtype)\n", + " tests.append({\"A\": A2, \"B\": B2, \"C\": C2, \"BATCH\": 1, \"M\": 3, \"N\": 3, \"K\": 3})\n", + "\n", + " # 3. batch_4_small\n", + " A3 = torch.empty((4, 2, 2), device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " B3 = torch.empty((4, 2, 2), device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " C3 = torch.empty((4, 2, 2), device=device, dtype=dtype)\n", + " tests.append({\"A\": A3, \"B\": B3, \"C\": C3, \"BATCH\": 4, \"M\": 2, \"N\": 2, \"K\": 2})\n", + "\n", + " # 4. batch_8_rectangular\n", + " A4 = torch.empty((8, 4, 2), device=device, dtype=dtype).uniform_(-10.0, 10.0)\n", + " B4 = torch.empty((8, 2, 3), device=device, dtype=dtype).uniform_(-10.0, 10.0)\n", + " C4 = torch.empty((8, 4, 3), device=device, dtype=dtype)\n", + " tests.append({\"A\": A4, \"B\": B4, \"C\": C4, \"BATCH\": 8, \"M\": 4, \"N\": 3, \"K\": 2})\n", + "\n", + " # 5. batch_16_large\n", + " A5 = torch.empty((16, 16, 16), device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " B5 = torch.empty((16, 16, 16), device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " C5 = torch.empty((16, 16, 16), device=device, dtype=dtype)\n", + " tests.append({\"A\": A5, \"B\": B5, \"C\": C5, \"BATCH\": 16, \"M\": 16, \"N\": 16, \"K\": 16})\n", + "\n", + " # 6. 
batch_2_non_square\n", + " A6 = torch.empty((2, 8, 4), device=device, dtype=dtype).uniform_(-5.0, 5.0)\n", + " B6 = torch.empty((2, 4, 6), device=device, dtype=dtype).uniform_(-5.0, 5.0)\n", + " C6 = torch.empty((2, 8, 6), device=device, dtype=dtype)\n", + " tests.append({\"A\": A6, \"B\": B6, \"C\": C6, \"BATCH\": 2, \"M\": 8, \"N\": 6, \"K\": 4})\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " BATCH, M, N, K = 32, 256, 256, 256 # Match speed_test.json\n", + " A = torch.empty(BATCH, M, K, device=\"cuda\", dtype=dtype).uniform_(\n", + " -10.0, 10.0\n", + " ) # Match range\n", + " B = torch.empty(BATCH, K, N, device=\"cuda\", dtype=dtype).uniform_(\n", + " -10.0, 10.0\n", + " ) # Match range\n", + " C = torch.empty(BATCH, M, N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"B\": B, \"C\": C, \"BATCH\": BATCH, \"M\": M, \"N\": N, \"K\": K}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/32_int8_quantized_matmul.ipynb b/challenges/colab_exports/medium/32_int8_quantized_matmul.ipynb new file mode 100644 index 00000000..d44d8879 --- /dev/null +++ b/challenges/colab_exports/medium/32_int8_quantized_matmul.ipynb @@ -0,0 +1,601 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a quantized matrix multiplication program for 8-bit signed integer matrices. Given two input matrices A of dimensions $M \\times K$ and B of dimensions $K \\times N$, quantization scales scale_A, scale_B, output scale scale_C, zero-points zero_point_A, zero_point_B, zero_point_C, compute:\n $$\n C_{\\text{quant}}(i, j) = \\mathrm{clamp}\\left(\n \\mathrm{round}\\left(\n \\frac{\n \\sum_{k=0}^{K-1} (A_{ik} - z_A)(B_{kj} - z_B) \\cdot s_A s_B\n }{s_C}\n \\right) + z_C,\\ -128,\\ 127\n \\right)\n $$\n where s_A = scale_A, z_A = zero_point_A, etc.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output matrix C as int8
    • \n
    • After accumulation in int32 and scaling in float32, values must be rounded to the nearest integer, shifted by zero_point_C, and clamped to the [-128, 127] range
    • \n
    \n\n

    Example 1:

    \n
    \n     Input:\n     A = [[1, 2],\n          [3, 4]]\n     B = [[5, 6],\n          [7, 8]]\n     M = 2, N = 2, K = 2\n     scale_A = 0.1, scale_B = 0.2, scale_C = 0.05\n     zero_point_A = 0, zero_point_B = 0, zero_point_C = 0\n\n     Output:\n     C = [[19, 22],\n          [43, 50]]\n     
    \n\n

    Example 2:

    \n
    \n     Input:\n     A = [[1, 2]]\n     B = [[3],\n          [4]]\n     M = 1, N = 1, K = 2\n     scale_A = 1.0, scale_B = 1.0, scale_C = 1.0\n     zero_point_A = 1, zero_point_B = 3, zero_point_C = 5\n\n     Output:\n     C = [[6]]\n     
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 M, N, K \u2264 4096
    • \n
    • scale_A, scale_B, scale_C are positive floats
    • \n
    • -128 \u2264 zero_point_A, zero_point_B, zero_point_C \u2264 127
    • \n\n
    • Performance is measured with K = 2,048, M = 8,192, N = 4,096
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// A, B, C are device pointers\nextern \"C\" void solve(const int8_t* A, const int8_t* B, int8_t* C, int M, int N, int K,\n float scale_A, float scale_B, float scale_C, int zero_point_A,\n int zero_point_B, int zero_point_C) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(\n A: cute.Tensor,\n B: cute.Tensor,\n C: cute.Tensor,\n M: cute.Int32,\n N: cute.Int32,\n K: cute.Int32,\n scale_A: cute.Float32,\n scale_B: cute.Float32,\n scale_C: cute.Float32,\n zero_point_A: cute.Int32,\n zero_point_B: cute.Int32,\n zero_point_C: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on the GPU\n@jax.jit\ndef solve(\n A: jax.Array,\n B: jax.Array,\n M: int,\n N: int,\n K: int,\n scale_A: float,\n scale_B: float,\n scale_C: float,\n zero_point_A: int,\n zero_point_B: int,\n zero_point_C: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n A: UnsafePointer[Int8, MutExternalOrigin],\n B: UnsafePointer[Int8, MutExternalOrigin],\n C: UnsafePointer[Int8, MutExternalOrigin],\n M: Int32,\n N: Int32,\n K: Int32,\n scale_A: Float32,\n scale_B: Float32,\n scale_C: Float32,\n zero_point_A: Int32,\n zero_point_B: Int32,\n zero_point_C: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(\n A: torch.Tensor,\n B: torch.Tensor,\n C: torch.Tensor,\n M: int,\n N: int,\n K: int,\n scale_A: float,\n scale_B: float,\n scale_C: float,\n zero_point_A: int,\n zero_point_B: int,\n zero_point_C: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# a, b, c are tensors on the GPU\ndef solve(\n a: torch.Tensor,\n b: torch.Tensor,\n c: torch.Tensor,\n M: int,\n N: int,\n K: int,\n scale_A: float,\n scale_B: float,\n scale_C: float,\n zero_point_A: int,\n zero_point_B: int,\n zero_point_C: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] 
+ }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def 
__init__(self):\n", + " super().__init__(\n", + " name=\"INT8 Quantized MatMul\", atol=0, rtol=0, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " A: torch.Tensor,\n", + " B: torch.Tensor,\n", + " C: torch.Tensor,\n", + " M: int,\n", + " N: int,\n", + " K: int,\n", + " scale_A: float,\n", + " scale_B: float,\n", + " scale_C: float,\n", + " zero_point_A: int,\n", + " zero_point_B: int,\n", + " zero_point_C: int,\n", + " ):\n", + " A = A.view(M, K).to(torch.int32)\n", + " B = B.view(K, N).to(torch.int32)\n", + " A_f = (A - zero_point_A).to(torch.float32)\n", + " B_f = (B - zero_point_B).to(torch.float32)\n", + " C_f = torch.matmul(A_f, B_f).round().int() # closest thing to integer accumulation we have\n", + " C_f = C_f * scale_A * scale_B / scale_C\n", + " C_q = torch.round(C_f).to(torch.int32) + zero_point_C\n", + " C_q = torch.clamp(C_q, -128, 127).to(torch.int8)\n", + " C.view(M, N).copy_(C_q)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_int8), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_int8), \"in\"),\n", + " \"C\": (ctypes.POINTER(ctypes.c_int8), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " \"scale_A\": (ctypes.c_float, \"in\"),\n", + " \"scale_B\": (ctypes.c_float, \"in\"),\n", + " \"scale_C\": (ctypes.c_float, \"in\"),\n", + " \"zero_point_A\": (ctypes.c_int, \"in\"),\n", + " \"zero_point_B\": (ctypes.c_int, \"in\"),\n", + " \"zero_point_C\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int8\n", + " device = \"cuda\"\n", + " A = torch.tensor([[1, 2], [3, 4]], dtype=dtype, device=device).flatten()\n", + " B = torch.tensor([[5, 6], [7, 8]], dtype=dtype, device=device).flatten()\n", + " C = torch.tensor([[0, 0], [0, 0]], dtype=dtype, device=device).flatten()\n", + 
"\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"M\": 2,\n", + " \"N\": 2,\n", + " \"K\": 2,\n", + " \"scale_A\": 0.1,\n", + " \"scale_B\": 0.2,\n", + " \"scale_C\": 0.05,\n", + " \"zero_point_A\": 0,\n", + " \"zero_point_B\": 0,\n", + " \"zero_point_C\": 0,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int8\n", + " device = \"cuda\"\n", + " tests = []\n", + "\n", + " # 1. 4x4x4_zero_zp\n", + " A1 = torch.randint(-128, 128, (4, 4), dtype=dtype, device=device)\n", + " B1 = torch.randint(-128, 128, (4, 4), dtype=dtype, device=device)\n", + " C1 = torch.randint(-128, 128, (4, 4), dtype=dtype, device=device)\n", + " tests.append(\n", + " {\n", + " \"A\": A1,\n", + " \"B\": B1,\n", + " \"C\": C1,\n", + " \"M\": 4,\n", + " \"N\": 4,\n", + " \"K\": 4,\n", + " \"scale_A\": 0.1,\n", + " \"scale_B\": 0.2,\n", + " \"scale_C\": 0.05,\n", + " \"zero_point_A\": 0,\n", + " \"zero_point_B\": 0,\n", + " \"zero_point_C\": 0,\n", + " }\n", + " )\n", + "\n", + " # 2. 2x3x5_nonzero_zp\n", + " A2 = torch.randint(-128, 128, (2, 5), dtype=dtype, device=device)\n", + " B2 = torch.randint(-128, 128, (5, 3), dtype=dtype, device=device)\n", + " C2 = torch.empty((2, 3), dtype=dtype, device=device)\n", + " tests.append(\n", + " {\n", + " \"A\": A2,\n", + " \"B\": B2,\n", + " \"C\": C2,\n", + " \"M\": 2,\n", + " \"N\": 3,\n", + " \"K\": 5,\n", + " \"scale_A\": 0.5,\n", + " \"scale_B\": 0.25,\n", + " \"scale_C\": 0.125,\n", + " \"zero_point_A\": 1,\n", + " \"zero_point_B\": -2,\n", + " \"zero_point_C\": 3,\n", + " }\n", + " )\n", + "\n", + " # 3. 
1x1x3\n", + " A3 = torch.randint(-128, 128, (1, 3), dtype=dtype, device=device)\n", + " B3 = torch.randint(-128, 128, (3, 1), dtype=dtype, device=device)\n", + " C3 = torch.empty((1, 1), dtype=dtype, device=device)\n", + " tests.append(\n", + " {\n", + " \"A\": A3,\n", + " \"B\": B3,\n", + " \"C\": C3,\n", + " \"M\": 1,\n", + " \"N\": 1,\n", + " \"K\": 3,\n", + " \"scale_A\": 1.0,\n", + " \"scale_B\": 1.0,\n", + " \"scale_C\": 1.0,\n", + " \"zero_point_A\": 1,\n", + " \"zero_point_B\": 3,\n", + " \"zero_point_C\": 5,\n", + " }\n", + " )\n", + "\n", + " # 4. 3x5x2\n", + " A4 = torch.randint(-50, 51, (3, 2), dtype=dtype, device=device)\n", + " B4 = torch.randint(-50, 51, (2, 5), dtype=dtype, device=device)\n", + " C4 = torch.zeros((3, 5), dtype=dtype, device=device)\n", + " tests.append(\n", + " {\n", + " \"A\": A4,\n", + " \"B\": B4,\n", + " \"C\": C4,\n", + " \"M\": 3,\n", + " \"N\": 5,\n", + " \"K\": 2,\n", + " \"scale_A\": 0.05,\n", + " \"scale_B\": 0.1,\n", + " \"scale_C\": 0.01,\n", + " \"zero_point_A\": 0,\n", + " \"zero_point_B\": 0,\n", + " \"zero_point_C\": 0,\n", + " }\n", + " )\n", + "\n", + " # 5. 
32x32x16\n", + " A5 = torch.randint(-128, 128, (32, 16), dtype=dtype, device=device)\n", + " B5 = torch.randint(-128, 128, (16, 32), dtype=dtype, device=device)\n", + " C5 = torch.empty((32, 32), dtype=dtype, device=device)\n", + " tests.append(\n", + " {\n", + " \"A\": A5,\n", + " \"B\": B5,\n", + " \"C\": C5,\n", + " \"M\": 32,\n", + " \"N\": 32,\n", + " \"K\": 16,\n", + " \"scale_A\": 0.2,\n", + " \"scale_B\": 0.3,\n", + " \"scale_C\": 0.1,\n", + " \"zero_point_A\": 0,\n", + " \"zero_point_B\": 0,\n", + " \"zero_point_C\": 0,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int8\n", + " device = \"cuda\"\n", + " shape_A = (8192, 2048)\n", + " shape_B = (2048, 4096)\n", + " shape_C = (8192, 4096)\n", + " A = torch.randint(-128, 128, (shape_A[0] * shape_A[1],), dtype=dtype, device=device)\n", + " B = torch.randint(-128, 128, (shape_B[0] * shape_B[1],), dtype=dtype, device=device)\n", + " C = torch.empty(shape_C[0] * shape_C[1], dtype=dtype, device=device)\n", + " M = 8192\n", + " N = 4096\n", + " K = 2048\n", + " scale_A = 0.1\n", + " scale_B = 0.1\n", + " scale_C = 0.01\n", + " zero_point_A = 0\n", + " zero_point_B = 0\n", + " zero_point_C = 0\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"scale_A\": scale_A,\n", + " \"scale_B\": scale_B,\n", + " \"scale_C\": scale_C,\n", + " \"zero_point_A\": zero_point_A,\n", + " \"zero_point_B\": zero_point_B,\n", + " \"zero_point_C\": zero_point_C,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/33_ordinary_least_squares.ipynb b/challenges/colab_exports/medium/33_ordinary_least_squares.ipynb new file mode 100644 index 00000000..73456968 --- /dev/null +++ b/challenges/colab_exports/medium/33_ordinary_least_squares.ipynb @@ -0,0 +1,709 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Solve the Ordinary Least Squares (OLS) regression problem on a GPU. Given a feature matrix $X$ of size $n\\_samples \\times n\\_features$ and a target vector $y$ of size $n\\_samples$, compute the coefficient vector $\\beta$ that minimizes the sum of squared residuals:\n $$ \\min_{\\beta} ||X\\beta - y||^2 $$\n\n The closed-form solution to OLS is:\n $$ \\beta = (X^TX)^{-1}X^Ty $$\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted.
    • \n
    • The solve function signature must remain unchanged.
    • \n
    • The final coefficients must be stored in the beta vector.
    • \n
    • Assume that the feature matrix $X$ is full rank (i.e., $X^TX$ is invertible).
    • \n
    \n\n

    Example:

    \n

    \nInput:
    \n$X$ (samples \u00d7 features):\n$$\n\\begin{bmatrix}\n-0.23 & -0.23 & 1.52 \\\\\n0.77 & -0.47 & 1.58 \\\\\n-0.14 & 0.65 & 0.5 \\\\\n-1.91 & -1.72 & 0.24 \\\\\n-0.46 & -0.47 & 0.54\n\\end{bmatrix}\n$$\n$y$:\n$$\n\\begin{bmatrix}\n83.01 \\\\\n93.4 \\\\\n47.33 \\\\\n-62.22 \\\\\n13.06\n\\end{bmatrix}\n$$\nOutput:
    \n$\\beta$:\n$$\n\\begin{bmatrix}\n13.97 \\\\\n29.12 \\\\\n61.05\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 n_samples \u2264 100,000\n
    • 1 \u2264 n_features \u2264 1,000\n
    • n_samples \u2265 n_features\n
    • -1000.0 \u2264 values in X and y \u2264 1000.0\n
    • Solutions are tested with absolute tolerance of 1e-2 and relative tolerance of 1e-2\n
    \n
    • Performance is measured with n_features = 32, n_samples = 32\n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// X, y, beta are device pointers\nextern \"C\" void solve(const float* X, const float* y, float* beta, int n_samples, int n_features) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# X, y, beta are tensors on the GPU\n@cute.jit\ndef solve(\n X: cute.Tensor, y: cute.Tensor, beta: cute.Tensor, n_samples: cute.Int32, n_features: cute.Int32\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# X, y are tensors on the GPU\n@jax.jit\ndef solve(X: jax.Array, y: jax.Array, n_samples: int, n_features: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# X, y, beta are device pointers (i.e. 
pointers to memory on the GPU)\n@export\ndef solve(\n X: UnsafePointer[Float32, MutExternalOrigin],\n y: UnsafePointer[Float32, MutExternalOrigin],\n beta: UnsafePointer[Float32, MutExternalOrigin],\n n_samples: Int32,\n n_features: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# X, y, beta are tensors on the GPU\ndef solve(X: torch.Tensor, y: torch.Tensor, beta: torch.Tensor, n_samples: int, n_features: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# X, y, beta are tensors on the GPU\ndef solve(X: torch.Tensor, y: torch.Tensor, beta: torch.Tensor, n_samples: int, n_features: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " 
Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Ordinary Least Squares\", atol=1e-02, rtol=1e-02, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, X: torch.Tensor, y: torch.Tensor, beta: torch.Tensor, n_samples: int, n_features: int\n", + " ):\n", + " # Reshape tensors to their proper dimensions\n", + " X_reshaped = X.view(n_samples, n_features)\n", + " y_reshaped = y.view(n_samples)\n", + "\n", + " # Compute X^T * X\n", + " XTX = torch.matmul(X_reshaped.t(), X_reshaped)\n", + "\n", + " # Compute X^T * y\n", + " XTy = torch.matmul(X_reshaped.t(), y_reshaped)\n", + "\n", + " # Solve the system using 
Cholesky decomposition\n", + " L = torch.linalg.cholesky(XTX)\n", + "\n", + " # Manual forward substitution for L * z = X^T * y\n", + " z = torch.zeros_like(XTy)\n", + " for i in range(n_features):\n", + " z[i] = XTy[i]\n", + " for j in range(i):\n", + " z[i] = z[i] - L[i, j] * z[j]\n", + " z[i] = z[i] / L[i, i]\n", + "\n", + " # Manual backward substitution for L^T * beta = z\n", + " result = torch.zeros_like(z)\n", + " for i in range(n_features - 1, -1, -1):\n", + " result[i] = z[i]\n", + " for j in range(i + 1, n_features):\n", + " result[i] = result[i] - L[j, i] * result[j]\n", + " result[i] = result[i] / L[i, i]\n", + "\n", + " # Copy to output tensor\n", + " beta.copy_(result)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"X\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"y\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"beta\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"n_samples\": (ctypes.c_int, \"in\"),\n", + " \"n_features\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " n_samples, n_features = 5, 3\n", + " X = torch.tensor(\n", + " [\n", + " [-0.23, -0.23, 1.52],\n", + " [0.77, -0.47, 1.58],\n", + " [-0.14, 0.65, 0.5],\n", + " [-1.91, -1.72, 0.24],\n", + " [-0.46, -0.47, 0.54],\n", + " ],\n", + " dtype=dtype,\n", + " device=\"cuda\",\n", + " )\n", + " y = torch.tensor([83.01, 93.4, 47.33, -62.22, 13.06], dtype=dtype, device=\"cuda\")\n", + " beta = torch.empty(n_features, dtype=dtype, device=\"cuda\")\n", + " return {\n", + " \"X\": X.flatten(),\n", + " \"y\": y,\n", + " \"beta\": beta,\n", + " \"n_samples\": n_samples,\n", + " \"n_features\": n_features,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " tests = []\n", + "\n", + " # Test 1: simple_1d\n", + " tests.append(\n", + " {\n", + " 
\"X\": torch.tensor(\n", + " [\n", + " [0.24799999594688416],\n", + " [-0.0689999982714653],\n", + " [0.3240000009536743],\n", + " [0.7620000243186951],\n", + " [-0.11699999868869781],\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"y\": torch.tensor(\n", + " [\n", + " 0.12200000137090683,\n", + " -0.01899999938905239,\n", + " 0.17000000178813934,\n", + " 0.37599998712539673,\n", + " -0.05299999937415123,\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"beta\": torch.zeros(1, dtype=dtype, device=device),\n", + " \"n_samples\": 5,\n", + " \"n_features\": 1,\n", + " }\n", + " )\n", + "\n", + " # Test 2: simple_2d\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [0.1289999932050705, -0.45399999618530273],\n", + " [-0.1889999955892563, -0.2669999897480011],\n", + " [0.42899999022483826, -0.2070000022649765],\n", + " [0.24899999797344208, 1.0049999952316284],\n", + " [0.6309999823570251, -0.2199999988079071],\n", + " [-0.17299999296665192, 0.2280000001192093],\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"y\": torch.tensor(\n", + " [\n", + " -0.40700000524520874,\n", + " -0.3709999918937683,\n", + " 0.013000000268220901,\n", + " 1.128000020980835,\n", + " 0.11500000208616257,\n", + " 0.13500000536441803,\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"beta\": torch.zeros(2, dtype=dtype, device=device),\n", + " \"n_samples\": 6,\n", + " \"n_features\": 2,\n", + " }\n", + " )\n", + "\n", + " # Test 3: square_3x3\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [0.125, 0.6579999923706055, 0.6230000257492065],\n", + " [-0.8019999861717224, -0.23399999737739563, -0.8579999804496765],\n", + " [0.9290000200271606, 0.04399999976158142, 0.4740000069141388],\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"y\": torch.tensor(\n", + " [1.6610000133514404, -1.930999994277954, 
1.2170000076293945],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"beta\": torch.zeros(3, dtype=dtype, device=device),\n", + " \"n_samples\": 3,\n", + " \"n_features\": 3,\n", + " }\n", + " )\n", + "\n", + " # Test 4: overdetermined_8x3\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [0.013000000268220901, 0.12999999523162842, -0.1979999989271164],\n", + " [-0.10199999809265137, -0.6359999775886536, -1.2979999780654907],\n", + " [0.14499999582767487, -0.43700000643730164, 0.19699999690055847],\n", + " [0.46799999475479126, -0.00800000037997961, 0.12999999523162842],\n", + " [-0.7369999885559082, 0.4009999930858612, -0.875],\n", + " [-0.24799999594688416, -0.5040000081062317, 0.013000000268220901],\n", + " [-0.061000000685453415, -0.7730000019073486, -0.30300000309944153],\n", + " [-0.6970000267028809, -0.3140000104904175, 0.16599999368190765],\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"y\": torch.tensor(\n", + " [\n", + " -0.17499999701976776,\n", + " -2.618000030517578,\n", + " -0.07400000095367432,\n", + " 0.4269999861717224,\n", + " -1.2580000162124634,\n", + " -0.6259999871253967,\n", + " -1.2640000581741333,\n", + " -0.41600000858306885,\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"beta\": torch.zeros(3, dtype=dtype, device=device),\n", + " \"n_samples\": 8,\n", + " \"n_features\": 3,\n", + " }\n", + " )\n", + "\n", + " # Test 5: medium_10x5\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [\n", + " 0.2919999957084656,\n", + " 0.6159999966621399,\n", + " 0.41100001335144043,\n", + " -0.4000000059604645,\n", + " 0.20600000023841858,\n", + " ],\n", + " [\n", + " -0.08799999952316284,\n", + " -0.03700000047683716,\n", + " -0.28299999237060547,\n", + " -0.04699999839067459,\n", + " 0.42899999022483826,\n", + " ],\n", + " [\n", + " -0.4309999942779541,\n", + " 0.00800000037997961,\n", + " 0.7829999923706055,\n", + " 
-0.23499999940395355,\n", + " -0.19599999487400055,\n", + " ],\n", + " [\n", + " 0.40799999237060547,\n", + " 0.03799999877810478,\n", + " -0.05000000074505806,\n", + " 0.8119999766349792,\n", + " -0.6679999828338623,\n", + " ],\n", + " [\n", + " -0.06800000369548798,\n", + " -0.23899999260902405,\n", + " -0.796999990940094,\n", + " -0.4339999854564667,\n", + " -0.01600000075995922,\n", + " ],\n", + " [\n", + " -0.7639999985694885,\n", + " -0.06199999898672104,\n", + " -0.13099999725818634,\n", + " 0.49799999594688416,\n", + " 0.1589999943971634,\n", + " ],\n", + " [\n", + " -0.01899999938905239,\n", + " -0.03400000184774399,\n", + " -0.22100000083446503,\n", + " -0.23999999463558197,\n", + " 0.026000000536441803,\n", + " ],\n", + " [\n", + " -0.4869999885559082,\n", + " -0.7170000076293945,\n", + " -0.18000000715255737,\n", + " 0.22699999809265137,\n", + " -0.40299999713897705,\n", + " ],\n", + " [\n", + " -1.347000002861023,\n", + " 0.25099998712539673,\n", + " -0.0020000000949949026,\n", + " -0.19599999487400055,\n", + " -0.07800000160932541,\n", + " ],\n", + " [\n", + " 0.22499999403953552,\n", + " 0.593999981880188,\n", + " -0.16699999570846558,\n", + " -0.057999998331069946,\n", + " 0.9179999828338623,\n", + " ],\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"y\": torch.tensor(\n", + " [\n", + " 1.1009999513626099,\n", + " 0.4620000123977661,\n", + " -0.007000000216066837,\n", + " 0.1420000046491623,\n", + " -2.3970000743865967,\n", + " 0.7590000033378601,\n", + " -0.796999990940094,\n", + " -1.7799999713897705,\n", + " -1.003000020980835,\n", + " 2.617000102996826,\n", + " ],\n", + " dtype=dtype,\n", + " device=device,\n", + " ),\n", + " \"beta\": torch.zeros(5, dtype=dtype, device=device),\n", + " \"n_samples\": 10,\n", + " \"n_features\": 5,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " 
n_samples = 32\n", + " n_features = 32\n", + " X = torch.eye(n_samples, dtype=dtype, device=device)\n", + " y = torch.ones(n_samples, dtype=dtype, device=device)\n", + " beta = torch.zeros(n_features, dtype=dtype, device=device)\n", + " return {\n", + " \"X\": X.flatten(), # flattened as in your other examples,\n", + " \"y\": y,\n", + " \"beta\": beta,\n", + " \"n_samples\": n_samples,\n", + " \"n_features\": n_features,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/34_logistic_regression.ipynb b/challenges/colab_exports/medium/34_logistic_regression.ipynb new file mode 100644 index 00000000..5744b020 --- /dev/null +++ b/challenges/colab_exports/medium/34_logistic_regression.ipynb @@ -0,0 +1,621 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Solve the logistic regression problem on a GPU. Given a feature matrix $X$ of size $n\\_samples \\times n\\_features$ and a binary target vector $y$ of size $n\\_samples$ (containing only 0s and 1s), compute the coefficient vector $\\beta$ that maximizes the log-likelihood:\n $$ \\max_{\\beta} \\sum_{i=1}^{n} \\left[ y_i \\log(p_i) + (1-y_i) \\log(1-p_i) \\right] $$\n\n where $p_i = \\sigma(X_i^T \\beta)$ and $\\sigma(z) = \\frac{1}{1 + e^{-z}}$ is the sigmoid function.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted\n
    • The solve function signature must remain unchanged\n
    • The final coefficients must be stored in the beta vector\n
    • The target vector y contains only binary values (0 and 1)\n
    \n\n

    Example:

    \n

    \nInput:
    \n$X$ (samples \u00d7 features):\n$$\n\\begin{bmatrix}\n2.0 & 1.0 \\\\\n1.0 & 2.0 \\\\\n3.0 & 3.0 \\\\\n1.5 & 2.5 \\\\\n-1.0 & -2.0 \\\\\n-2.0 & -1.0 \\\\\n-1.5 & -2.5 \\\\\n-3.0 & -3.0\n\\end{bmatrix}\n$$\n$y$:\n$$\n\\begin{bmatrix}\n1 \\\\\n1 \\\\\n1 \\\\\n0 \\\\\n0 \\\\\n0 \\\\\n1 \\\\\n0\n\\end{bmatrix}\n$$\nOutput:
    \n$\\beta$:\n$$\n\\begin{bmatrix}\n2.26 \\\\\n-1.29\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 n_samples \u2264 100,000\n
    • 1 \u2264 n_features \u2264 1,000\n
    • n_samples \u2265 n_features\n
    • -10.0 \u2264 values in X \u2264 10.0\n
    • y contains only binary values: 0 or 1\n
    • Solutions are tested with absolute tolerance of 1e-2 and relative tolerance of 1e-2\n
    \n
    • Performance is measured with n_features = 8, n_samples = 16\n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// X, y, beta are device pointers\nextern \"C\" void solve(const float* X, const float* y, float* beta, int n_samples, int n_features) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# X, y, beta are tensors on the GPU\n@cute.jit\ndef solve(\n X: cute.Tensor, y: cute.Tensor, beta: cute.Tensor, n_samples: cute.Int32, n_features: cute.Int32\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# X, y are tensors on the GPU\n@jax.jit\ndef solve(X: jax.Array, y: jax.Array, n_samples: int, n_features: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# X, y, beta are device pointers (i.e. 
pointers to memory on the GPU)\n@export\ndef solve(\n X: UnsafePointer[Float32, MutExternalOrigin],\n y: UnsafePointer[Float32, MutExternalOrigin],\n beta: UnsafePointer[Float32, MutExternalOrigin],\n n_samples: Int32,\n n_features: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# X, y, beta are tensors on the GPU\ndef solve(X: torch.Tensor, y: torch.Tensor, beta: torch.Tensor, n_samples: int, n_features: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# X, y, beta are tensors on the GPU\ndef solve(X: torch.Tensor, y: torch.Tensor, beta: torch.Tensor, n_samples: int, n_features: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " 
Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Logistic Regression\", atol=1e-02, rtol=1e-02, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, X: torch.Tensor, y: torch.Tensor, beta: torch.Tensor, n_samples: int, n_features: int\n", + " ):\n", + " \"\"\"\n", + " Logistic regression using Newton-Raphson (IRLS) in PyTorch.\n", + " This converges faster and more accurately than plain gradient descent.\n", + " \"\"\"\n", + " assert X.dtype == torch.float32\n", + " assert y.dtype == torch.float32\n", + " assert beta.dtype == torch.float32\n", + " assert X.shape == (n_samples, n_features)\n", + " assert 
y.shape == (n_samples,)\n", + " assert beta.shape == (n_features,)\n", + "\n", + " X_reshaped = X.view(n_samples, n_features)\n", + " y_reshaped = y.view(n_samples)\n", + " beta.zero_()\n", + "\n", + " max_iter = 1000\n", + " tol = 1e-8\n", + " l2_reg = 1e-6\n", + "\n", + " for _ in range(max_iter):\n", + " z = torch.mv(X_reshaped, beta)\n", + " p = torch.sigmoid(z)\n", + " W = p * (1 - p)\n", + " W = torch.clamp(W, min=1e-8)\n", + "\n", + " # Gradient\n", + " gradient = torch.mv(X_reshaped.t(), p - y_reshaped) + l2_reg * beta\n", + "\n", + " # Hessian\n", + " XW = X_reshaped * W.unsqueeze(1)\n", + " hessian = torch.mm(X_reshaped.t(), XW) + l2_reg * torch.eye(\n", + " n_features, device=X.device, dtype=X.dtype\n", + " )\n", + "\n", + " # Solve H @ delta = gradient\n", + " try:\n", + " delta = torch.linalg.solve(hessian, gradient)\n", + " except RuntimeError:\n", + " delta = torch.linalg.lstsq(hessian, gradient.unsqueeze(1)).solution.squeeze()\n", + "\n", + " beta_new = beta - delta\n", + "\n", + " if torch.norm(beta_new - beta) < tol:\n", + " beta.copy_(beta_new)\n", + " break\n", + "\n", + " beta.copy_(beta_new)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"X\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"y\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"beta\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"n_samples\": (ctypes.c_int, \"in\"),\n", + " \"n_features\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " X = torch.tensor(\n", + " [\n", + " [2.0, 1.0],\n", + " [1.0, 2.0],\n", + " [3.0, 3.0],\n", + " [1.5, 2.5],\n", + " [-1.0, -2.0],\n", + " [-2.0, -1.0],\n", + " [-1.5, -2.5],\n", + " [-3.0, -3.0],\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " y = torch.tensor([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0], device=\"cuda\", dtype=dtype)\n", + " beta = torch.zeros(2, 
device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"X\": X,\n", + " \"y\": y,\n", + " \"beta\": beta,\n", + " \"n_samples\": 8,\n", + " \"n_features\": 2,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # simple_1d\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [0.24799999594688416],\n", + " [-0.0689999982714653],\n", + " [0.3240000009536743],\n", + " [0.7620000243186951],\n", + " [-0.11699999868869781],\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"y\": torch.tensor([1.0, 1.0, 0.0, 0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.zeros(1, device=\"cuda\", dtype=dtype),\n", + " \"n_samples\": 5,\n", + " \"n_features\": 1,\n", + " }\n", + " )\n", + "\n", + " # simple_2d\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [0.1289999932050705, -0.45399999618530273],\n", + " [-0.1889999955892563, -0.2669999897480011],\n", + " [0.42899999022483826, -0.2070000022649765],\n", + " [0.24899999797344208, 1.0049999952316284],\n", + " [0.6309999823570251, -0.2199999988079071],\n", + " [-0.17299999296665192, 0.2280000001192093],\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"y\": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.zeros(2, device=\"cuda\", dtype=dtype),\n", + " \"n_samples\": 6,\n", + " \"n_features\": 2,\n", + " }\n", + " )\n", + "\n", + " # square_3x3\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [0.125, 0.6579999923706055, 0.6230000257492065],\n", + " [-0.8019999861717224, -0.23399999737739563, -0.8579999804496765],\n", + " [0.9290000200271606, 0.04399999976158142, 0.4740000069141388],\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"y\": torch.tensor([1.0, 0.0, 1.0], device=\"cuda\", dtype=dtype),\n", + " 
\"beta\": torch.zeros(3, device=\"cuda\", dtype=dtype),\n", + " \"n_samples\": 3,\n", + " \"n_features\": 3,\n", + " }\n", + " )\n", + "\n", + " # overdetermined_8x3\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [0.013000000268220901, 0.12999999523162842, -0.1979999989271164],\n", + " [-0.10199999809265137, -0.6359999775886536, -1.2979999780654907],\n", + " [0.14499999582767487, -0.43700000643730164, 0.19699999690055847],\n", + " [0.46799999475479126, -0.00800000037997961, 0.12999999523162842],\n", + " [-0.7369999885559082, 0.4009999930858612, -0.875],\n", + " [-0.24799999594688416, -0.5040000081062317, 0.013000000268220901],\n", + " [-0.061000000685453415, -0.7730000019073486, -0.30300000309944153],\n", + " [-0.6970000267028809, -0.3140000104904175, 0.16599999368190765],\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"y\": torch.tensor(\n", + " [1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"beta\": torch.zeros(3, device=\"cuda\", dtype=dtype),\n", + " \"n_samples\": 8,\n", + " \"n_features\": 3,\n", + " }\n", + " )\n", + "\n", + " # medium_10x3\n", + " tests.append(\n", + " {\n", + " \"X\": torch.tensor(\n", + " [\n", + " [0.2919999957084656, 0.6159999966621399, 0.41100001335144043],\n", + " [-0.4000000059604645, 0.20600000023841858, -0.08799999952316284],\n", + " [-0.03700000047683716, -0.28299999237060547, -0.04699999839067459],\n", + " [0.42899999022483826, -0.4309999942779541, 0.00800000037997961],\n", + " [0.7829999923706055, -0.23499999940395355, -0.19599999487400055],\n", + " [0.40799999237060547, 0.03799999877810478, -0.05000000074505806],\n", + " [0.8119999766349792, -0.6679999828338623, -0.06800000369548798],\n", + " [-0.23899999260902405, -0.796999990940094, -0.4339999854564667],\n", + " [-0.01600000075995922, -0.7639999985694885, -0.06199999898672104],\n", + " [-0.13099999725818634, 0.49799999594688416, 0.1589999943971634],\n", + " ],\n", + " 
device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"y\": torch.tensor(\n", + " [1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"beta\": torch.zeros(3, device=\"cuda\", dtype=dtype),\n", + " \"n_samples\": 10,\n", + " \"n_features\": 3,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + "\n", + " X = torch.eye(8, device=device, dtype=dtype).repeat(2, 1)\n", + " y = torch.tensor(\n", + " [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " beta = torch.zeros(8, device=device, dtype=dtype)\n", + "\n", + " return {\n", + " \"X\": X,\n", + " \"y\": y,\n", + " \"beta\": beta,\n", + " \"n_samples\": 16,\n", + " \"n_features\": 8,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/35_monte_carlo_integration.ipynb b/challenges/colab_exports/medium/35_monte_carlo_integration.ipynb new file mode 100644 index 00000000..1736ba36 --- /dev/null +++ b/challenges/colab_exports/medium/35_monte_carlo_integration.ipynb @@ -0,0 +1,507 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement Monte Carlo integration on a GPU. Given a set of function values $y_i = f(x_i)$ sampled at random points $x_i$ uniformly distributed in the interval $[a, b]$, estimate the definite integral:\n $$ \\int_a^b f(x) \\, dx \\approx (b - a) \\cdot \\frac{1}{n} \\sum_{i=1}^{n} y_i $$\n\n The Monte Carlo method approximates the integral by computing the average of the function values and multiplying by the interval width.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    \n
    • The solve function signature must remain unchanged
    \n
    • The final result must be stored in the result variable
    \n
    • Solutions are tested with absolute tolerance of 1e-2 and relative tolerance of 1e-2
    \n
    \n\n

    Example:

    \n
    \nInput:  a = 0, b = 2, n_samples = 8\n        y_samples = [0.0625, 0.25, 0.5625, 1.0, 1.5625, 2.25, 3.0625, 4.0]\nOutput: result = 3.1875\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 n_samples \u2264 100,000,000
    \n
    • -1000.0 \u2264 a < b \u2264 1000.0
    \n
    • -10000.0 \u2264 function values \u2264 10000.0
    \n
    • The tolerance is set to 1e-2 to account for the inherent randomness in Monte Carlo methods and floating-point precision variations.
    \n\n
    • Performance is measured with n_samples = 10,000,000
    \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// y_samples, result are device pointers\nextern \"C\" void solve(const float* y_samples, float* result, float a, float b, int n_samples) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# y_samples, result are tensors on the GPU\n@cute.jit\ndef solve(\n y_samples: cute.Tensor,\n result: cute.Tensor,\n a: cute.Float32,\n b: cute.Float32,\n n_samples: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# y_samples is a tensor on the GPU\n@jax.jit\ndef solve(y_samples: jax.Array, a: float, b: float, n_samples: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# y_samples, 
result are device pointers\n@export\ndef solve(\n y_samples: UnsafePointer[Float32, MutExternalOrigin],\n result: UnsafePointer[Float32, MutExternalOrigin],\n a: Float32,\n b: Float32,\n n_samples: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# y_samples, result are tensors on the GPU\ndef solve(y_samples: torch.Tensor, result: torch.Tensor, a: float, b: float, n_samples: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# y_samples, result are tensors on the GPU\ndef solve(y_samples: torch.Tensor, result: torch.Tensor, a: float, b: float, n_samples: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution 
implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Monte Carlo Integration\", atol=1e-02, rtol=1e-02, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, y_samples: torch.Tensor, result: torch.Tensor, a: float, b: float, n_samples: int\n", + " ):\n", + " assert y_samples.shape == (n_samples,)\n", + " assert result.shape == (1,)\n", + " assert y_samples.dtype == result.dtype\n", + " assert y_samples.device == result.device\n", + " assert b > a\n", + "\n", + " # Monte Carlo integration: integral \u2248 (b - a) * mean(y_samples)\n", + " mean_y = torch.mean(y_samples)\n", + " integral = (b - a) * mean_y\n", + "\n", + " result[0] = 
integral\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"y_samples\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"result\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"a\": (ctypes.c_float, \"in\"),\n", + " \"b\": (ctypes.c_float, \"in\"),\n", + " \"n_samples\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " y_samples = torch.tensor(\n", + " [0.0625, 0.25, 0.5625, 1.0, 1.5625, 2.25, 3.0625, 4.0], device=\"cuda\", dtype=dtype\n", + " )\n", + " result = torch.zeros(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"y_samples\": y_samples,\n", + " \"result\": result,\n", + " \"a\": 0.0,\n", + " \"b\": 2.0,\n", + " \"n_samples\": 8,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_specs = [\n", + " # Basic test cases\n", + " (\"basic_8\", [0.0625, 0.25, 0.5625, 1.0, 1.5625, 2.25, 3.0625, 4.0], 0.0, 2.0),\n", + " (\"constant_function\", [1.0, 1.0, 1.0, 1.0], 0.0, 4.0),\n", + " (\"linear_function\", [0.0, 1.0, 2.0, 3.0], 0.0, 3.0),\n", + " (\"negative_interval\", [-1.0, -2.0, -3.0], -2.0, 1.0),\n", + " (\"small_interval\", [0.5, 1.5], 1.0, 2.0),\n", + " ]\n", + "\n", + " test_cases = []\n", + " for _, y_vals, a, b in test_specs:\n", + " n_samples = len(y_vals)\n", + " test_cases.append(\n", + " {\n", + " \"y_samples\": torch.tensor(y_vals, device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.zeros(1, device=\"cuda\", dtype=dtype),\n", + " \"a\": a,\n", + " \"b\": b,\n", + " \"n_samples\": n_samples,\n", + " }\n", + " )\n", + "\n", + " # Random test cases with different sizes\n", + " for _, n_samples, a, b in [\n", + " (\"small_samples\", 10, 0.0, 1.0),\n", + " (\"medium_samples\", 100, -1.0, 1.0),\n", + " (\"large_samples\", 1000, 0.0, 10.0),\n", + " (\"many_samples\", 10000, -5.0, 5.0),\n", + " ]:\n", + " 
test_cases.append(\n", + " {\n", + " \"y_samples\": torch.empty(n_samples, device=\"cuda\", dtype=dtype).uniform_(\n", + " -10.0, 10.0\n", + " ),\n", + " \"result\": torch.zeros(1, device=\"cuda\", dtype=dtype),\n", + " \"a\": a,\n", + " \"b\": b,\n", + " \"n_samples\": n_samples,\n", + " }\n", + " )\n", + "\n", + " # Edge cases\n", + " for _, n_samples, a, b in [\n", + " (\"min_samples\", 1, 0.0, 1.0),\n", + " (\"large_interval\", 100, -100.0, 100.0),\n", + " (\"small_interval_edge\", 50, 0.0, 0.1),\n", + " ]:\n", + " test_cases.append(\n", + " {\n", + " \"y_samples\": torch.empty(n_samples, device=\"cuda\", dtype=dtype).uniform_(\n", + " -1.0, 1.0\n", + " ),\n", + " \"result\": torch.zeros(1, device=\"cuda\", dtype=dtype),\n", + " \"a\": a,\n", + " \"b\": b,\n", + " \"n_samples\": n_samples,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " n_samples = 10000000\n", + " return {\n", + " \"y_samples\": torch.empty(n_samples, device=\"cuda\", dtype=dtype).uniform_(\n", + " -1000.0, 1000.0\n", + " ),\n", + " \"result\": torch.zeros(1, device=\"cuda\", dtype=dtype),\n", + " \"a\": -10.0,\n", + " \"b\": 10.0,\n", + " \"n_samples\": n_samples,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/37_matrix_power.ipynb b/challenges/colab_exports/medium/37_matrix_power.ipynb new file mode 100644 index 00000000..61066bd6 --- /dev/null +++ b/challenges/colab_exports/medium/37_matrix_power.ipynb @@ -0,0 +1,518 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that raises a square matrix $A$ of size $N \\times N$ to an integer power $P$.
    \n The solve function receives a flattened input matrix input (row-major order), an empty output matrix output of the same size, the dimension N, and the exponent P.
    \n You must compute $\\text{output} = A^{P}$ where matrix multiplication is standard dense multiplication over 32-bit floating point numbers.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted.
    \n
    • The solve function signature must remain unchanged.
    \n
    • The final result must be written to the output array in row-major order.
    \n
    \n\n

    Example 1:

    \n
    \n  Input:\n    input  = [[1.0, 2.0],\n              [3.0, 4.0]]\n    N      = 2\n    P      = 3\n  Output:\n    output = [[37.0, 54.0],\n              [81.0, 118.0]]\n  
    \n\n

    Example 2:

    \n
    \n  Input:\n    input  = [[1.0, 0.0, 2.0],\n              [0.0, 1.0, 0.0],\n              [3.0, 0.0, 0.0]]\n    N      = 3\n    P      = 2\n  Output:\n    output = [[7.0, 0.0, 2.0],\n              [0.0, 1.0, 0.0],\n              [3.0, 0.0, 6.0]]\n  
    \n\n

    Constraints

    \n
      \n
    • $1 \\le N \\le 1024$
    \n
    • $1 \\le P \\le 20$
    \n
    • Elements of input satisfy $-10.0 \\le A_{ij} \\le 10.0$
    \n\n
    • Performance is measured with N = 512
    \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int N, int P) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32, P: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int, P: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: 
UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n P: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, P: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, P: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", 
+ " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Matrix Power\", atol=1e-04, rtol=1e-04, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int, P: int):\n", + " \"\"\"\n", + " Matrix power implementation using PyTorch.\n", + " Raises an N x N matrix to integer power P.\n", + " \"\"\"\n", + " assert input.dtype == torch.float32\n", + " assert output.dtype == torch.float32\n", + " assert input.shape == output.shape == (N * N,)\n", + " assert P >= 1\n", + "\n", + " mat = input.view(N, N)\n", + " result = torch.linalg.matrix_power(mat, P).float()\n", + " output[:] = result.reshape(-1)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " 
\"N\": (ctypes.c_int, \"in\"),\n", + " \"P\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 2\n", + " P = 3\n", + " input_data = torch.tensor([[1.0, 2.0], [3.0, 4.0]], device=\"cuda\", dtype=dtype).flatten()\n", + " output_data = torch.zeros((2, 2), device=\"cuda\", dtype=dtype).flatten()\n", + "\n", + " return {\n", + " \"input\": input_data,\n", + " \"output\": output_data,\n", + " \"N\": N,\n", + " \"P\": P,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_cases = []\n", + "\n", + " # Test case 1: example 2x2 power 3\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[1.0, 2.0], [3.0, 4.0]], device=\"cuda\", dtype=dtype\n", + " ).flatten(),\n", + " \"output\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype).flatten(),\n", + " \"N\": 2,\n", + " \"P\": 3,\n", + " }\n", + " )\n", + "\n", + " # Test case 2: identity 3x3 power 5\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.eye(3, device=\"cuda\", dtype=dtype).flatten(),\n", + " \"output\": torch.zeros((3, 3), device=\"cuda\", dtype=dtype).flatten(),\n", + " \"N\": 3,\n", + " \"P\": 5,\n", + " }\n", + " )\n", + "\n", + " # Test case 3: random 5x5 power 2\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty((5, 5), device=\"cuda\", dtype=dtype)\n", + " .uniform_(-5.0, 5.0)\n", + " .flatten(),\n", + " \"output\": torch.zeros((5, 5), device=\"cuda\", dtype=dtype).flatten(),\n", + " \"N\": 5,\n", + " \"P\": 2,\n", + " }\n", + " )\n", + "\n", + " # Test case 4: random 16x16 power 3\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty((16, 16), device=\"cuda\", dtype=dtype)\n", + " .uniform_(-1.0, 1.0)\n", + " .flatten(),\n", + " \"output\": torch.zeros((16, 16), device=\"cuda\", dtype=dtype).flatten(),\n", + " \"N\": 16,\n", + " \"P\": 3,\n", + " }\n", + " )\n", + "\n", + " 
# Test case 5: random 8x8 power 4\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty((8, 8), device=\"cuda\", dtype=dtype)\n", + " .uniform_(-10.0, 10.0)\n", + " .flatten(),\n", + " \"output\": torch.zeros((8, 8), device=\"cuda\", dtype=dtype).flatten(),\n", + " \"N\": 8,\n", + " \"P\": 4,\n", + " }\n", + " )\n", + "\n", + " # Test case 6: random 10x10 power 1\n", + " test_cases.append(\n", + " {\n", + " \"input\": torch.empty((10, 10), device=\"cuda\", dtype=dtype)\n", + " .uniform_(-2.0, 2.0)\n", + " .flatten(),\n", + " \"output\": torch.zeros((10, 10), device=\"cuda\", dtype=dtype).flatten(),\n", + " \"N\": 10,\n", + " \"P\": 1,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 512\n", + " P = 3\n", + " return {\n", + " \"input\": torch.empty((N, N), device=\"cuda\", dtype=dtype)\n", + " .uniform_(-10.0, 10.0)\n", + " .flatten(),\n", + " \"output\": torch.zeros((N, N), device=\"cuda\", dtype=dtype).flatten(),\n", + " \"N\": N,\n", + " \"P\": P,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/38_nearest_neighbor.ipynb b/challenges/colab_exports/medium/38_nearest_neighbor.ipynb new file mode 100644 index 00000000..ba7fb595 --- /dev/null +++ b/challenges/colab_exports/medium/38_nearest_neighbor.ipynb @@ -0,0 +1,599 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that, for N three-dimensional points stored on the device, fills indices[i] with the index j \u2260 i of the point closest to points[i]. Comparing squared Euclidean distance is sufficient\u2014you do not need to compute square roots.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • The solve function signature must remain unchanged
    • \n
    • External libraries are not permitted
    • \n
    • The final result must be stored in the indices array
    • \n
    \n\n

    Example 1:

    \n
    \nInput:  points  = [(0,0,0), (1,0,0), (5,5,5)]\n        indices = [-1, -1, -1]\n        N       = 3\nOutput: indices = [1, 0, 1]   # 0\u21c61 are nearest, 2 is closest to 1
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 N \u2264 100,000
    • \n
    • Coordinates are 32-bit floats in the range [-1000, 1000]
    • \n\n
    • Performance is measured with N = 10,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// points and indices are device pointers\nextern \"C\" void solve(const float* points, int* indices, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# points, indices are tensors on the GPU\n@cute.jit\ndef solve(points: cute.Tensor, indices: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# points is a tensor on the GPU\n@jax.jit\ndef solve(points: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.memory import UnsafePointer\n\n\n# points and indices are device pointers\n@export\ndef solve(\n points: UnsafePointer[Float32, MutExternalOrigin],\n indices: UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + 
"source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# points and indices are tensors on the GPU\ndef solve(points: torch.Tensor, indices: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# points and indices are tensors on the GPU\ndef solve(points: torch.Tensor, indices: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def 
generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Nearest Neighbor\", atol=0, rtol=0, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, points: torch.Tensor, indices: torch.Tensor, N: int):\n", + " \"\"\"\n", + " Reference implementation that finds the nearest neighbor for each point.\n", + " For N three-dimensional points, fills indices[i] with the index j\u2260i\n", + " of the point closest to points[i].\n", + " \"\"\"\n", + " assert points.dtype == torch.float32\n", + " assert indices.dtype == torch.int32\n", + " assert points.shape == (N * 3,) # N points, each with 3 coordinates\n", + " assert indices.shape == (N,)\n", + " assert N >= 1\n", + "\n", + " # Reshape points to (N, 3) for easier processing\n", + " pts = points.view(N, 3)\n", + "\n", + " # pts shape: (N, 3)\n", + " # Expand to (N, 1, 3) and (1, N, 3) for broadcasting\n", + " pts_expand1 = pts.unsqueeze(1) # (N, 1, 3)\n", + " pts_expand2 = pts.unsqueeze(0) # (1, N, 3)\n", + "\n", + " # Compute all pairwise squared distances: (N, 
N)\n", + " diff = pts_expand1 - pts_expand2 # (N, N, 3)\n", + " dist_sq = torch.sum(diff * diff, dim=2) # (N, N)\n", + "\n", + " # Mask diagonal (distance to self) with large value\n", + " mask = torch.eye(N, device=points.device, dtype=torch.bool)\n", + " dist_sq[mask] = float(\"inf\")\n", + "\n", + " # Find nearest neighbor indices\n", + " indices.copy_(torch.argmin(dist_sq, dim=1).int())\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"points\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"indices\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype_float = torch.float32\n", + " dtype_int = torch.int32\n", + " N = 3\n", + "\n", + " # Example: points = [(0,0,0), (1,0,0), (5,5,5)]\n", + " points_data = torch.tensor(\n", + " [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 5.0, 5.0, 5.0], # point 0 # point 1 # point 2\n", + " device=\"cuda\",\n", + " dtype=dtype_float,\n", + " )\n", + " indices_data = torch.full((N,), -1, device=\"cuda\", dtype=dtype_int)\n", + "\n", + " return {\n", + " \"points\": points_data,\n", + " \"indices\": indices_data,\n", + " \"N\": N,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype_float = torch.float32\n", + " dtype_int = torch.int32\n", + " test_cases = []\n", + "\n", + " # Test case 1: Basic example from problem description\n", + " test_cases.append(\n", + " {\n", + " \"points\": torch.tensor(\n", + " [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 5.0, 5.0, 5.0], # point 0 # point 1 # point 2\n", + " device=\"cuda\",\n", + " dtype=dtype_float,\n", + " ),\n", + " \"indices\": torch.full((3,), -1, device=\"cuda\", dtype=dtype_int),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + "\n", + " # Test case 2: Two points only\n", + " test_cases.append(\n", + " {\n", + " \"points\": torch.tensor(\n", + " [0.0, 0.0, 0.0, 3.0, 4.0, 0.0], # point 0 # point 1\n", 
+ " device=\"cuda\",\n", + " dtype=dtype_float,\n", + " ),\n", + " \"indices\": torch.full((2,), -1, device=\"cuda\", dtype=dtype_int),\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # Test case 3: Four points in a square\n", + " test_cases.append(\n", + " {\n", + " \"points\": torch.tensor(\n", + " [\n", + " 0.0,\n", + " 0.0,\n", + " 0.0, # point 0\n", + " 1.0,\n", + " 0.0,\n", + " 0.0, # point 1\n", + " 0.0,\n", + " 1.0,\n", + " 0.0, # point 2\n", + " 1.0,\n", + " 1.0,\n", + " 0.0,\n", + " ], # point 3\n", + " device=\"cuda\",\n", + " dtype=dtype_float,\n", + " ),\n", + " \"indices\": torch.full((4,), -1, device=\"cuda\", dtype=dtype_int),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # Test case 4: Points with negative coordinates\n", + " test_cases.append(\n", + " {\n", + " \"points\": torch.tensor(\n", + " [\n", + " -1.0,\n", + " -1.0,\n", + " -1.0, # point 0\n", + " 1.0,\n", + " 1.0,\n", + " 1.0, # point 1\n", + " 0.0,\n", + " 0.0,\n", + " 0.0, # point 2\n", + " 2.0,\n", + " 2.0,\n", + " 2.0,\n", + " ], # point 3\n", + " device=\"cuda\",\n", + " dtype=dtype_float,\n", + " ),\n", + " \"indices\": torch.full((4,), -1, device=\"cuda\", dtype=dtype_int),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + "\n", + " # Test case 5: Points with clear unique nearest neighbors\n", + " test_cases.append(\n", + " {\n", + " \"points\": torch.tensor(\n", + " [\n", + " 0.0,\n", + " 0.0,\n", + " 0.0, # point 0\n", + " 10.0,\n", + " 0.0,\n", + " 0.0, # point 1\n", + " 1.0,\n", + " 0.0,\n", + " 0.0, # point 2 (closest to 0)\n", + " 11.0,\n", + " 0.0,\n", + " 0.0, # point 3 (closest to 1)\n", + " 5.0,\n", + " 0.0,\n", + " 0.0,\n", + " ], # point 4\n", + " device=\"cuda\",\n", + " dtype=dtype_float,\n", + " ),\n", + " \"indices\": torch.full((5,), -1, device=\"cuda\", dtype=dtype_int),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + "\n", + " # Test case 6: Medium random test with fixed seed for reproducibility\n", + " torch.manual_seed(42)\n", + " test_cases.append(\n", + " 
{\n", + " \"points\": torch.empty((100, 3), device=\"cuda\", dtype=dtype_float)\n", + " .uniform_(-100.0, 100.0)\n", + " .flatten(),\n", + " \"indices\": torch.full((100,), -1, device=\"cuda\", dtype=dtype_int),\n", + " \"N\": 100,\n", + " }\n", + " )\n", + "\n", + " # Test case 7: Larger test with fixed seed\n", + " torch.manual_seed(123)\n", + " test_cases.append(\n", + " {\n", + " \"points\": torch.empty((250, 3), device=\"cuda\", dtype=dtype_float)\n", + " .uniform_(-1000.0, 1000.0)\n", + " .flatten(),\n", + " \"indices\": torch.full((250,), -1, device=\"cuda\", dtype=dtype_int),\n", + " \"N\": 250,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype_float = torch.float32\n", + " dtype_int = torch.int32\n", + " N = 10000\n", + "\n", + " return {\n", + " \"points\": torch.empty((N, 3), device=\"cuda\", dtype=dtype_float)\n", + " .uniform_(-1000.0, 1000.0)\n", + " .flatten(),\n", + " \"indices\": torch.full((N,), -1, device=\"cuda\", dtype=dtype_int),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/40_batch_normalization.ipynb b/challenges/colab_exports/medium/40_batch_normalization.ipynb new file mode 100644 index 00000000..96ba439f --- /dev/null +++ b/challenges/colab_exports/medium/40_batch_normalization.ipynb @@ -0,0 +1,601 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement batch normalization forward pass for 2D input tensors. Given an input tensor of shape [N, C] where N is the batch size and C is the number of features, compute the normalized output using learnable scale (gamma) and shift (beta) parameters.\n

    \n\n

    \n For each feature channel j, batch normalization computes:\n $$\n \\begin{align}\n \\mu_j &= \\frac{1}{N} \\sum_{i=1}^{N} x_{i,j} \\\\\n \\sigma_j^2 &= \\frac{1}{N} \\sum_{i=1}^{N} (x_{i,j} - \\mu_j)^2 \\\\\n \\hat{x}_{i,j} &= \\frac{x_{i,j} - \\mu_j}{\\sqrt{\\sigma_j^2 + \\epsilon}} \\\\\n y_{i,j} &= \\gamma_j \\hat{x}_{i,j} + \\beta_j\n \\end{align}\n $$\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output tensor
    • \n
    \n\n

    Example 1:

    \n
    \nInput:  input = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]  (N=3, C=2)\n        gamma = [1.0, 1.0]\n        beta = [0.0, 0.0]\n        eps = 1e-5\nOutput: output = [[-1.224, -1.224], [0.0, 0.0], [1.224, 1.224]]\n
    \n\n

    Example 2:

    \n
    \nInput:  input = [[0.0, 1.0], [2.0, 3.0]]  (N=2, C=2)\n        gamma = [2.0, 0.5]\n        beta = [1.0, -1.0]\n        eps = 1e-5\nOutput: output = [[-1.0, -1.5], [3.0, -0.5]]\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 N \u2264 10,000
    • \n
    • 1 \u2264 C \u2264 1,024
    • \n
    • eps = 1e-5
    • \n
    • -100.0 \u2264 input values \u2264 100.0
    • \n
    • 0.1 \u2264 gamma values \u2264 10.0
    • \n
    • -10.0 \u2264 beta values \u2264 10.0
    • \n\n
    • Performance is measured with N = 5,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, gamma, beta, output are device pointers\nextern \"C\" void solve(const float* input, const float* gamma, const float* beta, float* output,\n int N, int C, float eps) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, gamma, beta, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n gamma: cute.Tensor,\n beta: cute.Tensor,\n output: cute.Tensor,\n N: cute.Int32,\n C: cute.Int32,\n eps: cute.Float32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input, gamma, beta are tensors on the GPU\n@jax.jit\ndef solve(\n input: jax.Array, gamma: jax.Array, beta: jax.Array, N: int, C: int, eps: float\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import 
block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, gamma, beta, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n gamma: UnsafePointer[Float32, MutExternalOrigin],\n beta: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n C: Int32,\n eps: Float32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, gamma, beta, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n gamma: torch.Tensor,\n beta: torch.Tensor,\n output: torch.Tensor,\n N: int,\n C: int,\n eps: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, gamma, beta, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n gamma: torch.Tensor,\n beta: torch.Tensor,\n output: torch.Tensor,\n N: int,\n C: int,\n eps: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, 
rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Batch Normalization\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " gamma: torch.Tensor,\n", + " beta: torch.Tensor,\n", + " output: torch.Tensor,\n", + " N: int,\n", + " C: int,\n", + " eps: 
float,\n", + " ):\n", + " assert input.shape == output.shape == (N, C)\n", + " assert gamma.shape == beta.shape == (C,)\n", + " assert input.dtype == gamma.dtype == beta.dtype == output.dtype\n", + " assert input.device == gamma.device == beta.device == output.device\n", + "\n", + " # Compute mean and variance for each feature channel\n", + " mean = torch.mean(input, dim=0) # Shape: [C]\n", + " variance = torch.var(input, dim=0, unbiased=False) # Shape: [C]\n", + "\n", + " # Normalize\n", + " normalized = (input - mean) / torch.sqrt(variance + eps)\n", + "\n", + " # Scale and shift\n", + " output.copy_(gamma * normalized + beta)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"gamma\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"beta\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"C\": (ctypes.c_int, \"in\"),\n", + " \"eps\": (ctypes.c_float, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N, C = 3, 2\n", + " input = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=\"cuda\", dtype=dtype)\n", + " gamma = torch.tensor([1.0, 1.0], device=\"cuda\", dtype=dtype)\n", + " beta = torch.tensor([0.0, 0.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty((N, C), device=\"cuda\", dtype=dtype)\n", + " eps = 1e-5\n", + " return {\n", + " \"input\": input,\n", + " \"gamma\": gamma,\n", + " \"beta\": beta,\n", + " \"output\": output,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": eps,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_small\n", + " N, C = 3, 2\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[1.0, 2.0], [3.0, 4.0], 
[5.0, 6.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"gamma\": torch.tensor([1.0, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.tensor([0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # single_batch\n", + " N, C = 1, 4\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[1.0, 2.0, 3.0, 4.0]], device=\"cuda\", dtype=dtype),\n", + " \"gamma\": torch.tensor([1.0, 1.0, 1.0, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.tensor([0.0, 0.0, 0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # all_zeros\n", + " N, C = 4, 3\n", + " tests.append(\n", + " {\n", + " \"input\": torch.zeros((N, C), device=\"cuda\", dtype=dtype),\n", + " \"gamma\": torch.ones(C, device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.zeros(C, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # negative_numbers\n", + " N, C = 2, 3\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[-1.0, -2.0, -3.0], [-4.0, -5.0, -6.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"gamma\": torch.tensor([1.0, 1.0, 1.0], device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.tensor([0.0, 0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # different_gamma_beta\n", + " N, C = 2, 2\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[0.0, 1.0], [2.0, 3.0]], device=\"cuda\", dtype=dtype),\n", + " \"gamma\": 
torch.tensor([2.0, 0.5], device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.tensor([1.0, -1.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # large_values\n", + " N, C = 5, 3\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((N, C), device=\"cuda\", dtype=dtype).uniform_(-50.0, 50.0),\n", + " \"gamma\": torch.empty(C, device=\"cuda\", dtype=dtype).uniform_(0.5, 2.0),\n", + " \"beta\": torch.empty(C, device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " N, C = 64, 32\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((N, C), device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"gamma\": torch.empty(C, device=\"cuda\", dtype=dtype).uniform_(0.5, 2.0),\n", + " \"beta\": torch.empty(C, device=\"cuda\", dtype=dtype).uniform_(-2.0, 2.0),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # single_feature\n", + " N, C = 100, 1\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((N, C), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"gamma\": torch.tensor([1.5], device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.tensor([0.5], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # high_variance\n", + " N, C = 10, 5\n", + " input_data = torch.empty((N, C), device=\"cuda\", dtype=dtype)\n", + " for i in range(C):\n", + " input_data[:, i] = torch.linspace(\n", + " -100 + i * 10, 100 - i * 10, N, device=\"cuda\", 
dtype=dtype\n", + " )\n", + " tests.append(\n", + " {\n", + " \"input\": input_data,\n", + " \"gamma\": torch.ones(C, device=\"cuda\", dtype=dtype),\n", + " \"beta\": torch.zeros(C, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N, C = 5000, 512\n", + " return {\n", + " \"input\": torch.empty((N, C), device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"gamma\": torch.empty(C, device=\"cuda\", dtype=dtype).uniform_(0.5, 2.0),\n", + " \"beta\": torch.empty(C, device=\"cuda\", dtype=dtype).uniform_(-2.0, 2.0),\n", + " \"output\": torch.empty((N, C), device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"eps\": 1e-5,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/42_2d_max_pooling.ipynb b/challenges/colab_exports/medium/42_2d_max_pooling.ipynb new file mode 100644 index 00000000..da880140 --- /dev/null +++ b/challenges/colab_exports/medium/42_2d_max_pooling.ipynb @@ -0,0 +1,774 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a 2D max pooling operation for image/feature map downsampling.\n The program should take an input tensor and produce an output tensor by applying max pooling with specified kernel size, stride, and padding.\n

    \n\n\n \n \n \n \n \n \n\n \n Input (4x4)\n\n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n \n 1\n 3\n 2\n 4\n \n 5\n 8\n 6\n 7\n \n 9\n 2\n 4\n 3\n \n 1\n 6\n 5\n 8\n\n \n max\n \n\n \n Output (2x2)\n\n \n \n\n \n \n \n\n \n \n \n \n \n\n \n 8\n 7\n 9\n 8\n\n \n kernel: 2x2\n stride: 2\n padding: 0\n\n \n dashed borders = pooling windows\n\n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in tensor output
    • \n
    \n\n

    Max Pooling Operation

    \n

    \n For each output position (n, c, h_out, w_out), compute the maximum value over the corresponding input window:\n
    \n output[n, c, h_out, w_out] = max(input[n, c, h:h+kernel_size, w:w+kernel_size])\n
    \n where h = h_out * stride and w = w_out * stride\n

    \n\n

    Example 1:

    \n
    \nInput:  input = [[[[1.0, 2.0, 3.0],\n                   [4.0, 5.0, 6.0],\n                   [7.0, 8.0, 9.0]]]]\n        kernel_size = 2\n        stride = 1\n        padding = 0\nOutput: output = [[[[5.0, 6.0],\n                    [8.0, 9.0]]]]\n
    \n\n

    Example 2:

    \n
    \nInput:  input = [[[[1.0, 2.0, 3.0, 4.0, 5.0],\n                   [6.0, 7.0, 8.0, 9.0, 10.0],\n                   [11.0, 12.0, 13.0, 14.0, 15.0],\n                   [16.0, 17.0, 18.0, 19.0, 20.0],\n                   [21.0, 22.0, 23.0, 24.0, 25.0]]]]\n        kernel_size = 3\n        stride = 1\n        padding = 1\nOutput: output = [[[[7.0, 8.0, 9.0, 10.0, 10.0],\n                    [12.0, 13.0, 14.0, 15.0, 15.0],\n                    [17.0, 18.0, 19.0, 20.0, 20.0],\n                    [22.0, 23.0, 24.0, 25.0, 25.0],\n                    [22.0, 23.0, 24.0, 25.0, 25.0]]]]\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 N \u2264 100 (batch size)
    • \n
    • 1 \u2264 C \u2264 512 (channels)
    • \n
    • 1 \u2264 H, W \u2264 1024 (height, width)
    • \n
    • 1 \u2264 kernel_size \u2264 16
    • \n
    • 1 \u2264 stride \u2264 16
    • \n
    • 0 \u2264 padding \u2264 16
    • \n
    • Input and output tensors use float32 precision
    • \n\n
    • Performance is measured with N = 4, kernel_size = 3, stride = 2
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* input, float* output, int N, int C, int H, int W,\n int kernel_size, int stride, int padding) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n output: cute.Tensor,\n N: cute.Int32,\n C: cute.Int32,\n H: cute.Int32,\n W: cute.Int32,\n kernel_size: cute.Int32,\n stride: cute.Int32,\n padding: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(\n input: jax.Array, N: int, C: int, H: int, W: int, kernel_size: int, stride: int, padding: int\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from 
std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n C: Int32,\n H: Int32,\n W: Int32,\n kernel_size: Int32,\n stride: Int32,\n padding: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input, output, N, C, H, W, kernel_size, stride, padding):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input, output, N, C, H, W, kernel_size, stride, padding):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " 
self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"2D Max Pooling\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " output: torch.Tensor,\n", + " N: int,\n", + " C: int,\n", + " H: int,\n", + " W: int,\n", + " kernel_size: int,\n", + " stride: int,\n", + " padding: int,\n", + " ):\n", + " input_tensor = input.view(N, C, H, W)\n", + "\n", + " # Apply max pooling\n", + " result = 
torch.nn.functional.max_pool2d(\n", + " input_tensor, kernel_size=kernel_size, stride=stride, padding=padding\n", + " )\n", + "\n", + " output.copy_(result.flatten())\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"C\": (ctypes.c_int, \"in\"),\n", + " \"H\": (ctypes.c_int, \"in\"),\n", + " \"W\": (ctypes.c_int, \"in\"),\n", + " \"kernel_size\": (ctypes.c_int, \"in\"),\n", + " \"stride\": (ctypes.c_int, \"in\"),\n", + " \"padding\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " \"\"\"Simple test case matching the example in challenge.html\"\"\"\n", + " dtype = torch.float32\n", + " N, C, H, W = 1, 1, 3, 3\n", + " kernel_size, stride, padding = 2, 1, 0\n", + "\n", + " # Create input tensor: [[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]]\n", + " input_tensor = torch.tensor(\n", + " [[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]], device=\"cuda\", dtype=dtype\n", + " )\n", + "\n", + " # Calculate output dimensions\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " return {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"Comprehensive test suite covering various scenarios and edge cases\"\"\"\n", + " dtype = torch.float32\n", + " test_cases = []\n", + "\n", + " # Set seed for reproducible random tests\n", + " torch.manual_seed(42)\n", + 
"\n", + " # Test case 1: 2x2 kernel, stride 2, no padding (deterministic)\n", + " N, C, H, W = 1, 1, 4, 4\n", + " kernel_size, stride, padding = 2, 2, 0\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " input_tensor = torch.tensor(\n", + " [\n", + " [\n", + " [\n", + " [1.0, 2.0, 3.0, 4.0],\n", + " [5.0, 6.0, 7.0, 8.0],\n", + " [9.0, 10.0, 11.0, 12.0],\n", + " [13.0, 14.0, 15.0, 16.0],\n", + " ]\n", + " ]\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 2: 3x3 kernel, stride 1, padding 1 (random data)\n", + " N, C, H, W = 1, 2, 5, 5\n", + " kernel_size, stride, padding = 3, 1, 1\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " input_tensor = torch.randn(N, C, H, W, device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 3: 1x1 kernel, stride 1, no padding (identity operation)\n", + " N, C, H, W = 2, 3, 8, 8\n", + " kernel_size, stride, padding = 1, 1, 0\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - 
kernel_size) // stride + 1\n", + "\n", + " input_tensor = torch.randn(N, C, H, W, device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 4: Large kernel with padding\n", + " N, C, H, W = 1, 1, 10, 10\n", + " kernel_size, stride, padding = 5, 2, 2\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " input_tensor = torch.randn(N, C, H, W, device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 5: Edge case with small dimensions\n", + " N, C, H, W = 1, 1, 2, 2\n", + " kernel_size, stride, padding = 2, 1, 0\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " input_tensor = torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]], device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": 
padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 6: Boundary conditions - kernel size equals input size\n", + " N, C, H, W = 1, 1, 3, 3\n", + " kernel_size, stride, padding = 3, 1, 0\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " input_tensor = torch.tensor(\n", + " [[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]], device=\"cuda\", dtype=dtype\n", + " )\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 7: Large padding relative to input size\n", + " N, C, H, W = 1, 1, 4, 4\n", + " kernel_size, stride, padding = 2, 1, 1\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " input_tensor = torch.tensor(\n", + " [\n", + " [\n", + " [\n", + " [1.0, 2.0, 3.0, 4.0],\n", + " [5.0, 6.0, 7.0, 8.0],\n", + " [9.0, 10.0, 11.0, 12.0],\n", + " [13.0, 14.0, 15.0, 16.0],\n", + " ]\n", + " ]\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 8: Multiple channels with different patterns\n", + " N, C, H, W = 1, 3, 6, 6\n", + " kernel_size, stride, padding = 2, 2, 1\n", + " H_out = (H + 2 
* padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " # Create structured input with different patterns per channel\n", + " input_tensor = torch.zeros(N, C, H, W, device=\"cuda\", dtype=dtype)\n", + " input_tensor[0, 0, :, :] = torch.arange(H * W, device=\"cuda\", dtype=dtype).reshape(H, W)\n", + " input_tensor[0, 1, :, :] = (\n", + " torch.arange(H * W, device=\"cuda\", dtype=dtype).reshape(H, W).flip(0)\n", + " )\n", + " input_tensor[0, 2, :, :] = (\n", + " torch.arange(H * W, device=\"cuda\", dtype=dtype).reshape(H, W).flip(1)\n", + " )\n", + "\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 9: Extreme values and edge cases\n", + " N, C, H, W = 1, 1, 5, 5\n", + " kernel_size, stride, padding = 2, 1, 0\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " # Create input with extreme values\n", + " input_tensor = torch.tensor(\n", + " [\n", + " [\n", + " [\n", + " [1e6, -1e6, 0.0, 1e-6, -1e-6],\n", + " [float(\"inf\"), float(\"-inf\"), 1.0, 2.0, 3.0],\n", + " [4.0, 5.0, 6.0, 7.0, 8.0],\n", + " [9.0, 10.0, 11.0, 12.0, 13.0],\n", + " [14.0, 15.0, 16.0, 17.0, 18.0],\n", + " ]\n", + " ]\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " 
\"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " # Test case 10: Non-power-of-two dimensions\n", + " N, C, H, W = 1, 1, 7, 11\n", + " kernel_size, stride, padding = 3, 2, 1\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " input_tensor = torch.randn(N, C, H, W, device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " test_cases.append(\n", + " {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " \"\"\"Large test case for performance evaluation\"\"\"\n", + " dtype = torch.float32\n", + " # Reasonable size for performance testing without memory issues\n", + " N, C, H, W = 4, 64, 256, 256 # 4 batches, 64 channels, 256x256 spatial\n", + " kernel_size, stride, padding = 3, 2, 1\n", + "\n", + " H_out = (H + 2 * padding - kernel_size) // stride + 1\n", + " W_out = (W + 2 * padding - kernel_size) // stride + 1\n", + "\n", + " # Use seeded random for reproducible performance tests\n", + " torch.manual_seed(123)\n", + " input_tensor = torch.randn(N, C, H, W, device=\"cuda\", dtype=dtype)\n", + " output_tensor = torch.empty(N * C * H_out * W_out, device=\"cuda\", dtype=dtype)\n", + "\n", + " return {\n", + " \"input\": input_tensor.flatten(),\n", + " \"output\": output_tensor,\n", + " \"N\": N,\n", + " \"C\": C,\n", + " \"H\": H,\n", + " \"W\": W,\n", + " \"kernel_size\": kernel_size,\n", + " \"stride\": stride,\n", + " \"padding\": padding,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/43_count_array_element.ipynb b/challenges/colab_exports/medium/43_count_array_element.ipynb new file mode 100644 index 00000000..eaa59f23 --- /dev/null +++ b/challenges/colab_exports/medium/43_count_array_element.ipynb @@ -0,0 +1,490 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a GPU program that counts the number of elements with the integer value k in an array of 32-bit integers.\n The program should count the number of elements equal to k in an array.\n You are given an input array input of length N and integer k.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n\n

    Example 1:

    \n
    \nInput: [1, 2, 3, 4, 1], k = 1\nOutput: 2\n
    \n\n

    Example 2:

    \n
    \nInput: [5, 10, 5, 2], k = 11\nOutput: 0\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 100,000,000
    • \n
    • 1 ≤ input[i], k ≤ 100,000
    • \n\n
    • Performance is measured with K = 501,010, N = 100,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const int* input, int* output, int N, int K) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32, K: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int, K: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Int32, MutExternalOrigin],\n output: 
UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n K: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + 
" Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Count Array Element\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int, K: int):\n", + " # Validate input types and shapes\n", + " assert input.shape == (N,)\n", + " assert output.shape == (1,)\n", + " assert input.dtype == torch.int32\n", + " assert output.dtype == torch.int32\n", + "\n", + " # count the number of element with value k in an input array\n", + " equality_tensor = input == K\n", + " output[0] = torch.sum(equality_tensor)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + 
" }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.tensor([1, 2, 3, 4, 1], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 5,\n", + " \"K\": 1,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1, 2, 3, 4, 1], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " \"K\": 1,\n", + " }\n", + " )\n", + "\n", + " # all_same_value\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([2] * 16, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 16,\n", + " \"K\": 2,\n", + " }\n", + " )\n", + "\n", + " # increasing_sequence\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 5, (32,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 32,\n", + " \"K\": 4,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 10, (1000,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1000,\n", + " \"K\": 5,\n", + " }\n", + " )\n", + "\n", + " # large_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 1000, (100000,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 100000,\n", + " \"K\": 501,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.randint(1, 100001, (100000000,), 
device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 100000000,\n", + " \"K\": 501010,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure 
you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/44_count_2d_array_element.ipynb b/challenges/colab_exports/medium/44_count_2d_array_element.ipynb new file mode 100644 index 00000000..4f33771c --- /dev/null +++ b/challenges/colab_exports/medium/44_count_2d_array_element.ipynb @@ -0,0 +1,498 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a GPU program that counts the number of elements with the integer value k in a 2D array of 32-bit integers.\n The program should count the number of elements equal to k in a 2D array.\n You are given an input 2D array input of size N x M and integer k.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n\n

    Example 1:

    \n
    \nInput: input [[1, 2, 3],\n              [4, 5, 1]]\n       k = 1\nOutput: output = 2\n
    \n\n

    Example 2:

    \n
    \nInput: input [[5, 10],\n              [5, 2]]\n       k = 1\nOutput: output = 0\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N, M ≤ 10,000
    • \n
    • 1 ≤ input[i], k ≤ 100
    • \n\n
    • Performance is measured with K = 1, M = 10,000, N = 10,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const int* input, int* output, int N, int M, int K) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32, M: cute.Int32, K: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int, M: int, K: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Int32, 
MutExternalOrigin],\n output: UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n M: Int32,\n K: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, M: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, M: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + 
" Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Count 2D Array Element\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int, M: int, K: int):\n", + " # Validate input types and shapes\n", + " assert input.shape == (N, M)\n", + " assert output.shape == (1,)\n", + " assert input.dtype == torch.int32\n", + " assert output.dtype == torch.int32\n", + "\n", + " # count the number of element with value k in an input array\n", + " equality_tensor = input == K\n", + " output[0] = torch.sum(equality_tensor)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " 
\"N\": (ctypes.c_int, \"in\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.tensor([[1, 2, 3], [4, 5, 1]], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 2,\n", + " \"M\": 3,\n", + " \"K\": 1,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[1, 2, 3], [4, 5, 1]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " \"M\": 3,\n", + " \"K\": 1,\n", + " }\n", + " )\n", + "\n", + " # all_same_value\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[2] * 16] * 3, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " \"M\": 16,\n", + " \"K\": 2,\n", + " }\n", + " )\n", + "\n", + " # increasing_sequence\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (50, 50), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 50,\n", + " \"M\": 50,\n", + " \"K\": 5,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 101, (100, 100), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 100,\n", + " \"M\": 100,\n", + " \"K\": 51,\n", + " }\n", + " )\n", + "\n", + " # large_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (1000, 1000), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " 
\"N\": 1000,\n", + " \"M\": 1000,\n", + " \"K\": 1,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.randint(1, 3, (10000, 10000), device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 10000,\n", + " \"M\": 10000,\n", + " \"K\": 1,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/45_count_3d_array_element.ipynb b/challenges/colab_exports/medium/45_count_3d_array_element.ipynb new file mode 100644 index 00000000..12d61c6c --- /dev/null +++ b/challenges/colab_exports/medium/45_count_3d_array_element.ipynb @@ -0,0 +1,513 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a GPU program that counts the number of elements with the integer value p in a 3D array of 32-bit integers.\n The program should count the number of elements equal to p in a 3D array.\n You are given an input 3D array input of size N x M x K and integer p.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n\n

    Example 1:

    \n
    \nInput: input [[[1, 2, 3],\n               [4, 5, 1]],\n              [[1, 1, 1],\n               [2, 2, 2]]]\n       N = 2, M = 2, K = 3\n       p = 1\nOutput: output = 5\n
    \n\n

    Example 2:

    \n
    \nInput: input [[[5, 10],\n               [5, 2],\n               [2, 2]]]\n       N = 1, M = 3, K = 2\n       p = 1\nOutput: output = 0\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N, M, K ≤ 1,000
    • \n
    • 1 ≤ input[i], p ≤ 100
    • \n\n
    • Performance is measured with K = 500, M = 500, N = 500
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const int* input, int* output, int N, int M, int K, int P) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n output: cute.Tensor,\n N: cute.Int32,\n M: cute.Int32,\n K: cute.Int32,\n P: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int, M: int, K: int, P: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, 
output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Int32, MutExternalOrigin],\n output: UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n M: Int32,\n K: Int32,\n P: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, M: int, K: int, P: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, M: int, K: int, P: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution 
implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Count 3D Array Element\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, input: torch.Tensor, output: torch.Tensor, N: int, M: int, K: int, P: int\n", + " ):\n", + " # Validate input types and shapes\n", + " assert input.shape == (N, M, K)\n", + " assert output.shape == (1,)\n", + " assert input.dtype == torch.int32\n", + " assert output.dtype == torch.int32\n", + "\n", + " # count the number of element with value k in an input array\n", + " equality_tensor = input == P\n", + " output[0] = torch.sum(equality_tensor)\n", + "\n", + " def 
get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " \"P\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + "\n", + " input = torch.tensor(\n", + " [[[1, 2, 3], [4, 5, 1]], [[1, 1, 1], [2, 2, 2]]], device=\"cuda\", dtype=dtype\n", + " )\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 2,\n", + " \"M\": 2,\n", + " \"K\": 3,\n", + " \"P\": 1,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[[1, 2, 3], [4, 5, 1]], [[1, 1, 1], [2, 2, 2]]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " \"M\": 2,\n", + " \"K\": 3,\n", + " \"P\": 1,\n", + " }\n", + " )\n", + "\n", + " # all_same_value\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[[2] * 16] * 3] * 15, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 15,\n", + " \"M\": 3,\n", + " \"K\": 16,\n", + " \"P\": 2,\n", + " }\n", + " )\n", + "\n", + " # increasing_sequence\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (50, 50, 50), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 50,\n", + " \"M\": 50,\n", + " \"K\": 50,\n", + " \"P\": 5,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 
101, (100, 100, 100), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 100,\n", + " \"M\": 100,\n", + " \"K\": 100,\n", + " \"P\": 51,\n", + " }\n", + " )\n", + "\n", + " # large_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (100, 200, 300), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 100,\n", + " \"M\": 200,\n", + " \"K\": 300,\n", + " \"P\": 3,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.randint(1, 3, (500, 500, 500), device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 500,\n", + " \"M\": 500,\n", + " \"K\": 500,\n", + " \"P\": 2,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/47_subarray_sum.ipynb b/challenges/colab_exports/medium/47_subarray_sum.ipynb new file mode 100644 index 00000000..bd86f292 --- /dev/null +++ b/challenges/colab_exports/medium/47_subarray_sum.ipynb @@ -0,0 +1,497 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a program that computes the sum of a subarray of 32-bit integers.\n You are given an input array input of length N, and two indices S and E.\n S and E are inclusive, 0-based start and end indices \u2014 compute the sum of input[S..E].\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n\n

    Example 1:

    \n
    \nInput: input = [1, 2, 1, 3, 4], S = 1, E = 3\nOutput: output = 6\n
    \n\n

    Example 2:

    \n
    \nInput: input = [1, 2, 3, 4], S = 0, E = 3\nOutput: output = 10\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 100,000,000
    • \n
    • 1 ≤ input[i] ≤ 10
    • \n
    • 0 ≤ S ≤ E ≤ N - 1
    • \n\n
    • Performance is measured with N = 100,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const int* input, int* output, int N, int S, int E) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32, S: cute.Int32, E: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int, S: int, E: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers (i.e. 
pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Int32, MutExternalOrigin],\n output: UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n S: Int32,\n E: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, S: int, E: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, S: int, E: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " 
@abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Subarray Sum\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int, S: int, E: int):\n", + " # Validate input types and shapes\n", + " assert input.shape == (N,)\n", + " assert output.shape == (1,)\n", + " assert input.dtype == torch.int32\n", + " assert output.dtype == torch.int32\n", + "\n", + " # add all element of subarray (input[S], ..., input[E])\n", + " output[0] = torch.sum(input[S : E + 1])\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": 
(ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"S\": (ctypes.c_int, \"in\"),\n", + " \"E\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.tensor([1, 2, 1, 3, 4], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 5,\n", + " \"S\": 1,\n", + " \"E\": 3,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1, 2, 3, 4], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " \"S\": 0,\n", + " \"E\": 3,\n", + " }\n", + " )\n", + "\n", + " # all_same_value\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([2] * 16, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 16,\n", + " \"S\": 0,\n", + " \"E\": 15,\n", + " }\n", + " )\n", + "\n", + " # increasing_sequence\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 5, (32,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 32,\n", + " \"S\": 0,\n", + " \"E\": 31,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 10, (1000,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1000,\n", + " \"S\": 0,\n", + " \"E\": 500,\n", + " }\n", + " )\n", + "\n", + " # large_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (100000,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", 
dtype=dtype),\n", + " \"N\": 100000,\n", + " \"S\": 123,\n", + " \"E\": 98765,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.randint(1, 11, (100000000,), device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 100000000,\n", + " \"S\": 17651,\n", + " \"E\": 98765431,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/48_2d_subarray_sum.ipynb b/challenges/colab_exports/medium/48_2d_subarray_sum.ipynb new file mode 100644 index 00000000..efffc849 --- /dev/null +++ b/challenges/colab_exports/medium/48_2d_subarray_sum.ipynb @@ -0,0 +1,531 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a program that computes the sum of a 2D subarray of 32-bit integers.\n You are given an input 2D array input of length N x M, and two row indices S_ROW and E_ROW and two column indices S_COL and E_COL.\n S_ROW, E_ROW, S_COL and E_COL are inclusive, 0-based start and end indices \u2014 compute the sum of input[S_ROW..E_ROW][S_COL..E_COL].\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n\n

    Example 1:

    \n
    \nInput:  input = [[1, 2, 3],\n                 [4, 5, 1]]\n\n        S_ROW = 0, E_ROW = 1, S_COL = 1, E_COL = 2\nOutput: output = 11\n
    \n\n

    Example 2:

    \n
    \nInput:  input = [[5, 10],\n                 [5, 2]]\n        S_ROW = 0, E_ROW = 0, S_COL = 1, E_COL = 1\nOutput: output = 10\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N, M ≤ 10,000
    • \n
    • 1 ≤ input[i][j] ≤ 10
    • \n
    • 0 ≤ S_ROW ≤ E_ROW ≤ N - 1
    • \n
    • 0 ≤ S_COL ≤ E_COL ≤ M - 1
    • \n\n
    • Performance is measured with M = 10,000, N = 10,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const int* input, int* output, int N, int M, int S_ROW, int E_ROW, int S_COL,\n int E_COL) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n output: cute.Tensor,\n N: cute.Int32,\n M: cute.Int32,\n S_ROW: cute.Int32,\n E_ROW: cute.Int32,\n S_COL: cute.Int32,\n E_COL: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(\n input: jax.Array, N: int, M: int, S_ROW: int, E_ROW: int, S_COL: int, E_COL: int\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import 
block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Int32, MutExternalOrigin],\n output: UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n M: Int32,\n S_ROW: Int32,\n E_ROW: Int32,\n S_COL: Int32,\n E_COL: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n output: torch.Tensor,\n N: int,\n M: int,\n S_ROW: int,\n E_ROW: int,\n S_COL: int,\n E_COL: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n output: torch.Tensor,\n N: int,\n M: int,\n S_ROW: int,\n E_ROW: int,\n S_COL: int,\n E_COL: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = 
name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"2D Subarray Sum\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " output: torch.Tensor,\n", + " N: int,\n", + " M: int,\n", + " S_ROW: int,\n", + " E_ROW: int,\n", + " S_COL: int,\n", + " E_COL: int,\n", + " ):\n", + " # Validate input types and shapes\n", + " assert 
input.shape == (N, M)\n", + " assert output.shape == (1,)\n", + " assert input.dtype == torch.int32\n", + " assert output.dtype == torch.int32\n", + "\n", + " # add all elements of subarray (input[S_ROW..E_ROW][S_COL..E_COL])\n", + " output[0] = torch.sum(input[S_ROW : E_ROW + 1, S_COL : E_COL + 1])\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"S_ROW\": (ctypes.c_int, \"in\"),\n", + " \"E_ROW\": (ctypes.c_int, \"in\"),\n", + " \"S_COL\": (ctypes.c_int, \"in\"),\n", + " \"E_COL\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.tensor([[1, 2, 3], [4, 5, 1]], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 2,\n", + " \"M\": 3,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 1,\n", + " \"S_COL\": 1,\n", + " \"E_COL\": 2,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[5, 10], [5, 2]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2,\n", + " \"M\": 2,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 0,\n", + " \"S_COL\": 1,\n", + " \"E_COL\": 1,\n", + " }\n", + " )\n", + "\n", + " # all_same_value\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[2] * 16] * 3, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " \"M\": 16,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 2,\n", + " \"S_COL\": 
0,\n", + " \"E_COL\": 15,\n", + " }\n", + " )\n", + "\n", + " # increasing_sequence\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (50, 50), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 50,\n", + " \"M\": 50,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 49,\n", + " \"S_COL\": 0,\n", + " \"E_COL\": 49,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (100, 100), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 100,\n", + " \"M\": 100,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 79,\n", + " \"S_COL\": 1,\n", + " \"E_COL\": 87,\n", + " }\n", + " )\n", + "\n", + " # large_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (1000, 1000), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1000,\n", + " \"M\": 1000,\n", + " \"S_ROW\": 10,\n", + " \"E_ROW\": 951,\n", + " \"S_COL\": 12,\n", + " \"E_COL\": 810,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.randint(1, 11, (10000, 10000), device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 10000,\n", + " \"M\": 10000,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 9998,\n", + " \"S_COL\": 1,\n", + " \"E_COL\": 9999,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def 
eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/49_3d_subarray_sum.ipynb b/challenges/colab_exports/medium/49_3d_subarray_sum.ipynb new file mode 100644 index 00000000..c19b3cb3 --- /dev/null +++ b/challenges/colab_exports/medium/49_3d_subarray_sum.ipynb @@ -0,0 +1,560 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a program that computes the sum of a 3D subarray of 32-bit integers.\n You are given an input 3D array input of length N x M x K, and two depth indices S_DEP and E_DEP and two row indices S_ROW and E_ROW and two column indices S_COL and E_COL.\n S_DEP, E_DEP, S_ROW, E_ROW, S_COL and E_COL are inclusive, 0-based start and end indices \u2014 compute the sum of input[S_DEP..E_DEP][S_ROW..E_ROW][S_COL..E_COL].\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n\n

    Example 1:

    \n
    \nInput:  input = [[[1, 2, 3],\n                  [4, 5, 1]],\n                 [[1, 1, 1],\n                  [2, 2, 2]]]\n        N = 2, M = 2, K = 3\n        S_DEP = 0, E_DEP = 1, S_ROW = 0, E_ROW = 0, S_COL = 1, E_COL = 2\nOutput: output = 7\n
    \n\n

    Example 2:

    \n
    \nInput:  input = [[[5, 10],\n                  [5, 2],\n                  [2, 2]]]\n        N = 1, M = 3, K = 2\n        S_DEP = 0, E_DEP = 0, S_ROW = 0, E_ROW = 2, S_COL = 1, E_COL = 1\nOutput: output = 14\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N, M, K ≤ 500
    • \n
    • 1 ≤ input[i] ≤ 10
    • \n
    • 0 ≤ S_DEP ≤ E_DEP ≤ N - 1
    • \n
    • 0 ≤ S_ROW ≤ E_ROW ≤ M - 1
    • \n
    • 0 ≤ S_COL ≤ E_COL ≤ K - 1
    • \n\n
    • Performance is measured with K = 500, M = 500, N = 500
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const int* input, int* output, int N, int M, int K, int S_DEP, int E_DEP,\n int S_ROW, int E_ROW, int S_COL, int E_COL) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n output: cute.Tensor,\n N: cute.Int32,\n M: cute.Int32,\n K: cute.Int32,\n S_DEP: cute.Int32,\n E_DEP: cute.Int32,\n S_ROW: cute.Int32,\n E_ROW: cute.Int32,\n S_COL: cute.Int32,\n E_COL: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(\n input: jax.Array,\n N: int,\n M: int,\n K: int,\n S_DEP: int,\n E_DEP: int,\n S_ROW: int,\n E_ROW: int,\n S_COL: int,\n E_COL: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + 
"collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Int32, MutExternalOrigin],\n output: UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n M: Int32,\n K: Int32,\n S_DEP: Int32,\n E_DEP: Int32,\n S_ROW: Int32,\n E_ROW: Int32,\n S_COL: Int32,\n E_COL: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n output: torch.Tensor,\n N: int,\n M: int,\n K: int,\n S_DEP: int,\n E_DEP: int,\n S_ROW: int,\n E_ROW: int,\n S_COL: int,\n E_COL: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n output: torch.Tensor,\n N: int,\n M: int,\n K: int,\n S_DEP: int,\n E_DEP: int,\n S_ROW: int,\n E_ROW: int,\n S_COL: int,\n E_COL: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core 
Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"3D Subarray Sum\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " 
def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " output: torch.Tensor,\n", + " N: int,\n", + " M: int,\n", + " K: int,\n", + " S_DEP: int,\n", + " E_DEP: int,\n", + " S_ROW: int,\n", + " E_ROW: int,\n", + " S_COL: int,\n", + " E_COL: int,\n", + " ):\n", + " # Validate input types and shapes\n", + " assert input.shape == (N, M, K)\n", + " assert output.shape == (1,)\n", + " assert input.dtype == torch.int32\n", + " assert output.dtype == torch.int32\n", + "\n", + " # add all elements of subarray (input[S_DEP..E_DEP][S_ROW..E_ROW][S_COL..E_COL])\n", + " output[0] = torch.sum(input[S_DEP : E_DEP + 1, S_ROW : E_ROW + 1, S_COL : E_COL + 1])\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " \"S_DEP\": (ctypes.c_int, \"in\"),\n", + " \"E_DEP\": (ctypes.c_int, \"in\"),\n", + " \"S_ROW\": (ctypes.c_int, \"in\"),\n", + " \"E_ROW\": (ctypes.c_int, \"in\"),\n", + " \"S_COL\": (ctypes.c_int, \"in\"),\n", + " \"E_COL\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.tensor(\n", + " [[[1, 2, 3], [4, 5, 1]], [[1, 1, 1], [2, 2, 2]]], device=\"cuda\", dtype=dtype\n", + " )\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 2,\n", + " \"M\": 2,\n", + " \"K\": 3,\n", + " \"S_DEP\": 0,\n", + " \"E_DEP\": 1,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 0,\n", + " \"S_COL\": 1,\n", + " \"E_COL\": 2,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + 
" \"input\": torch.tensor([[[5, 10], [5, 2], [2, 2]]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " \"M\": 3,\n", + " \"K\": 2,\n", + " \"S_DEP\": 0,\n", + " \"E_DEP\": 0,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 2,\n", + " \"S_COL\": 1,\n", + " \"E_COL\": 1,\n", + " }\n", + " )\n", + "\n", + " # all_same_value\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[[2] * 16] * 20] * 30, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 30,\n", + " \"M\": 20,\n", + " \"K\": 16,\n", + " \"S_DEP\": 0,\n", + " \"E_DEP\": 29,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 19,\n", + " \"S_COL\": 0,\n", + " \"E_COL\": 15,\n", + " }\n", + " )\n", + "\n", + " # increasing_sequence\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (50, 50, 50), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 50,\n", + " \"M\": 50,\n", + " \"K\": 50,\n", + " \"S_DEP\": 0,\n", + " \"E_DEP\": 49,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 49,\n", + " \"S_COL\": 0,\n", + " \"E_COL\": 49,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (77, 87, 57), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 77,\n", + " \"M\": 87,\n", + " \"K\": 57,\n", + " \"S_DEP\": 0,\n", + " \"E_DEP\": 76,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 37,\n", + " \"S_COL\": 1,\n", + " \"E_COL\": 50,\n", + " }\n", + " )\n", + "\n", + " # large_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(1, 11, (100, 100, 100), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 100,\n", + " \"M\": 100,\n", + " \"K\": 100,\n", + " \"S_DEP\": 10,\n", + " \"E_DEP\": 91,\n", + " \"S_ROW\": 77,\n", + " 
\"E_ROW\": 91,\n", + " \"S_COL\": 12,\n", + " \"E_COL\": 81,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.randint(1, 11, (500, 500, 500), device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 500,\n", + " \"M\": 500,\n", + " \"K\": 500,\n", + " \"S_DEP\": 11,\n", + " \"E_DEP\": 498,\n", + " \"S_ROW\": 0,\n", + " \"E_ROW\": 499,\n", + " \"S_COL\": 1,\n", + " \"E_COL\": 489,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/4_reduction.ipynb b/challenges/colab_exports/medium/4_reduction.ipynb new file mode 100644 index 00000000..4e519c0c --- /dev/null +++ b/challenges/colab_exports/medium/4_reduction.ipynb @@ -0,0 +1,490 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a GPU program that performs parallel reduction on an array of 32-bit floating point numbers to compute their sum.\n The program should take an input array and produce a single output value containing the sum of all elements.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n\n

    Example 1:

    \n
    \nInput: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]\nOutput: 36.0\n
    \n\n

    Example 2:

    \n
    \nInput: [-2.5, 1.5, -1.0, 2.0]\nOutput: 0.0\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 100,000,000
    • \n
    • -1000.0 ≤ input[i] ≤ 1000.0
    • \n
    • The final sum will always fit within a 32-bit float
    • \n\n
    • Performance is measured with N = 4,194,304
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers (i.e. 
pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> 
Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Reduction\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert input.shape == (N,)\n", + " assert output.shape == (1,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + " output[0] = torch.sum(input.double()).float()\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input = 
torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " N = 8\n", + " return {\"input\": input, \"output\": output, \"N\": N}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 8,\n", + " }\n", + " )\n", + " # negative_numbers\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-2.5, 1.5, -1.0, 2.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # single_element\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([42.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + " # all_zeros\n", + " tests.append(\n", + " {\n", + " \"input\": torch.zeros(1024, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1024,\n", + " }\n", + " )\n", + " # all_ones\n", + " tests.append(\n", + " {\n", + " \"input\": torch.ones(1024, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1024,\n", + " }\n", + " )\n", + " # non_power_of_two\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 5,\n", + " }\n", + " )\n", + " # large_random\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(10000, device=\"cuda\", dtype=dtype).uniform_(-1000.0, 1000.0),\n", + " \"output\": 
torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 10000,\n", + " }\n", + " )\n", + " # large_random_2\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(15000000, device=\"cuda\", dtype=dtype).uniform_(0.0, 1000.0),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 15000000,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4_194_304\n", + " input = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(0.0, 1000.0)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\"input\": input, \"output\": output, \"N\": N}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/50_rms_normalization.ipynb b/challenges/colab_exports/medium/50_rms_normalization.ipynb new file mode 100644 index 00000000..045d6634 --- /dev/null +++ b/challenges/colab_exports/medium/50_rms_normalization.ipynb @@ -0,0 +1,551 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement RMS Normalization forward pass for 1D input vectors. Given an input tensor of shape [N] where N is the number of elements, compute the normalized output using a scalar scale (gamma) and shift (beta) parameter.\n

    \n\n

    \n RMS Normalization computes:\n $$\n \\begin{align}\n \\text{rms} &= \\sqrt{\\frac{1}{N} \\sum_{i=1}^{N} x_i^2 + \\epsilon} \\\\\n \\hat{x}_i &= \\frac{x_i}{\\text{rms}} \\\\\n y_i &= \\gamma \\hat{x}_i + \\beta\n \\end{align}\n $$\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output tensor
    • \n
    \n\n

    Example 1:

    \n
    \nInput:  input = [1.0, 2.0, 3.0, 4.0]  (N=4)\n        gamma = 1.0\n        beta = 0.0\n        eps = 1e-5\nOutput: output = [0.36514813, 0.73029625, 1.0954444, 1.4605925 ]\n
    \n\n

    Example 2:

    \n
    \nInput:  input = [1.0, 2.0, 3.0]  (N=3)\n        gamma = 1.0\n        beta = 0.0\n        eps = 1e-5\nOutput: output = [0.46290955, 0.9258191, 1.3887286]\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 N \u2264 100,000
    • \n
    • eps = 1e-5
    • \n
    • -100.0 \u2264 input values \u2264 100.0
    • \n
    • 0.1 \u2264 gamma \u2264 10.0
    • \n
    • -10.0 \u2264 beta \u2264 10.0
    • \n\n
    • Performance is measured with N = 100,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float gamma, float beta, float* output, int N,\n float eps) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(\n input: cute.Tensor,\n gamma: cute.Float32,\n beta: cute.Float32,\n output: cute.Tensor,\n N: cute.Int32,\n eps: cute.Float32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input, gamma, beta are tensors on the GPU\n@jax.jit\ndef solve(input: jax.Array, gamma: jax.Array, beta: jax.Array, N: int, eps: float) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom 
std.math import ceildiv\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n gamma: Float32,\n beta: Float32,\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n eps: Float32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(\n input: torch.Tensor,\n gamma: torch.Tensor,\n beta: torch.Tensor,\n output: torch.Tensor,\n N: int,\n eps: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, gamma: float, beta: float, output: torch.Tensor, N: int, eps: float):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def 
reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"RMS Normalization\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " gamma: float,\n", + " beta: float,\n", + " output: torch.Tensor,\n", + " N: int,\n", + " eps: float,\n", + " ):\n", + " assert input.shape == output.shape == (N,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + "\n", + " # RMSNorm: compute root mean square (without mean-centering)\n", + " rms = 
torch.sqrt(torch.mean(input**2) + eps) # shape: scalar\n", + "\n", + " # Normalize\n", + " normalized = input / rms # shape: [N]\n", + "\n", + " # Scale and shift\n", + " output.copy_(gamma * normalized + beta)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"gamma\": (ctypes.c_float, \"in\"),\n", + " \"beta\": (ctypes.c_float, \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"eps\": (ctypes.c_float, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 4\n", + " input = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " gamma = 1.0\n", + " beta = 0.0\n", + " output = torch.empty(N, device=\"cuda\", dtype=dtype)\n", + " eps = 1e-5\n", + " return {\n", + " \"input\": input,\n", + " \"gamma\": gamma,\n", + " \"beta\": beta,\n", + " \"output\": output,\n", + " \"N\": N,\n", + " \"eps\": eps,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_small\n", + " N = 3\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, 2.0, 3.0], device=\"cuda\", dtype=dtype),\n", + " \"gamma\": 1.0,\n", + " \"beta\": 0.0,\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # single_feature\n", + " N = 1\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([5.0], device=\"cuda\", dtype=dtype),\n", + " \"gamma\": 2.0,\n", + " \"beta\": -1.0,\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # all zeros\n", + " N = 4\n", + " tests.append(\n", + " {\n", + " \"input\": torch.zeros(N, device=\"cuda\", 
dtype=dtype),\n", + " \"gamma\": 1.0,\n", + " \"beta\": 0.0,\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # negative numbers\n", + " N = 5\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-1.0, -2.0, -3.0, -4.0, -5.0], device=\"cuda\", dtype=dtype),\n", + " \"gamma\": 1.0,\n", + " \"beta\": 0.0,\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # different gamma/beta\n", + " N = 3\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([0.0, 1.0, 2.0], device=\"cuda\", dtype=dtype),\n", + " \"gamma\": 0.5,\n", + " \"beta\": -1.0,\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # large values\n", + " N = 8\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"gamma\": 1.5,\n", + " \"beta\": 0.0,\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " # large N\n", + " N = 2000\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-100.0, 100.0),\n", + " \"gamma\": 1.3,\n", + " \"beta\": 0.0,\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"eps\": 1e-5,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 100000\n", + " return {\n", + " \"input\": torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"gamma\": 1.5,\n", + " \"beta\": 0.0,\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype),\n", + " \"N\": N,\n", + " \"eps\": 1e-5,\n", + " }\n", + "\n", + "\n", + "ch 
= Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/51_max_subarray_sum.ipynb b/challenges/colab_exports/medium/51_max_subarray_sum.ipynb new file mode 100644 index 00000000..3a0c5ef0 --- /dev/null +++ b/challenges/colab_exports/medium/51_max_subarray_sum.ipynb @@ -0,0 +1,517 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a program that computes the maximum sum of any contiguous subarray of length exactly window_size. You are given an array input of length N consisting of 32-bit signed integers, and an integer window_size.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output variable
    • \n
    \n\n

    Example 1:

    \n
    \nInput:  input = [1, 2, 4, 2, 3], window_size = 2\nOutput: output = 6\n
    \n\n

    Example 2:

    \n
    \nInput:  input = [-1, -4, -2, 1], window_size = 3\nOutput: output = -5\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 50,000
    • \n
    • -10 ≤ input[i] ≤ 10
    • \n
    • 1 ≤ window_size ≤ N
    • \n\n
    • Performance is measured with N = 50,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const int* input, int* output, int N, int window_size) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32, window_size: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int, window_size: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers (i.e. 
pointers to memory on the GPU)\n@export\ndef solve(\n input: UnsafePointer[Int32, MutExternalOrigin],\n output: UnsafePointer[Int32, MutExternalOrigin],\n N: Int32,\n window_size: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, window_size: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int, window_size: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " 
@abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Max Subarray Sum\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int, window_size: int):\n", + " # Validate input types and shapes\n", + " assert input.shape == (N,)\n", + " assert output.shape == (1,)\n", + " assert input.dtype == torch.int32\n", + " assert output.dtype == torch.int32\n", + "\n", + " # Computes the maximum sum of any contiguous subarray of length exactly window_size\n", + " # using a sliding window approach.\n", + "\n", + " # Compute the sum of the first window_size elements (the initial window)\n", + " current_sum = input[:window_size].sum()\n", + 
"\n", + " # Initialize max_sum with the sum of the first window\n", + " max_sum = current_sum\n", + "\n", + " # Slide the window across the array from index window_size to N - 1\n", + " for i in range(window_size, N):\n", + " # Update the current sum by subtracting the element leaving the window\n", + " # and adding the new element entering the window\n", + " current_sum += input[i] - input[i - window_size]\n", + "\n", + " # Update max_sum if the current sum is greater\n", + " max_sum = torch.max(max_sum, current_sum)\n", + "\n", + " # Store the final result in the output tensor\n", + " output[0] = max_sum\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"window_size\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.tensor([1, 2, 4, 2, 3], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 5,\n", + " \"window_size\": 2,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.int32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-1, -4, -2, 1], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " \"window_size\": 3,\n", + " }\n", + " )\n", + "\n", + " # all_same_value\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([2] * 16, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 16,\n", + " \"window_size\": 15,\n", + " }\n", + " )\n", + "\n", + " # all_minus_value\n", + " 
tests.append(\n", + " {\n", + " \"input\": torch.tensor([-10] * 1000, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1000,\n", + " \"window_size\": 500,\n", + " }\n", + " )\n", + "\n", + " # increasing_sequence\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(-10, 11, (123,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 123,\n", + " \"window_size\": 7,\n", + " }\n", + " )\n", + "\n", + " # medium_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(-10, 11, (1000,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1000,\n", + " \"window_size\": 476,\n", + " }\n", + " )\n", + "\n", + " # large_size\n", + " tests.append(\n", + " {\n", + " \"input\": torch.randint(-10, 11, (10000,), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 10000,\n", + " \"window_size\": 7011,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.int32\n", + " input = torch.randint(-10, 11, (50000,), device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"N\": 50000,\n", + " \"window_size\": 25000,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/55_attn_w_linear_bias.ipynb b/challenges/colab_exports/medium/55_attn_w_linear_bias.ipynb new file mode 100644 index 00000000..e1cf5c75 --- /dev/null +++ b/challenges/colab_exports/medium/55_attn_w_linear_bias.ipynb @@ -0,0 +1,574 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement Attention with Linear Biases (ALiBi), following the method described in\n \n \"Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation\"\n , for a given set of matrices.\n Given the query matrix Q of size M\u00d7d, key matrix K of size N\u00d7d, and value matrix\n V of size N\u00d7d, your program should compute the output matrix using the formula:\n

    \n\n

    \n $$\n \\text{Attention}_{ALiBi}(Q, K, V) = \\text{softmax}\\Bigl( \\frac{QK^T}{\\sqrt{d}} + \\alpha \\cdot \\Delta \\Bigr)V\n $$\n

    \n\n

    \n where α is a slope controlling the linear bias and Δ = i - j represents the relative position between query i and key j.\n The softmax function is applied row-wise. Q, K, V, output, and α are all of data type float32;\n M, N, d are of data type int32.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The\n solve function signature must remain unchanged\n
    • \n
    • The final result must be stored in the output matrix\n output\n
    • \n
    \n

    Example 1:

    \n

    \nInput:
    \nQ (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0\n\\end{bmatrix}\n$$\nK (3\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0 \\\\\n0.0 & 0.0 & 1.0 & 0.0\n\\end{bmatrix}\n$$\nV (3\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n5.0 & 6.0 & 7.0 & 8.0 \\\\\n9.0 & 10.0 & 11.0 & 12.0\n\\end{bmatrix}\n$$\n$\\alpha = 0.5$\n

    \n\n

    \nOutput:
    \noutput (2\u00d74):\n$$\n\\begin{bmatrix}\n3.05 & 4.05 & 5.05 & 6.05 \\\\\n3.93 & 4.93 & 5.93 & 6.93\n\\end{bmatrix}\n$$\n

    \n\n

    Example 2:

    \n

    \nInput:
    \nQ (1\u00d72):\n$$\n\\begin{bmatrix}\n1.0 & 2.0\n\\end{bmatrix}\n$$\nK (2\u00d72):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 \\\\\n0.0 & 1.0\n\\end{bmatrix}\n$$\nV (2\u00d72):\n$$\n\\begin{bmatrix}\n3.0 & 4.0 \\\\\n5.0 & 6.0\n\\end{bmatrix}\n$$\n\u03b1 = 0.8\n

    \n\n

    \nOutput:
    \noutput (1\u00d72):\n$$\n\\begin{bmatrix}\n3.95 & 4.95\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • Matrix Q is of size M\u00d7d and matrices K and V are of size\n N\u00d7d
    • \n
    • 1 ≤ M, N ≤ 2048
    • \n
    • 1 ≤ d ≤ 1024
    • \n
    • -1.0 ≤ α ≤ 1.0
    • \n\n
    • Performance is measured with M = 2,048, N = 2,048, d = 1,024
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K, V, output are device pointers\nextern \"C\" void solve(const float* Q, const float* K, const float* V, float* output, int M, int N,\n int d, float alpha) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K, V, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K: cute.Tensor,\n V: cute.Tensor,\n output: cute.Tensor,\n M: cute.Int32,\n N: cute.Int32,\n d: cute.Int32,\n alpha: cute.Float32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K, V are tensors on the GPU\n@jax.jit\ndef solve(\n Q: jax.Array, K: jax.Array, V: jax.Array, M: int, N: int, d: int, alpha: float\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, 
thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# Q, K, V, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K: UnsafePointer[Float32, MutExternalOrigin],\n V: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n d: Int32,\n alpha: Float32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n M: int,\n N: int,\n d: int,\n alpha: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n M: int,\n N: int,\n d: int,\n alpha: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, 
num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Attention with Linear Biases\",\n", + " atol=1e-04,\n", + " rtol=1e-04,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K: torch.Tensor,\n", + " V: torch.Tensor,\n", + " output: torch.Tensor,\n", + " M: int,\n", + " N: 
int,\n", + " d: int,\n", + " alpha: float,\n", + " ):\n", + " assert Q.shape == (M, d)\n", + " assert K.shape == (N, d)\n", + " assert V.shape == (N, d)\n", + " assert output.shape == (M, d)\n", + "\n", + " scale = d**0.5\n", + " attn = torch.matmul(Q, K.t()) / scale\n", + "\n", + " pos_bias = alpha * (\n", + " torch.arange(M, device=Q.device).unsqueeze(1)\n", + " - torch.arange(N, device=K.device).unsqueeze(0)\n", + " )\n", + " attn = attn + pos_bias\n", + "\n", + " attn = torch.softmax(attn, dim=1) # M , N\n", + " torch.matmul(attn, V, out=output)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"V\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"d\": (ctypes.c_int, \"in\"),\n", + " \"alpha\": (ctypes.c_float, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " Q = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype)\n", + " K = torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " V = torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [9.0, 10.0, 11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " output = torch.empty(2, 4, device=\"cuda\", dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": 2, \"N\": 3, \"d\": 4, \"alpha\": 0.5}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_example 1\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor([[1.0, 2.0]], device=\"cuda\", 
dtype=dtype),\n", + " \"K\": torch.tensor([[1.0, 0.0], [0.0, 1.0]], device=\"cuda\", dtype=dtype),\n", + " \"V\": torch.tensor([[3.0, 4.0], [5.0, 6.0]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, 2, device=\"cuda\", dtype=dtype),\n", + " \"M\": 1,\n", + " \"N\": 2,\n", + " \"d\": 2,\n", + " \"alpha\": 0.8,\n", + " }\n", + " )\n", + "\n", + " # basic_example 2\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [9.0, 10.0, 11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.empty(2, 4, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"N\": 3,\n", + " \"d\": 4,\n", + " \"alpha\": 0.5,\n", + " }\n", + " )\n", + "\n", + " # zero_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"K\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"V\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, 5, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"N\": 3,\n", + " \"d\": 5,\n", + " \"alpha\": 0.5,\n", + " }\n", + " )\n", + "\n", + " # mixed_values\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0], [-7.0, 8.0, -9.0], [10.0, -11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[2.0, -1.0, 3.0], [-4.0, 5.0, -6.0], [7.0, -8.0, 9.0], [-10.0, 11.0, -12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 0.5, -0.5], [-1.0, 2.0, 3.0], [4.0, -2.0, 1.0], [0.0, 1.0, -1.0]],\n", + " 
device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.empty(4, 3, device=\"cuda\", dtype=dtype),\n", + " \"M\": 4,\n", + " \"N\": 4,\n", + " \"d\": 3,\n", + " \"alpha\": 1.0,\n", + " }\n", + " )\n", + "\n", + " # large_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.empty((64, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"K\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"V\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"output\": torch.empty(64, 32, device=\"cuda\", dtype=dtype),\n", + " \"M\": 64,\n", + " \"N\": 128,\n", + " \"d\": 32,\n", + " \"alpha\": -0.76,\n", + " }\n", + " )\n", + "\n", + " # different alpha\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.empty((64, 32), device=\"cuda\", dtype=dtype).uniform_(-1, 1),\n", + " \"K\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-1, 1),\n", + " \"V\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-1, 1),\n", + " \"output\": torch.empty(64, 32, device=\"cuda\", dtype=dtype),\n", + " \"M\": 64,\n", + " \"N\": 128,\n", + " \"d\": 32,\n", + " \"alpha\": -0.3,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, N, d = 2048, 2048, 1024\n", + " Q = torch.empty((M, d), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1)\n", + " K = torch.empty((N, d), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1)\n", + " V = torch.empty((N, d), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1)\n", + " output = torch.empty(M, d, device=\"cuda\", dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": M, \"N\": N, \"d\": d, \"alpha\": 0.5}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + 
}, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/57_fp16_batched_matmul.ipynb b/challenges/colab_exports/medium/57_fp16_batched_matmul.ipynb new file mode 100644 index 00000000..cc96a9ca --- /dev/null +++ b/challenges/colab_exports/medium/57_fp16_batched_matmul.ipynb @@ -0,0 +1,499 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a batched matrix multiplication in FP16. Given a batch of matrices A of shape [B, M, K] and a batch of matrices B of shape [B, K, N], compute the output batch C of shape [B, M, N] such that for each batch index b:\n $$\n C_b = A_b \\times B_b\n $$\n All matrices are stored in row-major order and use 16-bit floating point numbers (FP16/half). Accumulation during multiplication should use FP32 for better precision before converting the final result to FP16.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • Accumulation during multiplication should use FP32 for better precision before converting the final result to FP16
    • \n
    • The final result must be stored in the C array as half
    • \n
    \n\n

    Example 1:

    \n
    \nInput:\nBATCH = 2, M = 2, K = 3, N = 2\nA = [\n  [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],\n  [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]\n]\nB = [\n  [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],\n  [[6.0, 5.0], [4.0, 3.0], [2.0, 1.0]]\n]\nOutput:\nC = [\n  [[22.0, 28.0], [49.0, 64.0]],\n  [[92.0, 68.0], [128.0, 95.0]]\n]\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ BATCH ≤ 128
    • \n
    • 1 ≤ M, N, K ≤ 1024
    • \n\n
    • Performance is measured with K = 256, M = 256, N = 256
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n#include \n\n// A, B, C are device pointers\nextern \"C\" void solve(const half* A, const half* B, half* C, int BATCH, int M, int N, int K) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(\n A: cute.Tensor,\n B: cute.Tensor,\n C: cute.Tensor,\n BATCH: cute.Int32,\n M: cute.Int32,\n N: cute.Int32,\n K: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on the GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, BATCH: int, M: int, N: int, K: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef 
solve(\n A: UnsafePointer[Float16, MutExternalOrigin],\n B: UnsafePointer[Float16, MutExternalOrigin],\n C: UnsafePointer[Float16, MutExternalOrigin],\n BATCH: Int32,\n M: Int32,\n N: Int32,\n K: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, BATCH: int, M: int, N: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# a, b, c are tensors on the GPU\ndef solve(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, BATCH: int, M: int, N: int, K: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " 
\"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"FP16 Batched Matrix Multiplication\",\n", + " atol=5e-2,\n", + " rtol=5e-2,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, BATCH: int, M: int, N: int, K: int\n", + " ):\n", + " # A: (BATCH, M, K), B: (BATCH, K, N), C: (BATCH, M, N)\n", + " A = A.view(BATCH, M, K)\n", + " B = B.view(BATCH, K, N)\n", + " # Use FP32 for accumulation, then convert to FP16\n", + " A_f32 = A.to(torch.float32)\n", + " B_f32 = B.to(torch.float32)\n", + " result = torch.bmm(A_f32, B_f32)\n", + " C.copy_(result.to(torch.float16))\n", + "\n", + " def 
get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_uint16), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_uint16), \"in\"),\n", + " \"C\": (ctypes.POINTER(ctypes.c_uint16), \"out\"),\n", + " \"BATCH\": (ctypes.c_int, \"in\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float16\n", + " BATCH, M, K, N = 2, 2, 3, 2\n", + " A = torch.tensor(\n", + " [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " B = torch.tensor(\n", + " [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[6.0, 5.0], [4.0, 3.0], [2.0, 1.0]]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " C = torch.empty(BATCH, M, N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"B\": B, \"C\": C, \"BATCH\": BATCH, \"M\": M, \"N\": N, \"K\": K}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float16\n", + " device = \"cuda\"\n", + " tests = []\n", + "\n", + " # 1. basic_example\n", + " A1 = torch.tensor(\n", + " [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]],\n", + " device=device,\n", + " dtype=dtype,\n", + " ).flatten()\n", + " B1 = torch.tensor(\n", + " [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [[6.0, 5.0], [4.0, 3.0], [2.0, 1.0]]],\n", + " device=device,\n", + " dtype=dtype,\n", + " ).flatten()\n", + " C1 = torch.empty((2, 2, 2), device=device, dtype=dtype)\n", + " tests.append({\"A\": A1, \"B\": B1, \"C\": C1, \"BATCH\": 2, \"M\": 2, \"N\": 2, \"K\": 3})\n", + "\n", + " # 2. 
single_batch\n", + " A2 = torch.tensor(\n", + " [[[1.0, 0.0, 2.0], [0.0, 1.0, 2.0], [2.0, 1.0, 0.0]]], device=device, dtype=dtype\n", + " ).flatten()\n", + " B2 = torch.tensor(\n", + " [[[2.0, 1.0, 0.0], [1.0, 2.0, 0.0], [0.0, 1.0, 2.0]]], device=device, dtype=dtype\n", + " ).flatten()\n", + " C2 = torch.empty((1, 3, 3), device=device, dtype=dtype)\n", + " tests.append({\"A\": A2, \"B\": B2, \"C\": C2, \"BATCH\": 1, \"M\": 3, \"N\": 3, \"K\": 3})\n", + "\n", + " # 3. batch_4_small\n", + " A3 = torch.empty((4, 2, 2), device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " B3 = torch.empty((4, 2, 2), device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " C3 = torch.empty((4, 2, 2), device=device, dtype=dtype)\n", + " tests.append({\"A\": A3, \"B\": B3, \"C\": C3, \"BATCH\": 4, \"M\": 2, \"N\": 2, \"K\": 2})\n", + "\n", + " # 4. batch_8_rectangular\n", + " A4 = torch.empty((8, 4, 2), device=device, dtype=dtype).uniform_(-5.0, 5.0)\n", + " B4 = torch.empty((8, 2, 3), device=device, dtype=dtype).uniform_(-5.0, 5.0)\n", + " C4 = torch.empty((8, 4, 3), device=device, dtype=dtype)\n", + " tests.append({\"A\": A4, \"B\": B4, \"C\": C4, \"BATCH\": 8, \"M\": 4, \"N\": 3, \"K\": 2})\n", + "\n", + " # 5. batch_16_medium\n", + " A5 = torch.empty((16, 16, 16), device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " B5 = torch.empty((16, 16, 16), device=device, dtype=dtype).uniform_(-1.0, 1.0)\n", + " C5 = torch.empty((16, 16, 16), device=device, dtype=dtype)\n", + " tests.append({\"A\": A5, \"B\": B5, \"C\": C5, \"BATCH\": 16, \"M\": 16, \"N\": 16, \"K\": 16})\n", + "\n", + " # 6. 
batch_2_non_square\n", + " A6 = torch.empty((2, 8, 4), device=device, dtype=dtype).uniform_(-5.0, 5.0)\n", + " B6 = torch.empty((2, 4, 6), device=device, dtype=dtype).uniform_(-5.0, 5.0)\n", + " C6 = torch.empty((2, 8, 6), device=device, dtype=dtype)\n", + " tests.append({\"A\": A6, \"B\": B6, \"C\": C6, \"BATCH\": 2, \"M\": 8, \"N\": 6, \"K\": 4})\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float16\n", + " BATCH, M, N, K = 32, 256, 256, 256\n", + " A = torch.empty(BATCH, M, K, device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0)\n", + " B = torch.empty(BATCH, K, N, device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0)\n", + " C = torch.empty(BATCH, M, N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"B\": B, \"C\": C, \"BATCH\": BATCH, \"M\": M, \"N\": N, \"K\": K}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/58_fp16_dot_product.ipynb b/challenges/colab_exports/medium/58_fp16_dot_product.ipynb new file mode 100644 index 00000000..ef74ab04 --- /dev/null +++ b/challenges/colab_exports/medium/58_fp16_dot_product.ipynb @@ -0,0 +1,504 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that computes the dot product of two vectors containing 16-bit floating point numbers (FP16/half).\n The dot product is the sum of the products of the corresponding elements of two vectors.\n

    \n

    \n Mathematically, the dot product of two vectors $A$ and $B$ of length $n$ is defined as:\n $$\n A \\cdot B = \\sum_{i=0}^{n-1} A_i \\cdot B_i = A_0 \\cdot B_0 + A_1 \\cdot B_1 + \\ldots + A_{n-1} \\cdot B_{n-1}\n $$\n

    \n

    \n All inputs are stored as 16-bit floating point numbers (FP16/half). For best precision, accumulation during multiplication should use FP32 before converting the final result to FP16.\n

    \n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • Accumulation during multiplication should use FP32 for better precision before converting the final result to FP16
    • \n
    • The final result must be stored in the output variable as half
    • \n
    \n

    Example 1:

    \n
    Input:  A = [1.0, 2.0, 3.0, 4.0]\n               B = [5.0, 6.0, 7.0, 8.0]\n       Output: result = 70.0  (1.0*5.0 + 2.0*6.0 + 3.0*7.0 + 4.0*8.0)
    \n

    Example 2:

    \n
    Input:  A = [0.5, 1.5, 2.5]\n               B = [2.0, 3.0, 4.0]\n       Output: result = 15.5  (0.5*2.0 + 1.5*3.0 + 2.5*4.0)
    \n

    Constraints

    \n
      \n
    • A and B have identical lengths
    • \n
    • 1 \u2264 N \u2264 100,000,000
    • \n\n
    • Performance is measured with N = 100,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n#include \n\n// A, B, result are device pointers\nextern \"C\" void solve(const half* A, const half* B, half* result, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, result are tensors on the GPU\n@cute.jit\ndef solve(A: cute.Tensor, B: cute.Tensor, result: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on the GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# A, B, result are device pointers\n@export\ndef solve(\n A: UnsafePointer[Float16, MutExternalOrigin],\n B: 
UnsafePointer[Float16, MutExternalOrigin],\n result: UnsafePointer[Float16, MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, result are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, result: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# A, B, result are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, result: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get 
the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"FP16 Dot Product\", atol=5e-2, rtol=5e-2, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(self, A: torch.Tensor, B: torch.Tensor, result: torch.Tensor, N: int):\n", + " assert A.shape == (N,)\n", + " assert B.shape == (N,)\n", + " assert result.shape == (1,)\n", + " # Use FP32 for accumulation, then convert to FP16\n", + " A_f32 = A.to(torch.float32)\n", + " B_f32 = B.to(torch.float32)\n", + " result_f32 = torch.dot(A_f32, B_f32)\n", + " result[0] = result_f32.to(torch.float16)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_uint16), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_uint16), \"in\"),\n", + " \"result\": (ctypes.POINTER(ctypes.c_uint16), \"out\"),\n", + " 
\"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float16\n", + " A = torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " B = torch.tensor([5.0, 6.0, 7.0, 8.0], device=\"cuda\", dtype=dtype)\n", + " result = torch.empty(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"result\": result,\n", + " \"N\": 4,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float16\n", + " tests = []\n", + " # basic_small\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1.0, 2.0, 3.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([5.0, 6.0, 7.0, 8.0], device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # all_zeros\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([0.0] * 16, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([0.0] * 16, device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 16,\n", + " }\n", + " )\n", + " # negative_numbers\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([-1.0, -2.0, -3.0, -4.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([-5.0, -6.0, -7.0, -8.0], device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # mixed_positive_negative\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1.0, -2.0, 3.0, -4.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([-1.0, 2.0, -3.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # orthogonal_vectors\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([1.0, 0.0, 0.0], device=\"cuda\", 
dtype=dtype),\n", + " \"B\": torch.tensor([0.0, 1.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + " # medium_sized_vector\n", + " tests.append(\n", + " {\n", + " \"A\": torch.empty(1000, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"B\": torch.empty(1000, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1000,\n", + " }\n", + " )\n", + " # large_vector\n", + " tests.append(\n", + " {\n", + " \"A\": torch.empty(10000, device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"B\": torch.empty(10000, device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"result\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 10000,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float16\n", + " N = 100000000\n", + " A = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " B = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " result = torch.zeros(1, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"result\": result,\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/5_softmax.ipynb b/challenges/colab_exports/medium/5_softmax.ipynb new file mode 100644 index 00000000..8a57f508 --- /dev/null +++ b/challenges/colab_exports/medium/5_softmax.ipynb @@ -0,0 +1,506 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a program that computes the softmax function for an array of 32-bit floating-point numbers on a GPU. The softmax function is defined as follows:\n

    \n\n

    \n For an input array $x$ of length $n$, the softmax of $x$, denoted $\\sigma(x)$, is an array of length $n$ where the $i$-th element is:\n

    \n\n

    \n $\\sigma(x)_i = \\frac{e^{x_i}}{\\sum_{j=1}^{n} e^{x_j}}$\n

    \n\n

    \n Your solution should handle potential overflow issues by using the \"max trick\". Subtract the maximum value of the input array from each element before exponentiation.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the array output
    • \n
    \n\n

    Example 1:

    \n
    \nInput: [1.0, 2.0, 3.0], N = 3\nOutput: [0.090, 0.244, 0.665] (approximately)\n
    \n\n

    Example 2:

    \n
    \nInput: [-10.0, -5.0, 0.0, 5.0, 10.0], N = 5\nOutput: [2.047e-09, 3.038e-07, 4.509e-05, 6.693e-03, 9.933e-01] (approximately)\n
    \n\n

    Constraints

    \n\n
      \n
    • 1 ≤ N ≤ 500,000
    • \n\n
    • Performance is measured with N = 500,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n__global__ void softmax_kernel(const float* input, float* output, int N) {}\n\n// input, output are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* input, float* output, int N) {\n int threadsPerBlock = 256;\n int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;\n\n softmax_kernel<<>>(input, output, N);\n cudaDeviceSynchronize();\n}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import 
DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n@export\ndef solve(\n input: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n@triton.jit\ndef softmax_kernel(input, output, N, BLOCK_SIZE: tl.constexpr):\n input = input.to(tl.pointer_type(tl.float32))\n output = output.to(tl.pointer_type(tl.float32))\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = 
num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(name=\"Softmax\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\")\n", + "\n", + " def reference_impl(self, input: torch.Tensor, output: torch.Tensor, N: int):\n", + " assert input.shape == output.shape == (N,)\n", + " assert input.dtype == output.dtype\n", + " assert input.device == output.device\n", + " max_val = torch.max(input)\n", + " exp_x = torch.exp(input - max_val)\n", + " sum_exp = torch.sum(exp_x)\n", + " output.copy_(exp_x / sum_exp)\n", + 
"\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input = torch.tensor([1.0, 2.0, 3.0], device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(3, device=\"cuda\", dtype=dtype)\n", + " N = 3\n", + " return {\"input\": input, \"output\": output, \"N\": N}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + " # basic_small\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, 2.0, 3.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + " # all_zeros\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([0.0, 0.0, 0.0, 0.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # negative_numbers\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([-1.0, -2.0, -3.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + " # mixed_positive_negative\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1.0, -2.0, 3.0, -4.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # very_small_numbers\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1e-6, 1e-7, 1e-8, 1e-9], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " # large_numbers\n", + " tests.append(\n", + " {\n", + " \"input\": 
torch.tensor([10.0, 15.0, 20.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, device=\"cuda\", dtype=dtype),\n", + " \"N\": 3,\n", + " }\n", + " )\n", + " # single_element\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([5.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(1, device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " }\n", + " )\n", + " # all_same_values\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([2.5] * 10, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(10, device=\"cuda\", dtype=dtype),\n", + " \"N\": 10,\n", + " }\n", + " )\n", + " # large_array\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty(2048, device=\"cuda\", dtype=dtype).uniform_(0.0, 10.0),\n", + " \"output\": torch.empty(2048, device=\"cuda\", dtype=dtype),\n", + " \"N\": 2048,\n", + " }\n", + " )\n", + " # large_max_small_values\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([1000.0, 1.0, 2.0, 3.0], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " }\n", + " )\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 500000\n", + " input = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0)\n", + " output = torch.empty(N, device=\"cuda\", dtype=dtype)\n", + " return {\"input\": input, \"output\": output, \"N\": N}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/60_top_p_sampling.ipynb b/challenges/colab_exports/medium/60_top_p_sampling.ipynb new file mode 100644 index 00000000..1cde2178 --- /dev/null +++ b/challenges/colab_exports/medium/60_top_p_sampling.ipynb @@ -0,0 +1,586 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Write a GPU program that implements top-p (nucleus) sampling for LLM inference.\n

    \n\n

    \n Top-p sampling is a text generation technique where you sample from the smallest set of tokens whose cumulative probability exceeds threshold p.\n This balances randomness and quality better than pure top-k or greedy sampling.\n

    \n\n

    \n Given logits (unnormalized scores) from a language model:\n

      \n
    1. Convert logits to probabilities using softmax
    2. \n
    3. Sort tokens by probability (descending)
    4. \n
    5. Find the smallest set where cumulative probability \u2265 p (the \"nucleus\")
    6. \n
    7. Renormalize the nucleus probabilities to sum to 1
    8. \n
    9. Sample a token from the nucleus using the provided random seed
    10. \n
    \n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • Ensure numerical stability when computing softmax
    • \n
    \n\n

    Example 1:

    \n
    \nInput:\n  logits = [1.0, 2.0, 3.0, 0.5]\n  p = 0.9\n  seed = 42\n\nOutput:\n  sampled_token = 2 or 1\n  (tokens with highest probabilities, sampled randomly)\n
    \n\n

    Example 2:

    \n
    \nInput:\n  logits = [10.0, 1.0, 1.0]\n  p = 0.5\n  seed = 123\n\nOutput:\n  sampled_token = 0\n  (single token dominates the probability mass)\n
    \n\n

    Constraints

    \n
      \n
    • 3 ≤ vocab_size ≤ 50,000
    • \n
    • -100.0 ≤ logits[i] ≤ 100.0
    • \n
    • 0.0 < p ≤ 1.0
    • \n
    • 0 ≤ sampled_token < vocab_size
    • \n\n
    • Performance is measured with vocab_size = 50,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\nextern \"C\" void solve(const float* logits, const float* p, const int* seed, int* sampled_token,\n int vocab_size) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n@cute.jit\ndef solve(\n logits: cute.Tensor,\n p: cute.Tensor,\n seed: cute.Tensor,\n sampled_token: cute.Tensor,\n vocab_size: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n@jax.jit\ndef solve(logits: jax.Array, p: jax.Array, seed: jax.Array, vocab_size: int) -> jax.Array:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.memory import UnsafePointer\nfrom std.gpu import block_dim, block_idx, thread_idx\n\n\n@export\ndef solve(\n logits: UnsafePointer[Float32, MutExternalOrigin],\n p: UnsafePointer[Float32, MutExternalOrigin],\n seed: UnsafePointer[Int32, 
MutExternalOrigin],\n sampled_token: UnsafePointer[Int32, MutExternalOrigin],\n vocab_size: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\ndef solve(\n logits: torch.Tensor,\n p: torch.Tensor,\n seed: torch.Tensor,\n sampled_token: torch.Tensor,\n vocab_size: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\ndef solve(\n logits: torch.Tensor,\n p: torch.Tensor,\n seed: torch.Tensor,\n sampled_token: torch.Tensor,\n vocab_size: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", 
+ " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Top-p Sampling\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " logits: torch.Tensor,\n", + " p: torch.Tensor,\n", + " seed: torch.Tensor,\n", + " sampled_token: torch.Tensor,\n", + " vocab_size: int,\n", + " ):\n", + " assert logits.shape == (vocab_size,)\n", + " assert p.shape == (1,)\n", + " assert seed.shape == (1,)\n", + " assert sampled_token.shape == (1,)\n", + " assert logits.dtype == torch.float32\n", + " assert p.dtype == torch.float32\n", + "\n", + " p_value = p.item()\n", + " seed_value = seed.item()\n", + "\n", + " max_logit = torch.max(logits)\n", + " exp_logits = torch.exp(logits - max_logit)\n", + " probs = exp_logits / torch.sum(exp_logits)\n", + "\n", + " 
sorted_probs, sorted_indices = torch.sort(probs, descending=True)\n", + " cumsum = torch.cumsum(sorted_probs, dim=0)\n", + "\n", + " cutoff_idx = torch.searchsorted(cumsum, p_value, right=False).item()\n", + " cutoff_idx = min(cutoff_idx + 1, vocab_size)\n", + "\n", + " nucleus_probs = sorted_probs[:cutoff_idx]\n", + " nucleus_indices = sorted_indices[:cutoff_idx]\n", + "\n", + " nucleus_probs = nucleus_probs / torch.sum(nucleus_probs)\n", + "\n", + " torch.manual_seed(seed_value)\n", + " sampled_idx = torch.multinomial(nucleus_probs, 1).item()\n", + " sampled_token[0] = nucleus_indices[sampled_idx]\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"logits\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"p\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"seed\": (ctypes.POINTER(ctypes.c_int32), \"in\"),\n", + " \"sampled_token\": (ctypes.POINTER(ctypes.c_int32), \"out\"),\n", + " \"vocab_size\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " logits = torch.tensor([1.0, 2.0, 3.0, 0.5], device=\"cuda\", dtype=torch.float32)\n", + " p = torch.tensor([0.9], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([42], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + "\n", + " return {\n", + " \"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 4,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " tests = []\n", + "\n", + " logits = torch.tensor([1.0, 2.0, 3.0], device=\"cuda\", dtype=torch.float32)\n", + " p = torch.tensor([0.95], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([123], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + " tests.append(\n", + " {\n", + " 
\"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 3,\n", + " }\n", + " )\n", + "\n", + " logits = torch.randn(10, device=\"cuda\", dtype=torch.float32)\n", + " p = torch.tensor([0.9], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([456], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + " tests.append(\n", + " {\n", + " \"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 10,\n", + " }\n", + " )\n", + "\n", + " logits = torch.randn(100, device=\"cuda\", dtype=torch.float32) * 5.0\n", + " p = torch.tensor([0.85], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([789], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + " tests.append(\n", + " {\n", + " \"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 100,\n", + " }\n", + " )\n", + "\n", + " logits = torch.zeros(50, device=\"cuda\", dtype=torch.float32)\n", + " logits[0] = 10.0\n", + " p = torch.tensor([0.5], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([111], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + " tests.append(\n", + " {\n", + " \"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 50,\n", + " }\n", + " )\n", + "\n", + " logits = torch.randn(500, device=\"cuda\", dtype=torch.float32) * 3.0\n", + " p = torch.tensor([0.92], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([222], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + " tests.append(\n", + " {\n", + " 
\"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 500,\n", + " }\n", + " )\n", + "\n", + " logits = torch.linspace(-5, 5, 200, device=\"cuda\", dtype=torch.float32)\n", + " p = torch.tensor([0.8], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([333], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + " tests.append(\n", + " {\n", + " \"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 200,\n", + " }\n", + " )\n", + "\n", + " logits = torch.randn(1000, device=\"cuda\", dtype=torch.float32) * 2.0\n", + " p = torch.tensor([0.95], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([444], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + " tests.append(\n", + " {\n", + " \"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 1000,\n", + " }\n", + " )\n", + "\n", + " logits = torch.randn(5000, device=\"cuda\", dtype=torch.float32)\n", + " p = torch.tensor([0.9], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([555], device=\"cuda\", dtype=torch.int32)\n", + " sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + " tests.append(\n", + " {\n", + " \"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": 5000,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " vocab_size = 50000\n", + " logits = torch.randn(vocab_size, device=\"cuda\", dtype=torch.float32) * 3.0\n", + " p = torch.tensor([0.9], device=\"cuda\", dtype=torch.float32)\n", + " seed = torch.tensor([999], device=\"cuda\", dtype=torch.int32)\n", + 
" sampled_token = torch.zeros(1, device=\"cuda\", dtype=torch.int32)\n", + "\n", + " return {\n", + " \"logits\": logits,\n", + " \"p\": p,\n", + " \"seed\": seed,\n", + " \"sampled_token\": sampled_token,\n", + " \"vocab_size\": vocab_size,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or 
wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation 
code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/61_rope_embedding.ipynb b/challenges/colab_exports/medium/61_rope_embedding.ipynb new file mode 100644 index 00000000..2ff315ee --- /dev/null +++ b/challenges/colab_exports/medium/61_rope_embedding.ipynb @@ -0,0 +1,560 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that computes the Rotary Positional Embedding (RoPE) for a batch of query vectors.\n RoPE is a method for encoding positional information in transformer models by rotating the query and key vectors using precomputed cosine and sine components.\n

    \n

    \n Mathematically, given a query vector $x$ and corresponding cosine and sine vectors, the operation is defined as:\n $$\n \\text{RoPE}(x) = x \\odot \\cos + \\text{rotate\\_half}(x) \\odot \\sin\n $$\n

    \n

    \n Where $\\odot$ denotes element-wise multiplication. The $\\text{rotate\\_half}(x)$ operation swaps the first and second halves of the vector and negates the first half. For a vector of dimension $d$:\n $$\n \\text{rotate\\_half}([x_1, \\dots, x_{d/2}, x_{d/2+1}, \\dots, x_d]) = [-x_{d/2+1}, \\dots, -x_d, x_1, \\dots, x_{d/2}]\n $$\n

    \n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The input tensors Q, cos, and sin have shape (M, D), where M is the number of tokens and D is the head dimension
    • \n
    • D (head dimension) is guaranteed to be an even number
    • \n
    • The final result must be stored in the output variable with the same shape (M, D)
    • \n
    \n

    Example 1:

    \n
    Input:  Q   = [[1.0, 2.0, 3.0, 4.0],\n               [1.0, 1.0, 1.0, 1.0]]\n        Cos = [[1.0, 1.0, 1.0, 1.0],\n               [0.0, 0.0, 0.0, 0.0]]\n        Sin = [[0.0, 0.0, 0.0, 0.0],\n               [1.0, 1.0, 1.0, 1.0]]\nOutput: result = [[1.0, 2.0, 3.0, 4.0],\n                  [-1.0, -1.0, 1.0, 1.0]]\n        (Row 0 is identity via Cos; Row 1 is rotated via Sin)
    \n

    Constraints

    \n
      \n
    • Q, cos, and sin have identical dimensions
    • \n
    • D % 2 == 0
    • \n
    • 1 \u2264 D \u2264 10,000; 1 \u2264 M \u2264 1,048,576
    • \n\n
    • Performance is measured with D = 128, M = 1,048,576
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, cos, sin, output are device pointers\nextern \"C\" void solve(float* Q, float* cos, float* sin, float* output, int M, int D) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, cos, sin, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n cos: cute.Tensor,\n sin: cute.Tensor,\n output: cute.Tensor,\n M: cute.Int32,\n D: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, cos, sin are tensors on the GPU\n@jax.jit\ndef solve(Q: jax.Array, cos: jax.Array, sin: jax.Array, M: int, D: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# Q, cos, sin, 
output are device pointers\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n cos: UnsafePointer[Float32, MutExternalOrigin],\n sin: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n D: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, cos, sin, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, output: torch.Tensor, M: int, D: int\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, cos, sin, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, output: torch.Tensor, M: int, D: int\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + 
" def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Rotary Positional Embedding\",\n", + " atol=1e-4,\n", + " rtol=1e-4,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " cos: torch.Tensor,\n", + " sin: torch.Tensor,\n", + " output: torch.Tensor,\n", + " M: int,\n", + " D: int,\n", + " ):\n", + " assert Q.shape == (M, D)\n", + " assert cos.shape == (M, D)\n", + " assert sin.shape == (M, D)\n", + " assert output.shape == (M, D)\n", + "\n", + " # rotate_half implementation\n", + " # Split the 
last dimension into two halves\n", + " x1 = Q[..., : D // 2]\n", + " x2 = Q[..., D // 2 :]\n", + " # Concatenate -x2 and x1\n", + " rotated_Q = torch.cat((-x2, x1), dim=-1)\n", + "\n", + " # RoPE calculation\n", + " # Output = Q * Cos + rotate_half(Q) * Sin\n", + " result = (Q * cos) + (rotated_Q * sin)\n", + "\n", + " output.copy_(result)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"cos\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"sin\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"D\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " M = 1024\n", + " D = 64\n", + " dtype = torch.float32\n", + "\n", + " Q = torch.randn(M, D, device=\"cuda\", dtype=dtype)\n", + " Cos = torch.randn(M, D, device=\"cuda\", dtype=dtype)\n", + " Sin = torch.randn(M, D, device=\"cuda\", dtype=dtype)\n", + " Output = torch.zeros(M, D, device=\"cuda\", dtype=dtype)\n", + "\n", + " return {\n", + " \"Q\": Q,\n", + " \"cos\": Cos,\n", + " \"sin\": Sin,\n", + " \"output\": Output,\n", + " \"M\": M,\n", + " \"D\": D,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " tests = []\n", + " dtype = torch.float32\n", + "\n", + " # Test 1: Small input\n", + " M = 4\n", + " D = 4\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.randn(M, D, device=\"cuda\", dtype=dtype),\n", + " \"cos\": torch.randn(M, D, device=\"cuda\", dtype=dtype),\n", + " \"sin\": torch.randn(M, D, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(M, D, device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + " \"D\": D,\n", + " }\n", + " )\n", + "\n", + " # Test 2: Larger input\n", + " M = 128\n", + " D = 64\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.randn(M, D, device=\"cuda\", 
dtype=dtype),\n", + " \"cos\": torch.randn(M, D, device=\"cuda\", dtype=dtype),\n", + " \"sin\": torch.randn(M, D, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(M, D, device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + " \"D\": D,\n", + " }\n", + " )\n", + "\n", + " # zero_matrices: outputs should remain zero when inputs are zero\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.zeros((3, 6), device=\"cuda\", dtype=dtype),\n", + " \"cos\": torch.zeros((3, 6), device=\"cuda\", dtype=dtype),\n", + " \"sin\": torch.zeros((3, 6), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(3, 6, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"D\": 6,\n", + " }\n", + " )\n", + "\n", + " # minimal_dims: smallest even D that still allows rotation\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.randn((1, 2), device=\"cuda\", dtype=dtype),\n", + " \"cos\": torch.randn((1, 2), device=\"cuda\", dtype=dtype),\n", + " \"sin\": torch.randn((1, 2), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(1, 2, device=\"cuda\", dtype=dtype),\n", + " \"M\": 1,\n", + " \"D\": 2,\n", + " }\n", + " )\n", + "\n", + " # mixed_values: negative and positive entries\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[-1.0, 2.0, -3.0, 4.0], [5.0, -6.0, 7.0, -8.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"cos\": torch.tensor(\n", + " [[0.5, 0.5, 0.5, 0.5], [0.1, 0.2, 0.3, 0.4]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"sin\": torch.tensor(\n", + " [[0.5, -0.5, 0.5, -0.5], [0.4, -0.3, 0.2, -0.1]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.zeros(2, 4, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"D\": 4,\n", + " }\n", + " )\n", + "\n", + " # large_matrices: random uniform values for stress testing\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.empty((256, 128), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + 
" \"cos\": torch.empty((256, 128), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"sin\": torch.empty((256, 128), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"output\": torch.zeros(256, 128, device=\"cuda\", dtype=dtype),\n", + " \"M\": 256,\n", + " \"D\": 128,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " M = 1024 * 1024 # 1M tokens\n", + " D = 128\n", + " dtype = torch.float32\n", + " return {\n", + " \"Q\": torch.randn(M, D, device=\"cuda\", dtype=dtype),\n", + " \"cos\": torch.randn(M, D, device=\"cuda\", dtype=dtype),\n", + " \"sin\": torch.randn(M, D, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.zeros(M, D, device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + " \"D\": D,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/64_weight_dequantization.ipynb b/challenges/colab_exports/medium/64_weight_dequantization.ipynb new file mode 100644 index 00000000..b5e020c6 --- /dev/null +++ b/challenges/colab_exports/medium/64_weight_dequantization.ipynb @@ -0,0 +1,526 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that \"dequantizes\" a weight matrix on the GPU. You are given an input matrix X of shape [M, N] containing quantized values and a scale matrix S of shape [ceil(M/T), ceil(N/T)], where T is the tile size.\n

    \n

    \n For each element $X_{i,j}$, the corresponding scale factor is $S_{row, col}$ where $row = \\lfloor i / T \\rfloor$ and $col = \\lfloor j / T \\rfloor$.\n The output $Y_{i,j}$ should be computed as:\n $$\n Y_{i,j} = X_{i,j} \\times S_{row, col}\n $$\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the output buffer Y
    • \n
    \n\n

    Example 1:

    \n
    \nInput:\nM = 4, N = 4, TILE_SIZE = 2\nX = [\n  [10, 10,  5,  5],\n  [10, 10,  5,  5],\n  [ 2,  2,  8,  8],\n  [ 2,  2,  8,  8]\n]\nS = [\n  [0.5, 2.0],\n  [4.0, 0.25]\n]\n\nOutput:\nY = [\n  [ 5.0,  5.0, 10.0, 10.0],\n  [ 5.0,  5.0, 10.0, 10.0],\n  [ 8.0,  8.0,  2.0,  2.0],\n  [ 8.0,  8.0,  2.0,  2.0]\n]\nExplanation:\nTile (0,0) of X is multiplied by S[0,0] (0.5).\nTile (0,1) of X is multiplied by S[0,1] (2.0).\nTile (1,0) is multiplied by S[1,0] (4.0).\nTile (1,1) is multiplied by S[1,1] (0.25).\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ M, N ≤ 8192
    • \n
    • TILE_SIZE ∈ {16, 32, 64, 128}
    • \n\n
    • Performance is measured with M = 8,192, N = 8,192, TILE_SIZE = 128
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// X, S, Y are device pointers\nextern \"C\" void solve(const float* X, const float* S, float* Y, int M, int N, int TILE_SIZE) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# X, S, Y are tensors on the GPU\n@cute.jit\ndef solve(\n X: cute.Tensor,\n S: cute.Tensor,\n Y: cute.Tensor,\n M: cute.Int32,\n N: cute.Int32,\n TILE_SIZE: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# X, S are tensors on the GPU\n@jax.jit\ndef solve(X: jax.Array, S: jax.Array, M: int, N: int, TILE_SIZE: int) -> jax.Array:\n # return output tensor Y directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# X, S, Y are device 
pointers\n@export\ndef solve(\n X: UnsafePointer[Float32, MutExternalOrigin],\n S: UnsafePointer[Float32, MutExternalOrigin],\n Y: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n TILE_SIZE: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# X, S, Y are tensors on the GPU\ndef solve(X: torch.Tensor, S: torch.Tensor, Y: torch.Tensor, M: int, N: int, TILE_SIZE: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# X, S, Y are tensors on the GPU\ndef solve(X: torch.Tensor, S: torch.Tensor, Y: torch.Tensor, M: int, N: int, TILE_SIZE: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + 
" \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Weight Dequantization\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self, X: torch.Tensor, S: torch.Tensor, Y: torch.Tensor, M: int, N: int, TILE_SIZE: int\n", + " ):\n", + " s_rows = (M + TILE_SIZE - 1) // TILE_SIZE\n", + " s_cols = (N + TILE_SIZE - 1) // TILE_SIZE\n", + " assert X.shape == (M, N)\n", + " assert S.shape == (s_rows, s_cols)\n", + " assert Y.shape == (M, N)\n", + " assert X.dtype == torch.float32\n", + " assert S.dtype == torch.float32\n", + " assert Y.dtype == torch.float32\n", + "\n", + " S_expanded = S.repeat_interleave(TILE_SIZE, dim=0)[:M, :]\n", + " 
S_expanded = S_expanded.repeat_interleave(TILE_SIZE, dim=1)[:, :N]\n", + "\n", + " Y.copy_(X * S_expanded)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"X\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"S\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"Y\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"TILE_SIZE\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " M, N = 256, 256\n", + " TILE_SIZE = 128\n", + " X = torch.randn(M, N, device=\"cuda\", dtype=torch.float32)\n", + " # S shape\n", + " s_rows = (M + TILE_SIZE - 1) // TILE_SIZE\n", + " s_cols = (N + TILE_SIZE - 1) // TILE_SIZE\n", + " S = torch.randn(s_rows, s_cols, device=\"cuda\", dtype=torch.float32)\n", + " Y = torch.empty_like(X)\n", + "\n", + " return {\n", + " \"X\": X,\n", + " \"S\": S,\n", + " \"Y\": Y,\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"TILE_SIZE\": TILE_SIZE,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " tests = []\n", + "\n", + " test_configs = [\n", + " # Edge cases - small sizes\n", + " (1, 1, 16),\n", + " (2, 3, 16),\n", + " (4, 4, 16),\n", + " # Power-of-2 sizes\n", + " (64, 64, 32),\n", + " (128, 128, 64),\n", + " (256, 256, 128),\n", + " (512, 512, 128),\n", + " # Non-power-of-2 sizes (padding needed)\n", + " (30, 50, 16),\n", + " (100, 100, 32),\n", + " (130, 200, 128),\n", + " (255, 255, 64),\n", + " # Realistic sizes\n", + " (1024, 1024, 128),\n", + " (2048, 4096, 128),\n", + " ]\n", + "\n", + " for M, N, TILE_SIZE in test_configs:\n", + " s_rows = (M + TILE_SIZE - 1) // TILE_SIZE\n", + " s_cols = (N + TILE_SIZE - 1) // TILE_SIZE\n", + " tests.append(\n", + " {\n", + " \"X\": torch.randn(M, N, device=\"cuda\", dtype=torch.float32),\n", + " \"S\": torch.randn(s_rows, s_cols, device=\"cuda\", dtype=torch.float32),\n", + " 
\"Y\": torch.zeros(M, N, device=\"cuda\", dtype=torch.float32),\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"TILE_SIZE\": TILE_SIZE,\n", + " }\n", + " )\n", + "\n", + " # Zero input\n", + " M, N, TILE_SIZE = 64, 64, 32\n", + " s_rows = (M + TILE_SIZE - 1) // TILE_SIZE\n", + " s_cols = (N + TILE_SIZE - 1) // TILE_SIZE\n", + " tests.append(\n", + " {\n", + " \"X\": torch.zeros(M, N, device=\"cuda\", dtype=torch.float32),\n", + " \"S\": torch.randn(s_rows, s_cols, device=\"cuda\", dtype=torch.float32),\n", + " \"Y\": torch.zeros(M, N, device=\"cuda\", dtype=torch.float32),\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"TILE_SIZE\": TILE_SIZE,\n", + " }\n", + " )\n", + "\n", + " # Negative values\n", + " M, N, TILE_SIZE = 128, 128, 64\n", + " s_rows = (M + TILE_SIZE - 1) // TILE_SIZE\n", + " s_cols = (N + TILE_SIZE - 1) // TILE_SIZE\n", + " tests.append(\n", + " {\n", + " \"X\": torch.randn(M, N, device=\"cuda\", dtype=torch.float32).sub_(0.5),\n", + " \"S\": torch.randn(s_rows, s_cols, device=\"cuda\", dtype=torch.float32).sub_(0.5),\n", + " \"Y\": torch.zeros(M, N, device=\"cuda\", dtype=torch.float32),\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"TILE_SIZE\": TILE_SIZE,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " M, N = 8192, 8192\n", + " TILE_SIZE = 128\n", + " X = torch.randn(M, N, device=\"cuda\", dtype=torch.float32)\n", + " s_rows = (M + TILE_SIZE - 1) // TILE_SIZE\n", + " s_cols = (N + TILE_SIZE - 1) // TILE_SIZE\n", + " S = torch.randn(s_rows, s_cols, device=\"cuda\", dtype=torch.float32)\n", + " Y = torch.empty_like(X)\n", + "\n", + " return {\n", + " \"X\": X,\n", + " \"S\": S,\n", + " \"Y\": Y,\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"TILE_SIZE\": TILE_SIZE,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + 
"outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/67_moe_topk_gating.ipynb b/challenges/colab_exports/medium/67_moe_topk_gating.ipynb new file mode 100644 index 00000000..08d60baf --- /dev/null +++ b/challenges/colab_exports/medium/67_moe_topk_gating.ipynb @@ -0,0 +1,581 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that performs Top-K Gating for Mixture of Experts (MoE) models. Given a logit matrix of shape [M, E] where M is the number of tokens and E is the number of experts, identify the k largest values in each row, extract their indices, and apply softmax to get mixing weights.\n

    \n\n

    \n For each row i, the operation computes:\n $$\n \\begin{align}\n \\text{indices}_i, \\text{vals}_i &= \\text{TopK}(\\text{logits}_i, k) \\\\\n \\text{vals}_i &= \\text{logits}_i[\\text{indices}_i] \\\\\n \\text{weights}_i &= \\text{Softmax}(\\text{vals}_i)\n \\end{align}\n $$\n

    \n\n

    \n The selected experts must remain ordered by descending logit value, matching the order returned by\n topk. The topk_weights array must correspond positionally to\n topk_indices in that same order.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • External libraries are not permitted
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in the topk_weights and topk_indices arrays
    • \n
    \n\n

    Example 1:

    \n
    \nInput:\n  logits = [[1.0, 2.0, 3.0, 4.0],\n            [4.0, 3.0, 2.0, 1.0]]\n  M = 2, E = 4, k = 2\n\nOutput:\n  topk_weights = [[0.7311, 0.2689],\n                  [0.7311, 0.2689]]\n  topk_indices = [[3, 2],\n                  [0, 1]]\n\nExplanation:\nRow 0: Top-2 values are 4.0 and 3.0 at indices 3 and 2.\n       Softmax([4.0, 3.0]) = [0.7311, 0.2689]\nRow 1: Top-2 values are 4.0 and 3.0 at indices 0 and 1.\n       Softmax([4.0, 3.0]) = [0.7311, 0.2689]\n
    \n\n

    Constraints

    \n
      \n
    • 1 \u2264 M \u2264 10,000 (number of tokens)
    • \n
    • 1 \u2264 E \u2264 256 (number of experts)
    • \n
    • 1 \u2264 k \u2264 E (top-k selection, typically k=2)
    • \n
    • All tensors are stored on GPU
    • \n
    • Logits are 32-bit floats
    • \n
    • Indices are 32-bit integers
    • \n\n
    • Performance is measured with M = 1,024, E = 64, k = 2
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// logits, topk_weights, topk_indices are device pointers\nextern \"C\" void solve(const float* logits, float* topk_weights, int* topk_indices, int M, int E,\n int k) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# logits, topk_weights, topk_indices are tensors on the GPU\n@cute.jit\ndef solve(\n logits: cute.Tensor,\n topk_weights: cute.Tensor,\n topk_indices: cute.Tensor,\n M: cute.Int32,\n E: cute.Int32,\n k: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# logits is a tensor on the GPU\n@jax.jit\ndef solve(logits: jax.Array, M: int, E: int, k: int) -> tuple[jax.Array, jax.Array]:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import 
ceildiv\n\n\n@export\ndef solve(\n logits: UnsafePointer[Float32, MutExternalOrigin],\n topk_weights: UnsafePointer[Float32, MutExternalOrigin],\n topk_indices: UnsafePointer[Int32, MutExternalOrigin],\n M: Int32,\n E: Int32,\n k: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# logits, topk_weights, topk_indices are tensors on the GPU\ndef solve(\n logits: torch.Tensor,\n topk_weights: torch.Tensor,\n topk_indices: torch.Tensor,\n M: int,\n E: int,\n k: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# logits, topk_weights, topk_indices are tensors on the GPU\ndef solve(\n logits: torch.Tensor,\n topk_weights: torch.Tensor,\n topk_indices: torch.Tensor,\n M: int,\n E: int,\n k: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", 
+ "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"MoE Top-K Gating\", atol=1e-05, rtol=1e-05, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " logits: torch.Tensor,\n", + " topk_weights: torch.Tensor,\n", + " topk_indices: torch.Tensor,\n", + " M: int,\n", + " E: int,\n", + " k: int,\n", + " ):\n", + " \"\"\"\n", + " Computes the Top-K gating for Mixture of Experts.\n", + "\n", + " For each row in logits, select the k highest values, apply softmax to them,\n", + " and return the weights and 
indices.\n", + " \"\"\"\n", + " assert logits.shape == (M, E)\n", + " assert topk_weights.shape == (M, k)\n", + " assert topk_indices.shape == (M, k)\n", + " assert logits.is_cuda and topk_weights.is_cuda and topk_indices.is_cuda\n", + " assert topk_indices.dtype == torch.int32\n", + "\n", + " # 1. TopK Selection\n", + " # logits: (M, E) -> vals: (M, k), indices: (M, k)\n", + " vals, indices = torch.topk(logits, k, dim=-1)\n", + "\n", + " # 2. Softmax on the top k values\n", + " weights = torch.softmax(vals, dim=-1)\n", + "\n", + " # 3. Write output\n", + " topk_weights.copy_(weights)\n", + " topk_indices.copy_(indices.to(torch.int32))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"logits\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"topk_weights\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"topk_indices\": (ctypes.POINTER(ctypes.c_int), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"E\": (ctypes.c_int, \"in\"),\n", + " \"k\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype_float = torch.float32\n", + " dtype_int = torch.int32\n", + " M = 2\n", + " E = 4\n", + " k = 2\n", + "\n", + " # Example from problem description\n", + " logits_data = torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [4.0, 3.0, 2.0, 1.0]], device=\"cuda\", dtype=dtype_float\n", + " )\n", + " topk_weights_data = torch.zeros((M, k), device=\"cuda\", dtype=dtype_float)\n", + " topk_indices_data = torch.zeros((M, k), device=\"cuda\", dtype=dtype_int)\n", + "\n", + " return {\n", + " \"logits\": logits_data,\n", + " \"topk_weights\": topk_weights_data,\n", + " \"topk_indices\": topk_indices_data,\n", + " \"M\": M,\n", + " \"E\": E,\n", + " \"k\": k,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype_float = torch.float32\n", + " dtype_int = torch.int32\n", + " test_cases = []\n", + "\n", + " # Test case 1: Basic 
example from problem description\n", + " test_cases.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [4.0, 3.0, 2.0, 1.0]], device=\"cuda\", dtype=dtype_float\n", + " ),\n", + " \"topk_weights\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_indices\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype_int),\n", + " \"M\": 2,\n", + " \"E\": 4,\n", + " \"k\": 2,\n", + " }\n", + " )\n", + "\n", + " # Test case 2: k=1 (single expert per token)\n", + " test_cases.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[5.0, 1.0, 3.0], [2.0, 8.0, 4.0], [6.0, 2.0, 9.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype_float,\n", + " ),\n", + " \"topk_weights\": torch.zeros((3, 1), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_indices\": torch.zeros((3, 1), device=\"cuda\", dtype=dtype_int),\n", + " \"M\": 3,\n", + " \"E\": 3,\n", + " \"k\": 1,\n", + " }\n", + " )\n", + "\n", + " # Test case 3: k=E (all experts)\n", + " test_cases.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[1.0, 2.0, 3.0], [3.0, 1.0, 2.0]], device=\"cuda\", dtype=dtype_float\n", + " ),\n", + " \"topk_weights\": torch.zeros((2, 3), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_indices\": torch.zeros((2, 3), device=\"cuda\", dtype=dtype_int),\n", + " \"M\": 2,\n", + " \"E\": 3,\n", + " \"k\": 3,\n", + " }\n", + " )\n", + "\n", + " # Test case 4: Typical MoE configuration (M=4, E=8, k=2)\n", + " torch.manual_seed(42)\n", + " test_cases.append(\n", + " {\n", + " \"logits\": torch.randn((4, 8), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_weights\": torch.zeros((4, 2), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_indices\": torch.zeros((4, 2), device=\"cuda\", dtype=dtype_int),\n", + " \"M\": 4,\n", + " \"E\": 8,\n", + " \"k\": 2,\n", + " }\n", + " )\n", + "\n", + " # Test case 5: Larger E with small k (M=8, E=64, k=2)\n", + " torch.manual_seed(123)\n", + " test_cases.append(\n", + " {\n", + " \"logits\": 
torch.randn((8, 64), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_weights\": torch.zeros((8, 2), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_indices\": torch.zeros((8, 2), device=\"cuda\", dtype=dtype_int),\n", + " \"M\": 8,\n", + " \"E\": 64,\n", + " \"k\": 2,\n", + " }\n", + " )\n", + "\n", + " # Test case 6: Test with negative logits\n", + " test_cases.append(\n", + " {\n", + " \"logits\": torch.tensor(\n", + " [[-1.0, -2.0, -3.0, -4.0], [-4.0, -1.0, -2.0, -3.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype_float,\n", + " ),\n", + " \"topk_weights\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_indices\": torch.zeros((2, 2), device=\"cuda\", dtype=dtype_int),\n", + " \"M\": 2,\n", + " \"E\": 4,\n", + " \"k\": 2,\n", + " }\n", + " )\n", + "\n", + " # Test case 7: Medium size test (M=100, E=16, k=4)\n", + " torch.manual_seed(456)\n", + " test_cases.append(\n", + " {\n", + " \"logits\": torch.randn((100, 16), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_weights\": torch.zeros((100, 4), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_indices\": torch.zeros((100, 4), device=\"cuda\", dtype=dtype_int),\n", + " \"M\": 100,\n", + " \"E\": 16,\n", + " \"k\": 4,\n", + " }\n", + " )\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype_float = torch.float32\n", + " dtype_int = torch.int32\n", + " M = 1024\n", + " E = 64\n", + " k = 2\n", + "\n", + " torch.manual_seed(789)\n", + " return {\n", + " \"logits\": torch.randn((M, E), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_weights\": torch.zeros((M, k), device=\"cuda\", dtype=dtype_float),\n", + " \"topk_indices\": torch.zeros((M, k), device=\"cuda\", dtype=dtype_int),\n", + " \"M\": M,\n", + " \"E\": E,\n", + " \"k\": k,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": 
"form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/69_jacobi_stencil_2d.ipynb b/challenges/colab_exports/medium/69_jacobi_stencil_2d.ipynb new file mode 100644 index 00000000..485291dd --- /dev/null +++ b/challenges/colab_exports/medium/69_jacobi_stencil_2d.ipynb @@ -0,0 +1,610 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Given a 2D grid of 32-bit floating point values, apply one iteration of the 5-point Jacobi stencil:\n each interior cell of the output is set to the average of its four cardinal neighbors (top, bottom,\n left, right) from the input grid. Boundary cells (first/last row and column) are copied unchanged\n from the input to the output.\n

    \n\n\n \n \n \n \n \n \n\n \n 5-Point Jacobi Stencil\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n \n T\n L\n (2,2)\n R\n B\n\n \n \n \n \n \n\n \n \n Center cell\n \n Neighbors\n \n Boundary\n\n \n output[i,j] = ¼ × (top + bottom + left + right)\n\n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in output
    • \n
    • Read exclusively from input and write exclusively to output (do not update input)
    • \n
    \n\n

    Example:

    \n

    \nInput ($4 \\times 4$):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n5.0 & 6.0 & 7.0 & 8.0 \\\\\n9.0 & 10.0 & 11.0 & 12.0 \\\\\n13.0 & 14.0 & 15.0 & 16.0\n\\end{bmatrix}\n$$\nOutput ($4 \\times 4$):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n5.0 & 6.0 & 7.0 & 8.0 \\\\\n9.0 & 10.0 & 11.0 & 12.0 \\\\\n13.0 & 14.0 & 15.0 & 16.0\n\\end{bmatrix}\n$$\nInterior cell $(1,1)$: $0.25 \\times (\\text{input}[0,1] + \\text{input}[2,1] + \\text{input}[1,0] + \\text{input}[1,2])$\n$= 0.25 \\times (2.0 + 10.0 + 5.0 + 7.0) = 6.0$
    \nInterior cell $(1,2)$: $0.25 \\times (\\text{input}[0,2] + \\text{input}[2,2] + \\text{input}[1,1] + \\text{input}[1,3])$\n$= 0.25 \\times (3.0 + 11.0 + 6.0 + 8.0) = 7.0$
    \nInterior cell $(2,1)$: $0.25 \\times (\\text{input}[1,1] + \\text{input}[3,1] + \\text{input}[2,0] + \\text{input}[2,2])$\n$= 0.25 \\times (6.0 + 14.0 + 9.0 + 11.0) = 10.0$
    \nInterior cell $(2,2)$: $0.25 \\times (\\text{input}[1,2] + \\text{input}[3,2] + \\text{input}[2,1] + \\text{input}[2,3])$\n$= 0.25 \\times (7.0 + 15.0 + 10.0 + 12.0) = 11.0$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ rows, cols ≤ 16,384
    • \n
    • Input values are in the range [-100, 100]
    • \n
    • All values are 32-bit floats
    • \n
    • Performance is measured with rows = 8,192, cols = 8,192
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// input, output are device pointers\nextern \"C\" void solve(const float* input, float* output, int rows, int cols) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# input, output are tensors on the GPU\n@cute.jit\ndef solve(input: cute.Tensor, output: cute.Tensor, rows: cute.Int32, cols: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# input is a tensor on the GPU\n@jax.jit\ndef solve(input: jax.Array, rows: int, cols: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# input, output are device pointers\n@export\ndef solve(\n input: UnsafePointer[Float32, 
MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n rows: Int32,\n cols: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, rows: int, cols: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# input, output are tensors on the GPU\ndef solve(input: torch.Tensor, output: torch.Tensor, rows: int, cols: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get 
the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"2D Jacobi Stencil\",\n", + " atol=1e-05,\n", + " rtol=1e-05,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " input: torch.Tensor,\n", + " output: torch.Tensor,\n", + " rows: int,\n", + " cols: int,\n", + " ):\n", + " assert input.shape == (rows, cols)\n", + " assert output.shape == (rows, cols)\n", + " assert input.dtype == torch.float32\n", + " assert input.device.type == \"cuda\"\n", + "\n", + " # Copy boundary cells unchanged\n", + " output.copy_(input)\n", + "\n", + " # Apply 5-point stencil to interior cells:\n", + " # output[i, j] = 0.25 * (input[i-1,j] + input[i+1,j] + input[i,j-1] + input[i,j+1])\n", + " output[1:-1, 1:-1] = 0.25 * (\n", + " input[0:-2, 1:-1] # top neighbor\n", + " + input[2:, 1:-1] 
# bottom neighbor\n", + " + input[1:-1, 0:-2] # left neighbor\n", + " + input[1:-1, 2:] # right neighbor\n", + " )\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"input\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"rows\": (ctypes.c_int, \"in\"),\n", + " \"cols\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " input = torch.tensor(\n", + " [\n", + " [1.0, 2.0, 3.0, 4.0],\n", + " [5.0, 6.0, 7.0, 8.0],\n", + " [9.0, 10.0, 11.0, 12.0],\n", + " [13.0, 14.0, 15.0, 16.0],\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " output = torch.empty((4, 4), device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"input\": input,\n", + " \"output\": output,\n", + " \"rows\": 4,\n", + " \"cols\": 4,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # minimal_3x3 (only one interior cell)\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor(\n", + " [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.empty((3, 3), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 3,\n", + " \"cols\": 3,\n", + " }\n", + " )\n", + "\n", + " # minimal_1x1 (all boundary, no interior cells)\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[42.0]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((1, 1), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 1,\n", + " \"cols\": 1,\n", + " }\n", + " )\n", + "\n", + " # single_row (all boundary)\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[1.0, 2.0, 3.0, 4.0]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((1, 4), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 1,\n", + " 
\"cols\": 4,\n", + " }\n", + " )\n", + "\n", + " # single_col (all boundary)\n", + " tests.append(\n", + " {\n", + " \"input\": torch.tensor([[1.0], [2.0], [3.0], [4.0]], device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((4, 1), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 4,\n", + " \"cols\": 1,\n", + " }\n", + " )\n", + "\n", + " # all_zeros (interior should stay zero)\n", + " tests.append(\n", + " {\n", + " \"input\": torch.zeros((16, 16), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((16, 16), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 16,\n", + " \"cols\": 16,\n", + " }\n", + " )\n", + "\n", + " # uniform_constant (interior stays the same when all values equal)\n", + " tests.append(\n", + " {\n", + " \"input\": torch.full((32, 32), 3.14, device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty((32, 32), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 32,\n", + " \"cols\": 32,\n", + " }\n", + " )\n", + "\n", + " # power_of_2_square_64\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((64, 64), device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0),\n", + " \"output\": torch.empty((64, 64), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 64,\n", + " \"cols\": 64,\n", + " }\n", + " )\n", + "\n", + " # power_of_2_square_128\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((128, 128), device=\"cuda\", dtype=dtype).uniform_(-10.0, 10.0),\n", + " \"output\": torch.empty((128, 128), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 128,\n", + " \"cols\": 128,\n", + " }\n", + " )\n", + "\n", + " # non_power_of_2_30x30\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((30, 30), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"output\": torch.empty((30, 30), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 30,\n", + " \"cols\": 30,\n", + " }\n", + " )\n", + "\n", + " # non_power_of_2_100x100\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((100, 100), 
device=\"cuda\", dtype=dtype).uniform_(-3.0, 3.0),\n", + " \"output\": torch.empty((100, 100), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 100,\n", + " \"cols\": 100,\n", + " }\n", + " )\n", + "\n", + " # non_square_255x33\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((255, 33), device=\"cuda\", dtype=dtype).uniform_(-2.0, 2.0),\n", + " \"output\": torch.empty((255, 33), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 255,\n", + " \"cols\": 33,\n", + " }\n", + " )\n", + "\n", + " # negative_values_non_square_17x97\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((17, 97), device=\"cuda\", dtype=dtype).uniform_(-100.0, 0.0),\n", + " \"output\": torch.empty((17, 97), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 17,\n", + " \"cols\": 97,\n", + " }\n", + " )\n", + "\n", + " # realistic_medium_512x256\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((512, 256), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"output\": torch.empty((512, 256), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 512,\n", + " \"cols\": 256,\n", + " }\n", + " )\n", + "\n", + " # realistic_large_1024x1024\n", + " tests.append(\n", + " {\n", + " \"input\": torch.empty((1024, 1024), device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0),\n", + " \"output\": torch.empty((1024, 1024), device=\"cuda\", dtype=dtype),\n", + " \"rows\": 1024,\n", + " \"cols\": 1024,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " rows = 8192\n", + " cols = 8192\n", + " return {\n", + " \"input\": torch.empty((rows, cols), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"output\": torch.empty((rows, cols), device=\"cuda\", dtype=dtype),\n", + " \"rows\": rows,\n", + " \"cols\": cols,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": 
"evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/6_softmax_attention.ipynb b/challenges/colab_exports/medium/6_softmax_attention.ipynb new file mode 100644 index 00000000..b58969e9 --- /dev/null +++ b/challenges/colab_exports/medium/6_softmax_attention.ipynb @@ -0,0 +1,524 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    Implement a GPU program that computes the softmax attention operation for a given set of matrices. Given the query\n matrix Q of size M\u00d7d, key matrix K of size N\u00d7d, and value matrix\n V of size N\u00d7d, your program should compute the output matrix using the formula:\n $$\\text{Attention}(Q, K, V) = \\text{softmax}\\Bigl( \\frac{QK^T}{\\sqrt{d}} \\Bigr)V,$$ where the softmax function is\n applied row-wise.

    \n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The\n solve function signature must remain unchanged\n
    • \n
    • The final result must be stored in the output matrix\n output\n
    • \n
    \n

    Example 1:

    \n

    \nInput:
    \nQ (2\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0\n\\end{bmatrix}\n$$\nK (3\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 & 0.0 & 0.0 \\\\\n0.0 & 1.0 & 0.0 & 0.0 \\\\\n0.0 & 0.0 & 1.0 & 0.0\n\\end{bmatrix}\n$$\nV (3\u00d74):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 & 3.0 & 4.0 \\\\\n5.0 & 6.0 & 7.0 & 8.0 \\\\\n9.0 & 10.0 & 11.0 & 12.0\n\\end{bmatrix}\n$$\n

    \n\n

    \nOutput:
    \noutput (2\u00d74):\n$$\n\\begin{bmatrix}\n4.29 & 5.29 & 6.29 & 7.29 \\\\\n5.00 & 6.00 & 7.00 & 8.00\n\\end{bmatrix}\n$$\n

    \n\n

    Example 2:

    \n

    \nInput:
    \nQ (1\u00d72):\n$$\n\\begin{bmatrix}\n1.0 & 2.0\n\\end{bmatrix}\n$$\nK (2\u00d72):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 \\\\\n0.0 & 1.0\n\\end{bmatrix}\n$$\nV (2\u00d72):\n$$\n\\begin{bmatrix}\n3.0 & 4.0 \\\\\n5.0 & 6.0\n\\end{bmatrix}\n$$\n

    \n\n

    \nOutput:
    \noutput (1\u00d72):\n$$\n\\begin{bmatrix}\n4.34 & 5.34\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • Matrix Q is of size M\u00d7d and matrices K and V are of size\n N\u00d7d
    • \n
    • 1 ≤ M, N ≤ 100,000
    • \n
    • 1 ≤ d ≤ 128
    • \n\n
    • Performance is measured with M = 512, N = 256, d = 128
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K, V, output are device pointers\nextern \"C\" void solve(const float* Q, const float* K, const float* V, float* output, int M, int N,\n int d) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K, V, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K: cute.Tensor,\n V: cute.Tensor,\n output: cute.Tensor,\n M: cute.Int32,\n N: cute.Int32,\n d: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K, V are tensors on the GPU\n@jax.jit\ndef solve(Q: jax.Array, K: jax.Array, V: jax.Array, M: int, N: int, d: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import 
ceildiv\n\n\n# Q, K, V, output are device pointers (i.e. pointers to memory on the GPU)\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K: UnsafePointer[Float32, MutExternalOrigin],\n V: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n d: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, output: torch.Tensor, M: int, N: int, d: int\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor, output: torch.Tensor, M: int, N: int, d: int\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " 
self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Softmax Attention\", atol=1e-04, rtol=1e-04, num_gpus=1, access_tier=\"free\"\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K: torch.Tensor,\n", + " V: torch.Tensor,\n", + " output: torch.Tensor,\n", + " M: int,\n", + " N: int,\n", + " d: int,\n", + " ):\n", + " scale = d**0.5\n", + " attn = torch.matmul(Q, K.t()) / scale\n", + " attn = torch.softmax(attn, dim=1)\n", + " torch.matmul(attn, V, out=output)\n", + "\n", + " 
def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"V\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"d\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " Q = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype)\n", + " K = torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " V = torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [9.0, 10.0, 11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " output = torch.empty(2, 4, device=\"cuda\", dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": 2, \"N\": 3, \"d\": 4}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # basic_example\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=\"cuda\", dtype=dtype\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], [9.0, 10.0, 11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.empty(2, 4, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"N\": 3,\n", + " \"d\": 4,\n", + " }\n", + " )\n", + "\n", + " # zero_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.zeros((3, 5), 
device=\"cuda\", dtype=dtype),\n", + " \"K\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"V\": torch.zeros((3, 5), device=\"cuda\", dtype=dtype),\n", + " \"output\": torch.empty(3, 5, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"N\": 3,\n", + " \"d\": 5,\n", + " }\n", + " )\n", + "\n", + " # mixed_values\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.tensor(\n", + " [[-1.0, 2.0, -3.0], [4.0, -5.0, 6.0], [-7.0, 8.0, -9.0], [10.0, -11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"K\": torch.tensor(\n", + " [[2.0, -1.0, 3.0], [-4.0, 5.0, -6.0], [7.0, -8.0, 9.0], [-10.0, 11.0, -12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"V\": torch.tensor(\n", + " [[1.0, 0.5, -0.5], [-1.0, 2.0, 3.0], [4.0, -2.0, 1.0], [0.0, 1.0, -1.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"output\": torch.empty(4, 3, device=\"cuda\", dtype=dtype),\n", + " \"M\": 4,\n", + " \"N\": 4,\n", + " \"d\": 3,\n", + " }\n", + " )\n", + "\n", + " # large_matrices\n", + " tests.append(\n", + " {\n", + " \"Q\": torch.empty((64, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"K\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"V\": torch.empty((128, 32), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1),\n", + " \"output\": torch.empty(64, 32, device=\"cuda\", dtype=dtype),\n", + " \"M\": 64,\n", + " \"N\": 128,\n", + " \"d\": 32,\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, N, d = 512, 256, 128\n", + " Q = torch.empty((512, 128), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1)\n", + " K = torch.empty((256, 128), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1)\n", + " V = torch.empty((256, 128), device=\"cuda\", dtype=dtype).uniform_(-0.1, 0.1)\n", + " output = torch.empty(M, d, device=\"cuda\", dtype=dtype)\n", + " 
return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"M\": M, \"N\": N, \"d\": d}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python 
bridge. This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator 
based on configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/70_segmented_prefix_sum.ipynb b/challenges/colab_exports/medium/70_segmented_prefix_sum.ipynb new file mode 100644 index 00000000..4130b7d4 --- /dev/null +++ b/challenges/colab_exports/medium/70_segmented_prefix_sum.ipynb @@ -0,0 +1,552 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Given an array of N 32-bit floating point values and an integer array\n flags of the same length, where flags[i] = 1 marks the start of a new\n segment and flags[i] = 0 continues the current segment, compute the\n exclusive prefix sum within each segment and store the result in\n output. The first element is always a segment start\n (flags[0] = 1). Within each segment, output[i] equals the sum of all\n elements of values in the same segment that appear before index i, so the\n first element of every segment is always 0.0.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in output
    • \n
    • Read from values and flags; write to output
    • \n
    \n\n

    Example

    \n
    \nInput values: [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]\nInput flags:  [  1,   0,   0,   1,   0,   1]\n\nSegments:     [1.0, 2.0, 3.0] | [4.0, 5.0] | [6.0]\n\nOutput:       [0.0, 1.0, 3.0,   0.0, 4.0,   0.0]\n
    \n

    \n Segment 1: exclusive prefix sums of [1, 2, 3] → [0, 1, 3]
    \n Segment 2: exclusive prefix sums of [4, 5] → [0, 4]
    \n Segment 3: exclusive prefix sums of [6] → [0]\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 100,000,000
    • \n
    • flags[0] = 1 always (the first element starts the first segment)
    • \n
    • flags[i] ∈ {0, 1} for all i
    • \n
    • Values are 32-bit floats in the range [-100, 100]
    • \n
    • Performance is measured with N = 50,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// values, flags, output are device pointers\nextern \"C\" void solve(const float* values, const int* flags, float* output, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# values, flags, output are tensors on the GPU\n@cute.jit\ndef solve(values: cute.Tensor, flags: cute.Tensor, output: cute.Tensor, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# values, flags are tensors on the GPU\n@jax.jit\ndef solve(values: jax.Array, flags: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# values, flags, output are device pointers\n@export\ndef solve(\n 
values: UnsafePointer[Float32, MutExternalOrigin],\n flags: UnsafePointer[Int32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# values, flags, output are tensors on the GPU\ndef solve(values: torch.Tensor, flags: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# values, flags, output are tensors on the GPU\ndef solve(values: torch.Tensor, flags: torch.Tensor, output: torch.Tensor, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " 
@abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Segmented Exclusive Prefix Sum\",\n", + " atol=1e-03,\n", + " rtol=1e-03,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " values: torch.Tensor,\n", + " flags: torch.Tensor,\n", + " output: torch.Tensor,\n", + " N: int,\n", + " ):\n", + " assert values.shape == (N,)\n", + " assert flags.shape == (N,)\n", + " assert output.shape == (N,)\n", + " assert values.dtype == torch.float32\n", + " assert flags.dtype == torch.int32\n", + " assert values.device.type == \"cuda\"\n", + "\n", + " # Global exclusive prefix sum (use float64 for accuracy in reference).\n", + " excl = torch.empty(N, dtype=torch.float64, 
device=\"cuda\")\n", + " excl[0] = 0.0\n", + " if N > 1:\n", + " excl[1:] = torch.cumsum(values[:-1].double(), dim=0)\n", + "\n", + " # The exclusive prefix sum within each segment equals the global exclusive\n", + " # prefix sum minus the global exclusive prefix sum at the segment start.\n", + " # Use segment IDs (0-indexed) to index the per-segment offsets.\n", + " seg_ids = torch.cumsum(flags.long(), dim=0) - 1 # segment index for each element\n", + " seg_mask = flags.bool()\n", + " # excl value at each segment start\n", + " seg_start_excl = excl[seg_mask] # shape: (num_segments,)\n", + " # Broadcast segment start offset to every element in that segment\n", + " per_elem_offset = seg_start_excl[seg_ids]\n", + "\n", + " output.copy_((excl - per_elem_offset).float())\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"values\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"flags\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype_f = torch.float32\n", + " dtype_i = torch.int32\n", + " # Three segments: [1,2,3], [4,5], [6]\n", + " # exclusive prefix sums: [0,1,3], [0,4], [0]\n", + " values = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], device=\"cuda\", dtype=dtype_f)\n", + " flags = torch.tensor([1, 0, 0, 1, 0, 1], device=\"cuda\", dtype=dtype_i)\n", + " output = torch.empty(6, device=\"cuda\", dtype=dtype_f)\n", + " return {\n", + " \"values\": values,\n", + " \"flags\": flags,\n", + " \"output\": output,\n", + " \"N\": 6,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype_f = torch.float32\n", + " dtype_i = torch.int32\n", + " tests = []\n", + "\n", + " def make_test(vals, segs):\n", + " \"\"\"vals: list of floats, segs: list of segment start indices\"\"\"\n", + " N = len(vals)\n", + " 
flags = torch.zeros(N, dtype=dtype_i)\n", + " for s in segs:\n", + " flags[s] = 1\n", + " return {\n", + " \"values\": torch.tensor(vals, device=\"cuda\", dtype=dtype_f),\n", + " \"flags\": flags.cuda(),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype_f),\n", + " \"N\": N,\n", + " }\n", + "\n", + " def make_random_test(N, avg_seg_len, seed=None):\n", + " if seed is not None:\n", + " torch.manual_seed(seed)\n", + " vals = torch.empty(N, dtype=dtype_f).uniform_(-10.0, 10.0)\n", + " flags = torch.zeros(N, dtype=dtype_i)\n", + " flags[0] = 1\n", + " i = avg_seg_len\n", + " while i < N:\n", + " flags[i] = 1\n", + " i += max(1, int(torch.randint(1, 2 * avg_seg_len + 1, (1,)).item()))\n", + " return {\n", + " \"values\": vals.cuda(),\n", + " \"flags\": flags.cuda(),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype_f),\n", + " \"N\": N,\n", + " }\n", + "\n", + " # Edge: single element, single segment\n", + " tests.append(make_test([5.0], [0]))\n", + "\n", + " # Edge: two elements, one segment\n", + " tests.append(make_test([3.0, 7.0], [0]))\n", + "\n", + " # Edge: two elements, two segments\n", + " tests.append(make_test([3.0, 7.0], [0, 1]))\n", + "\n", + " # Edge: four elements, all in one segment\n", + " tests.append(make_test([1.0, 2.0, 3.0, 4.0], [0]))\n", + "\n", + " # Four elements, each its own segment (all outputs = 0)\n", + " tests.append(make_test([1.0, -2.0, 3.0, -4.0], [0, 1, 2, 3]))\n", + "\n", + " # Negative values in mixed segments: two segments of length 3\n", + " tests.append(make_test([-1.0, -2.0, -3.0, 5.0, 6.0, -7.0], [0, 3]))\n", + "\n", + " # Power-of-2: N=16, two equal segments\n", + " tests.append(make_test([float(i) for i in range(16)], [0, 8]))\n", + "\n", + " # Power-of-2: N=32, segments of length 4\n", + " tests.append(make_test([1.0] * 32, list(range(0, 32, 4))))\n", + "\n", + " # Power-of-2: N=64, random segment lengths ~8\n", + " tests.append(make_random_test(64, avg_seg_len=8, seed=42))\n", + "\n", + " # 
Power-of-2: N=128, random segment lengths ~16\n", + " tests.append(make_random_test(128, avg_seg_len=16, seed=7))\n", + "\n", + " # Non-power-of-2: N=30, segments of length ~5\n", + " tests.append(make_random_test(30, avg_seg_len=5, seed=13))\n", + "\n", + " # Non-power-of-2: N=100, small segments of length ~3\n", + " tests.append(make_random_test(100, avg_seg_len=3, seed=99))\n", + "\n", + " # Non-power-of-2: N=255, segments spanning multiple warps\n", + " tests.append(make_random_test(255, avg_seg_len=32, seed=17))\n", + "\n", + " # Realistic: N=1024, segments of length ~64\n", + " tests.append(make_random_test(1024, avg_seg_len=64, seed=11))\n", + "\n", + " # Realistic: N=10000, segments crossing block boundaries\n", + " tests.append(make_random_test(10000, avg_seg_len=256, seed=55))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype_f = torch.float32\n", + " dtype_i = torch.int32\n", + " N = 50_000_000\n", + " torch.manual_seed(42)\n", + " vals = torch.empty(N, dtype=dtype_f).uniform_(-1.0, 1.0)\n", + " flags = torch.zeros(N, dtype=dtype_i)\n", + " flags[0] = 1\n", + " # Segments of average length 256 (crosses many thread blocks)\n", + " seg_starts = torch.arange(256, N, 256, dtype=torch.long)\n", + " flags[seg_starts] = 1\n", + " return {\n", + " \"values\": vals.cuda(),\n", + " \"flags\": flags.cuda(),\n", + " \"output\": torch.empty(N, device=\"cuda\", dtype=dtype_f),\n", + " \"N\": N,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/71_parallel_merge.ipynb b/challenges/colab_exports/medium/71_parallel_merge.ipynb new file mode 100644 index 00000000..2154329f --- /dev/null +++ b/challenges/colab_exports/medium/71_parallel_merge.ipynb @@ -0,0 +1,535 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Given two sorted arrays A of length M and B of length\n N, both containing 32-bit floating-point values in non-decreasing order, produce a\n single sorted array C of length M + N containing all elements of\n A and B in non-decreasing order.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final merged result must be stored in C
    • \n
    \n\n

    Example

    \n
    \nInput:\n  A = [1.0, 3.0, 5.0, 7.0],  M = 4\n  B = [2.0, 4.0, 6.0, 8.0],  N = 4\n\nOutput:\n  C = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]\n
    \n\n
    \nInput:\n  A = [-1.0, 1.0, 3.0],  M = 3\n  B = [2.0],             N = 1\n\nOutput:\n  C = [-1.0, 1.0, 2.0, 3.0]\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ M, N ≤ 50,000,000
    • \n
    • M + N ≤ 50,000,000
    • \n
    • Both A and B are sorted in non-decreasing order
    • \n
    • Elements are 32-bit floats
    • \n
    • Performance is measured with M = 25,000,000, N = 25,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// A, B, C are device pointers (i.e. pointers to memory on the GPU)\nextern \"C\" void solve(const float* A, const float* B, float* C, int M, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(A: cute.Tensor, B: cute.Tensor, C: cute.Tensor, M: cute.Uint32, N: cute.Uint32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, M: int, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.memory import UnsafePointer\n\n\n# A, B, C are device pointers (i.e. 
pointers to memory on the GPU)\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, M: int, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, M: int, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + 
"\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Parallel Merge\",\n", + " atol=0.0,\n", + " rtol=0.0,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " A: torch.Tensor,\n", + " B: torch.Tensor,\n", + " C: torch.Tensor,\n", + " M: int,\n", + " N: int,\n", + " ):\n", + " assert A.shape == (M,), f\"Expected A.shape=({M},), got {A.shape}\"\n", + " assert B.shape == (N,), f\"Expected B.shape=({N},), got {B.shape}\"\n", + " assert C.shape == (M + N,), f\"Expected C.shape=({M + N},), got {C.shape}\"\n", + " assert A.dtype == torch.float32\n", + " assert B.dtype == torch.float32\n", + " assert C.dtype == torch.float32\n", + " assert A.device.type == 
\"cuda\"\n", + "\n", + " merged, _ = torch.sort(torch.cat([A, B]))\n", + " C.copy_(merged)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"C\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A = torch.tensor([1.0, 3.0, 5.0, 7.0], device=\"cuda\", dtype=dtype)\n", + " B = torch.tensor([2.0, 4.0, 6.0, 8.0], device=\"cuda\", dtype=dtype)\n", + " M, N = 4, 4\n", + " C = torch.empty(M + N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"B\": B, \"C\": C, \"M\": M, \"N\": N}\n", + "\n", + " def _make_test(self, M: int, N: int, lo: float = -10.0, hi: float = 10.0) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A, _ = torch.sort(torch.empty(M, device=\"cuda\", dtype=dtype).uniform_(lo, hi))\n", + " B, _ = torch.sort(torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(lo, hi))\n", + " C = torch.empty(M + N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"B\": B, \"C\": C, \"M\": M, \"N\": N}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # Edge cases \u2014 tiny sizes\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([0.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([1.0], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.empty(2, device=\"cuda\", dtype=dtype),\n", + " \"M\": 1,\n", + " \"N\": 1,\n", + " }\n", + " )\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([2.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([-1.0, 1.0, 3.0], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"M\": 1,\n", + " \"N\": 
3,\n", + " }\n", + " )\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([-1.0, 1.0, 3.0], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([2.0], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"N\": 1,\n", + " }\n", + " )\n", + " # All zeros\n", + " tests.append(\n", + " {\n", + " \"A\": torch.zeros(2, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.zeros(2, device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.empty(4, device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"N\": 2,\n", + " }\n", + " )\n", + "\n", + " # Power-of-2 sizes\n", + " tests.append(self._make_test(16, 16))\n", + " tests.append(self._make_test(32, 32, lo=-100.0, hi=0.0)) # all negative\n", + " tests.append(self._make_test(64, 128))\n", + " tests.append(self._make_test(512, 512))\n", + " tests.append(self._make_test(1024, 1024))\n", + "\n", + " # Non-power-of-2 sizes\n", + " tests.append(self._make_test(30, 33))\n", + " tests.append(self._make_test(100, 77))\n", + " tests.append(self._make_test(255, 127))\n", + "\n", + " # A entirely less than B (no interleaving needed)\n", + " A_low, _ = torch.sort(torch.empty(256, device=\"cuda\", dtype=dtype).uniform_(-20.0, -10.0))\n", + " B_high, _ = torch.sort(torch.empty(256, device=\"cuda\", dtype=dtype).uniform_(10.0, 20.0))\n", + " tests.append(\n", + " {\n", + " \"A\": A_low,\n", + " \"B\": B_high,\n", + " \"C\": torch.empty(512, device=\"cuda\", dtype=dtype),\n", + " \"M\": 256,\n", + " \"N\": 256,\n", + " }\n", + " )\n", + "\n", + " # Many duplicate values\n", + " A_dup = torch.sort(torch.randint(0, 5, (128,), device=\"cuda\").to(dtype=dtype)).values\n", + " B_dup = torch.sort(torch.randint(0, 5, (128,), device=\"cuda\").to(dtype=dtype)).values\n", + " tests.append(\n", + " {\n", + " \"A\": A_dup,\n", + " \"B\": B_dup,\n", + " \"C\": torch.empty(256, device=\"cuda\", dtype=dtype),\n", + " \"M\": 128,\n", + " \"N\": 128,\n", + " }\n", + " )\n", + 
"\n", + " # Realistic size\n", + " tests.append(self._make_test(5000, 7000))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M = 25_000_000\n", + " N = 25_000_000\n", + " A, _ = torch.sort(torch.empty(M, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0))\n", + " B, _ = torch.sort(torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0))\n", + " C = torch.empty(M + N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"B\": B, \"C\": C, \"M\": M, \"N\": N}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/72_stream_compaction.ipynb b/challenges/colab_exports/medium/72_stream_compaction.ipynb new file mode 100644 index 00000000..d9ada0b1 --- /dev/null +++ b/challenges/colab_exports/medium/72_stream_compaction.ipynb @@ -0,0 +1,488 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Given a 1D array A of N 32-bit floating point numbers, compact all\n positive elements (A[i] > 0) to the front of the output array out,\n preserving their original relative order. Fill any remaining positions with 0.0.\n Stream compaction is a fundamental GPU primitive used throughout rendering, sparse computation,\n and collision detection.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native GPU features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • \n The first k positions of out must contain the k elements of\n A where A[i] > 0, in their original order\n
    • \n
    • Positions k through N−1 of out must be 0.0
    • \n
    • Elements where A[i] = 0.0 are not selected
    • \n
    \n\n

    Example

    \n
    \nInput:  A = [1.0, -2.0, 3.0, 0.0, -1.0, 4.0]\nOutput: out = [1.0, 3.0, 4.0, 0.0, 0.0, 0.0]\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ N ≤ 100,000,000
    • \n
    • −1000.0 ≤ A[i] ≤ 1000.0
    • \n
    • out is pre-allocated with N elements, initialised to 0.0
    • \n
    • Performance is measured with N = 50,000,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// A, out are device pointers\nextern \"C\" void solve(const float* A, int N, float* out) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, out are tensors on the GPU\n@cute.jit\ndef solve(A: cute.Tensor, N: cute.Uint32, out: cute.Tensor):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A is a tensor on GPU\n@jax.jit\ndef solve(A: jax.Array, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.memory import UnsafePointer\n\n\n# A, out are device pointers\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n N: Int32,\n out: UnsafePointer[Float32, MutExternalOrigin],\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + 
{ + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, out are tensors on the GPU\ndef solve(A: torch.Tensor, N: int, out: torch.Tensor):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# A, out are tensors on the GPU\ndef solve(A: torch.Tensor, N: int, out: torch.Tensor):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate 
an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Stream Compaction\",\n", + " atol=0.0,\n", + " rtol=0.0,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(self, A: torch.Tensor, N: int, out: torch.Tensor):\n", + " assert A.shape == (N,), f\"Expected A.shape=({N},), got {A.shape}\"\n", + " assert out.shape == (N,), f\"Expected out.shape=({N},), got {out.shape}\"\n", + " assert A.dtype == torch.float32\n", + " assert out.dtype == torch.float32\n", + " assert A.device.type == \"cuda\"\n", + "\n", + " mask = A > 0\n", + " selected = A[mask]\n", + " k = selected.numel()\n", + " out[:k].copy_(selected)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"out\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A = torch.tensor([1.0, -2.0, 3.0, 0.0, -1.0, 4.0], device=\"cuda\", dtype=dtype)\n", + " N = 6\n", + " 
out = torch.zeros(N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"N\": N, \"out\": out}\n", + "\n", + " def _make_test(self, N: int, lo: float = -2.0, hi: float = 2.0) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(lo, hi)\n", + " out = torch.zeros(N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"N\": N, \"out\": out}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # Edge cases \u2014 tiny sizes\n", + " # N=1, zero (not positive, nothing selected)\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([0.0], device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " \"out\": torch.zeros(1, device=\"cuda\", dtype=dtype),\n", + " }\n", + " )\n", + " # N=1, positive (all selected)\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([5.0], device=\"cuda\", dtype=dtype),\n", + " \"N\": 1,\n", + " \"out\": torch.zeros(1, device=\"cuda\", dtype=dtype),\n", + " }\n", + " )\n", + " # N=4, mixed with exact zeros and negatives\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([-1.0, 2.0, 0.0, 4.0], device=\"cuda\", dtype=dtype),\n", + " \"N\": 4,\n", + " \"out\": torch.zeros(4, device=\"cuda\", dtype=dtype),\n", + " }\n", + " )\n", + "\n", + " # Power-of-2 sizes\n", + " # All positive \u2014 every element passes the predicate\n", + " A_all_pos = torch.rand(16, device=\"cuda\", dtype=dtype) + 0.1\n", + " tests.append({\"A\": A_all_pos, \"N\": 16, \"out\": torch.zeros(16, device=\"cuda\", dtype=dtype)})\n", + "\n", + " # All negative \u2014 no element passes the predicate\n", + " A_all_neg = -(torch.rand(32, device=\"cuda\", dtype=dtype) + 0.1)\n", + " tests.append({\"A\": A_all_neg, \"N\": 32, \"out\": torch.zeros(32, device=\"cuda\", dtype=dtype)})\n", + "\n", + " # Mixed, wide range\n", + " tests.append(self._make_test(256, lo=-5.0, hi=5.0))\n", + " 
tests.append(self._make_test(1024, lo=-10.0, hi=10.0))\n", + "\n", + " # Non-power-of-2\n", + " tests.append(self._make_test(100, lo=-3.0, hi=3.0))\n", + " tests.append(self._make_test(255, lo=-1.0, hi=1.0))\n", + "\n", + " # Realistic size\n", + " tests.append(self._make_test(10000, lo=-100.0, hi=100.0))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " N = 50_000_000\n", + " A = torch.empty(N, device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " out = torch.zeros(N, device=\"cuda\", dtype=dtype)\n", + " return {\"A\": A, \"N\": N, \"out\": out}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/75_sparse_matrix_dense_matrix_multiplication.ipynb b/challenges/colab_exports/medium/75_sparse_matrix_dense_matrix_multiplication.ipynb new file mode 100644 index 00000000..e9e90196 --- /dev/null +++ b/challenges/colab_exports/medium/75_sparse_matrix_dense_matrix_multiplication.ipynb @@ -0,0 +1,645 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a GPU program that multiplies a sparse matrix A of dimensions M × N\n by a dense matrix B of dimensions N × K, producing a dense output matrix\n C of dimensions M × K.\n All matrices are stored in row-major order using 32-bit floats.\n The matrix A is approximately 60–70% sparse (i.e., 60–70% of elements are zero),\n and nnz gives the number of non-zero elements in A.\n

    \n\n

    \n Mathematically, the operation is defined as:\n $$\n C_{ij} = \\sum_{k=0}^{N-1} A_{ik} \\cdot B_{kj} \\quad \\text{for} \\quad i = 0, \\ldots, M-1,\\; j = 0, \\ldots, K-1\n $$\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only GPU native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in matrix C
    • \n
    \n\n

    Example

    \n

    \nInput:
    \nMatrix $A$ ($3 \\times 4$):\n$$\n\\begin{bmatrix}\n2.0 & 0.0 & 0.0 & 1.0 \\\\\n0.0 & 3.0 & 0.0 & 0.0 \\\\\n0.0 & 0.0 & 4.0 & 0.0\n\\end{bmatrix}\n$$\nMatrix $B$ ($4 \\times 2$):\n$$\n\\begin{bmatrix}\n1.0 & 2.0 \\\\\n3.0 & 4.0 \\\\\n5.0 & 6.0 \\\\\n7.0 & 8.0\n\\end{bmatrix}\n$$\nOutput:
    \nMatrix $C$ ($3 \\times 2$):\n$$\n\\begin{bmatrix}\n9.0 & 12.0 \\\\\n9.0 & 12.0 \\\\\n20.0 & 24.0\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ M, N, K ≤ 8,192
    • \n
    • All values in A and B are 32-bit floats in the range [−10, 10]
    • \n
    • The matrix A is approximately 60–70% sparse
    • \n
    • Performance is measured with M = 4,096, N = 2,048, K = 512
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// A, B, C are device pointers\nextern \"C\" void solve(const float* A, const float* B, float* C, int M, int N, int K, int nnz) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# A, B, C are tensors on the GPU\n@cute.jit\ndef solve(\n A: cute.Tensor,\n B: cute.Tensor,\n C: cute.Tensor,\n M: cute.Int32,\n N: cute.Int32,\n K: cute.Int32,\n nnz: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# A, B are tensors on GPU\n@jax.jit\ndef solve(A: jax.Array, B: jax.Array, M: int, N: int, K: int, nnz: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# A, B, C are device 
pointers\n@export\ndef solve(\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n K: Int32,\n nnz: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, M: int, N: int, K: int, nnz: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# A, B, C are tensors on the GPU\ndef solve(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, M: int, N: int, K: int, nnz: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution 
implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Sparse Matrix-Dense Matrix Multiplication\",\n", + " atol=1e-03,\n", + " rtol=1e-03,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " A: torch.Tensor,\n", + " B: torch.Tensor,\n", + " C: torch.Tensor,\n", + " M: int,\n", + " N: int,\n", + " K: int,\n", + " nnz: int,\n", + " ):\n", + " if A.shape == (M * N,):\n", + " A_matrix = A.view(M, N)\n", + " elif A.shape == (M, N):\n", + " A_matrix = A\n", + " else:\n", + " raise AssertionError(\n", + " f\"A.shape {A.shape} does not match expected {(M * N,)} or {(M, N)}\"\n", + " )\n", + " if B.shape == (N * 
K,):\n", + " B_matrix = B.view(N, K)\n", + " elif B.shape == (N, K):\n", + " B_matrix = B\n", + " else:\n", + " raise AssertionError(\n", + " f\"B.shape {B.shape} does not match expected {(N * K,)} or {(N, K)}\"\n", + " )\n", + " assert C.shape == (M, K) or C.shape == (\n", + " M * K,\n", + " ), f\"C.shape {C.shape} does not match expected {(M, K)} or {(M * K,)}\"\n", + " assert A_matrix.dtype == torch.float32\n", + " assert B_matrix.dtype == torch.float32\n", + " assert A_matrix.device.type == \"cuda\"\n", + " assert B_matrix.device.type == \"cuda\"\n", + " assert C.device.type == \"cuda\"\n", + " result = torch.matmul(A_matrix, B_matrix)\n", + " C.copy_(result.view(C.shape))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"A\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"B\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"C\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " \"nnz\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " A = torch.tensor(\n", + " [\n", + " [2.0, 0.0, 0.0, 1.0],\n", + " [0.0, 3.0, 0.0, 0.0],\n", + " [0.0, 0.0, 4.0, 0.0],\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " B = torch.tensor(\n", + " [\n", + " [1.0, 2.0],\n", + " [3.0, 4.0],\n", + " [5.0, 6.0],\n", + " [7.0, 8.0],\n", + " ],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " )\n", + " C = torch.empty((3, 2), device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"M\": 3,\n", + " \"N\": 4,\n", + " \"K\": 2,\n", + " \"nnz\": 4,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " tests = []\n", + "\n", + " # edge_1x1x1\n", + " tests.append(\n", + " {\n", + " 
\"A\": torch.tensor([[3.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([[2.0]], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.empty((1, 1), device=\"cuda\", dtype=dtype),\n", + " \"M\": 1,\n", + " \"N\": 1,\n", + " \"K\": 1,\n", + " \"nnz\": 1,\n", + " }\n", + " )\n", + "\n", + " # edge_2x2_k1_spmv_like\n", + " tests.append(\n", + " {\n", + " \"A\": torch.tensor([[1.0, 0.0], [0.0, 2.0]], device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([[3.0], [4.0]], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.empty((2, 1), device=\"cuda\", dtype=dtype),\n", + " \"M\": 2,\n", + " \"N\": 2,\n", + " \"K\": 1,\n", + " \"nnz\": 2,\n", + " }\n", + " )\n", + "\n", + " # edge_zero_matrix\n", + " tests.append(\n", + " {\n", + " \"A\": torch.zeros((3, 3), device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], device=\"cuda\", dtype=dtype),\n", + " \"C\": torch.empty((3, 2), device=\"cuda\", dtype=dtype),\n", + " \"M\": 3,\n", + " \"N\": 3,\n", + " \"K\": 2,\n", + " \"nnz\": 0,\n", + " }\n", + " )\n", + "\n", + " # edge_identity_a\n", + " tests.append(\n", + " {\n", + " \"A\": torch.eye(4, device=\"cuda\", dtype=dtype),\n", + " \"B\": torch.tensor(\n", + " [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],\n", + " device=\"cuda\",\n", + " dtype=dtype,\n", + " ),\n", + " \"C\": torch.empty((4, 3), device=\"cuda\", dtype=dtype),\n", + " \"M\": 4,\n", + " \"N\": 4,\n", + " \"K\": 3,\n", + " \"nnz\": 4,\n", + " }\n", + " )\n", + "\n", + " # power_of_2_16x16x8\n", + " M, N, K = 16, 16, 8\n", + " A_dense = torch.empty((M, N), device=\"cuda\", dtype=dtype).uniform_(-2.0, 2.0)\n", + " mask = torch.rand((M, N), device=\"cuda\") > 0.65\n", + " A_sparse = A_dense * mask\n", + " tests.append(\n", + " {\n", + " \"A\": A_sparse,\n", + " \"B\": torch.empty((N, K), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.empty((M, K), device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + 
" \"N\": N,\n", + " \"K\": K,\n", + " \"nnz\": int(mask.sum().item()),\n", + " }\n", + " )\n", + "\n", + " # power_of_2_64x32x16\n", + " M, N, K = 64, 32, 16\n", + " A_dense = torch.empty((M, N), device=\"cuda\", dtype=dtype).uniform_(-3.0, 3.0)\n", + " mask = torch.rand((M, N), device=\"cuda\") > 0.70\n", + " A_sparse = A_dense * mask\n", + " tests.append(\n", + " {\n", + " \"A\": A_sparse,\n", + " \"B\": torch.empty((N, K), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.empty((M, K), device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"nnz\": int(mask.sum().item()),\n", + " }\n", + " )\n", + "\n", + " # non_power_of_2_negative_values\n", + " M, N, K = 30, 50, 20\n", + " A_dense = torch.empty((M, N), device=\"cuda\", dtype=dtype).uniform_(-5.0, 5.0)\n", + " mask = torch.rand((M, N), device=\"cuda\") > 0.65\n", + " A_sparse = A_dense * mask\n", + " tests.append(\n", + " {\n", + " \"A\": A_sparse,\n", + " \"B\": torch.empty((N, K), device=\"cuda\", dtype=dtype).uniform_(-3.0, 3.0),\n", + " \"C\": torch.empty((M, K), device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"nnz\": int(mask.sum().item()),\n", + " }\n", + " )\n", + "\n", + " # non_power_of_2_255x100x33\n", + " M, N, K = 255, 100, 33\n", + " A_dense = torch.empty((M, N), device=\"cuda\", dtype=dtype).uniform_(-2.0, 2.0)\n", + " mask = torch.rand((M, N), device=\"cuda\") > 0.70\n", + " A_sparse = A_dense * mask\n", + " tests.append(\n", + " {\n", + " \"A\": A_sparse,\n", + " \"B\": torch.empty((N, K), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.empty((M, K), device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"nnz\": int(mask.sum().item()),\n", + " }\n", + " )\n", + "\n", + " # realistic_1000x500x64\n", + " M, N, K = 1000, 500, 64\n", + " A_dense = torch.empty((M, N), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " mask = 
torch.rand((M, N), device=\"cuda\") > 0.65\n", + " A_sparse = A_dense * mask\n", + " tests.append(\n", + " {\n", + " \"A\": A_sparse,\n", + " \"B\": torch.empty((N, K), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0),\n", + " \"C\": torch.empty((M, K), device=\"cuda\", dtype=dtype),\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"nnz\": int(mask.sum().item()),\n", + " }\n", + " )\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M = 4096\n", + " N = 2048\n", + " K = 512\n", + " A_dense = torch.empty((M, N), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " mask = torch.rand((M, N), device=\"cuda\") > 0.65\n", + " A_sparse = A_dense * mask\n", + " nnz = int(mask.sum().item())\n", + " B = torch.empty((N, K), device=\"cuda\", dtype=dtype).uniform_(-1.0, 1.0)\n", + " C = torch.empty((M, K), device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"A\": A_sparse,\n", + " \"B\": B,\n", + " \"C\": C,\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"nnz\": nnz,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/76_adder_transformer.ipynb b/challenges/colab_exports/medium/76_adder_transformer.ipynb new file mode 100644 index 00000000..400ca8f9 --- /dev/null +++ b/challenges/colab_exports/medium/76_adder_transformer.ipynb @@ -0,0 +1,728 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \nRun batched autoregressive inference for a 10-parameter transformer that adds two 10-digit\nnumbers. Given prompts of shape [batch_size, 31] (int32) and a 10-float weight\nbuffer, write output logits of shape [batch_size, 11, 10] — one logit\nrow per decode step over the 10-digit vocabulary (0–9). All tensors are float32 except\nthe int32 prompts.\n

    \n\n

    \nThe model comes from the\nAdderBoard\ncompetition for the smallest autoregressive transformer that adds 10-digit numbers at\n≥99% accuracy. It encodes carry propagation in 10 learned parameters via RoPE geometry,\ntied embeddings, and SwiGLU gating.\n

    \n\n\n \n\n \n \n Token Prompt [B,31]\n\n \n \n Embed: [w0-w1*d², -d]\n \n\n \n \n Unit RMSNorm\n \n\n \n \n Self-Attention (1 head, dim=2)\n\n \n Q Proj [2p]\n \n K Proj [0p]\n \n V Proj [1p]\n\n \n QK Norm + RoPE(ω=2π/19) + Causal Attn\n\n \n\n \n + residual\n \n \n \n\n \n \n +\n \n \n\n \n \n Unit RMSNorm\n \n \n \n\n \n \n MLP: Gate + SwiGLU + Carry [3p]\n \n\n \n \n RMSNorm [2p] + Logits\n \n\n \n Total: 10 parameters (2+2+1+2+1+2)\n\n \n \n \n \n \n\n\n

    Model Architecture

    \n\n

    Single-layer pre-norm transformer. Hidden dim 2, 1 head, head dim 2, vocab 10 (digits\n0–9), tied input/output embeddings.

    \n\n

    Each step runs the full sequence [batch_size, seq_len, 2] through:

    \n\n

    1. Token Embedding (2 parameters: w0, w1)

    \n

    $$e(d) = \\begin{bmatrix} w_0 - w_1 \\cdot d^2 \\\\ -d \\end{bmatrix}$$

    \n\n

    2. Unit RMSNorm (no parameters)

    \n

    $$\\text{UnitRMSNorm}(x) = \\frac{x}{\\sqrt{\\text{mean}(x^2) + \\epsilon}}, \\quad \\epsilon = 10^{-6}$$

    \n\n

    3. Self-Attention (3 parameters: q0, q1, v0)

    \n

    Projections applied to the normed hidden state h with shape [*, 2]:

    \n

    $$Q = \\begin{bmatrix} h_0 \\cdot q_0 \\\\ h_0 \\cdot q_1 \\end{bmatrix}, \\quad\nK = \\begin{bmatrix} h_0 \\\\ 0 \\end{bmatrix}, \\quad\nV = \\begin{bmatrix} h_1 \\cdot v_0 \\\\ 0 \\end{bmatrix}$$

    \n\n

    After projection, Q and K are each normalized with Unit RMSNorm, then RoPE is applied\nwith angular frequency ω = 2π/19:

    \n

    $$\\text{RoPE}(x, p) = \\begin{bmatrix} x_0 \\cos(p\\omega) - x_1 \\sin(p\\omega) \\\\\nx_0 \\sin(p\\omega) + x_1 \\cos(p\\omega) \\end{bmatrix}$$

    \n\n

    Scaled dot-product attention with causal mask uses scale factor:

    \n

    $$\\text{scale} = \\frac{1}{\\sqrt{d_h}} \\cdot S^2$$

    \n

    where $d_h = 2$ is the head dimension and $S^2$ is the QK-norm scale constant\n(see weight table below for exact value).

    \n\n

    The output projection maps [attn_0, attn_1] → [0, attn_0]\n(no parameters), followed by a residual connection.

    \n\n

    4. MLP (3 parameters: a, c, carry)

    \n

    Applied to the unit-RMSNorm of the post-attention hidden state:

    \n

    $$g_0 = h_0 \\cdot a + h_1 \\cdot c, \\quad g_1 = h_0 \\cdot (a - c / 1000) + h_1 \\cdot c$$

    \n

    $$\\text{base} = h_0, \\quad \\text{up} = [\\text{base}, \\text{base}]$$

    \n

    $$\\text{mix} = \\text{SiLU}([g_0, g_1]) \\odot \\text{up}$$

    \n

    $$\\text{MLP}(h) = \\begin{bmatrix} 0 \\\\ \\text{carry} \\cdot (\\text{mix}_1 - \\text{mix}_0) \\end{bmatrix}$$

    \n

    followed by a residual connection.

    \n\n

    5. Final RMSNorm (2 parameters: n0, n1)

    \n

    Standard RMSNorm with learned weight:

    \n

    $$\\text{out} = \\frac{h}{\\sqrt{\\text{mean}(h^2) + \\epsilon}} \\odot [n_0, n_1]$$

    \n\n

    6. Output Logits (tied with embedding)

    \n

    $$\\text{logits} = \\text{out} \\cdot E^T \\quad \\text{where } E_{d} = e(d)$$

    \n\n

    Autoregressive Decoding

    \n

    Starting from the 31-token prompt, repeat 11 times:

    \n
      \n
    1. Run the full forward pass on the current sequence
    2. Extract logits at the last position → store in output
    3. Append argmax(logits) as the next token
    \n

    The sequence grows from length 31 to 42 over the 11 decode steps.

    \n\n

    Weight Layout

    \n\n \n \n \n \n \n \n \n
    | Offset | Size | Name   | Description                          |
    |--------|------|--------|--------------------------------------|
    | 0      | 2    | embed  | Embedding: e(d) = [w0 - w1*d², -d]   |
    | 2      | 2    | q_proj | Q projection weights [q0, q1]        |
    | 4      | 1    | v_proj | V projection weight v0               |
    | 5      | 2    | gate   | MLP gate weights [a, c]              |
    | 7      | 1    | carry  | MLP carry weight                     |
    | 8      | 2    | norm   | Final RMSNorm weight [n0, n1]        |
    \n\n

    Token Encoding

    \n

    Each input pair (a, b) of 10-digit numbers is encoded as a 31-token sequence:

    \n
    \n[0, a_rev_0, ..., a_rev_9, 0, 0, 0, 0, 0, 0, 0, 0, 0, b_rev_0, ..., b_rev_9, 0]\n
    \n

    where a_rev and b_rev are the digits in least-significant-first order,\nzero-padded to 10 digits. The model then generates 11 output tokens (digits of the sum, also\nleast-significant-first).

    \n\n

    Implementation Requirements

    \n
      \n
    • Implement solve(prompts, output, weights, batch_size) with the exact signature shown (JAX exception: solve(prompts, weights, batch_size) returns the output tensor directly)
    • \n
    • Do not use any external libraries beyond what the framework provides
    • \n
    • The function must write logits into the output buffer (except JAX, which returns it)
    • \n
    • Architecture constants are fixed: vocab_size = 10, hidden_dim = 2,\n head_dim = 2, num_heads = 1, prompt_len = 31,\n decode_steps = 11
    • \n
    • RMSNorm epsilon = 10⁻⁶ (1e-6)
    • \n
    • RoPE angular frequency ω = 2π/19
    • \n
    • Attention scale = (1/√2) · S² where S² = ln(10) / (√2 · (cos(0.3ω) − cos(0.7ω)))
    • \n
    • SiLU activation: silu(x) = x · sigmoid(x)
    • \n
    \n\n

    Example

    \n

    With batch_size = 2 and pairs (3, 5), (99, 1):

    \n
    \nInput prompts (shape [2, 31]):\n  [0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n  [0, 9, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\nOutput logits shape: [2, 11, 10]\n  (logits at each of 11 decode steps over 10 digit classes)\n\nExpected decoded tokens (via argmax):\n  Pair (3, 5):   sum = 8       → [8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n  Pair (99, 1):  sum = 100     → [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]\n
    \n\n

    Constraints

    \n
      \n
    • batch_size: 1 ≤ batch_size ≤ 100,000
    • \n
    • prompts: 32-bit integer tensor, values in [0, 9]
    • \n
    • weights: 32-bit float tensor with exactly 10 elements
    • \n
    • output: 32-bit float tensor of shape [batch_size, 11, 10]
    • \n
    • Input numbers are in range [0, 9,999,999,999] (10-digit unsigned integers)
    • \n
    • Performance is measured with batch_size = 100,000
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// prompts, output, weights are device pointers\nextern \"C\" void solve(const int* prompts, float* output, const float* weights, int batch_size) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# prompts, output, weights are tensors on the GPU\n@cute.jit\ndef solve(\n prompts: cute.Tensor,\n output: cute.Tensor,\n weights: cute.Tensor,\n batch_size: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# prompts, weights are tensors on GPU\n@jax.jit\ndef solve(prompts: jax.Array, weights: jax.Array, batch_size: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from gpu.host import DeviceContext\nfrom gpu.id import block_dim, block_idx, thread_idx\nfrom memory import UnsafePointer\nfrom math import ceildiv\n\n\n# prompts, output, weights 
are device pointers\n@export\ndef solve(\n prompts: UnsafePointer[Int32],\n output: UnsafePointer[Float32],\n weights: UnsafePointer[Float32],\n batch_size: Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# prompts, output, weights are tensors on the GPU\ndef solve(prompts: torch.Tensor, output: torch.Tensor, weights: torch.Tensor, batch_size: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# prompts, output, weights are tensors on the GPU\ndef solve(prompts: torch.Tensor, output: torch.Tensor, weights: torch.Tensor, batch_size: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " 
pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "import math\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "import torch.nn.functional as F\n", + "\n", + "# Model architecture constants\n", + "VOCAB_SIZE = 10\n", + "MODEL_DIM = 2\n", + "HEAD_DIM = 2\n", + "PROMPT_LEN = 31\n", + "OUTPUT_DIGITS = 11\n", + "RMS_EPS = 1e-6\n", + "\n", + "# Derived constants from the hand-crafted 10-parameter adder model\n", + "EMBED_CONST = 1000.0\n", + "CONST_NORM = math.sqrt(MODEL_DIM)\n", + "DIGIT_SCALE = EMBED_CONST / CONST_NORM\n", + "DECODE_QUAD = 1e-3\n", + "DECODE_CURVATURE = 0.1\n", + "ROPE_PERIOD = 19.0\n", + "OMEGA = 2.0 * math.pi / ROPE_PERIOD\n", + "PEAK_EPS = 0.3\n", + "PHI = OMEGA * (10.0 + PEAK_EPS)\n", + "TARGET_LOGIT_GAP = math.log(10.0)\n", + "ATTN_AMPLITUDE = TARGET_LOGIT_GAP / (\n", + " math.cos(OMEGA * PEAK_EPS) - math.cos(OMEGA * (1.0 - PEAK_EPS))\n", + 
")\n", + "QK_NORM_SCALE = math.sqrt(ATTN_AMPLITUDE / math.sqrt(2.0))\n", + "CARRY_ALPHA = 256.0 / CONST_NORM\n", + "ATTN_SCALE = (HEAD_DIM**-0.5) * (QK_NORM_SCALE**2)\n", + "\n", + "# Weight buffer layout (10 parameters total)\n", + "O_EMBED = 0 # [2] embedding: e(d) = [w0 - w1*d^2, -d]\n", + "O_QPROJ = 2 # [2] Q projection weights\n", + "O_VPROJ = 4 # [1] V projection weight\n", + "O_GATE = 5 # [2] MLP gate weights\n", + "O_CARRY = 7 # [1] MLP carry weight\n", + "O_NORM = 8 # [2] final RMSNorm weight\n", + "TOTAL_WEIGHTS = 10\n", + "\n", + "\n", + "def _encode_pair(a: int, b: int) -> list:\n", + " a_digits = [int(c) for c in f\"{a:010d}\"][::-1]\n", + " b_digits = [int(c) for c in f\"{b:010d}\"][::-1]\n", + " return [0] + a_digits + [0] * 9 + b_digits + [0]\n", + "\n", + "\n", + "def _encode_pairs_batch(a_vals: torch.Tensor, b_vals: torch.Tensor, device) -> torch.Tensor:\n", + " batch_size = a_vals.shape[0]\n", + " prompts = torch.zeros(batch_size, PROMPT_LEN, device=device, dtype=torch.int32)\n", + " a = a_vals.clone().to(torch.int64)\n", + " for i in range(10):\n", + " prompts[:, 1 + i] = (a % 10).to(torch.int32)\n", + " a = a // 10\n", + " b = b_vals.clone().to(torch.int64)\n", + " for i in range(10):\n", + " prompts[:, 20 + i] = (b % 10).to(torch.int32)\n", + " b = b // 10\n", + " return prompts\n", + "\n", + "\n", + "def _init_weights(device) -> torch.Tensor:\n", + " w = torch.zeros(TOTAL_WEIGHTS, device=device, dtype=torch.float32)\n", + " w[O_EMBED] = EMBED_CONST\n", + " w[O_EMBED + 1] = DECODE_QUAD\n", + " w[O_QPROJ] = math.cos(PHI)\n", + " w[O_QPROJ + 1] = -math.sin(PHI)\n", + " w[O_VPROJ] = -22.0 * DIGIT_SCALE\n", + " w[O_GATE] = CARRY_ALPHA * (-94.0) / CONST_NORM\n", + " w[O_GATE + 1] = CARRY_ALPHA * DIGIT_SCALE\n", + " w[O_CARRY] = (100.0 / CARRY_ALPHA) * (1.0 / CONST_NORM)\n", + " w[O_NORM] = (DECODE_CURVATURE / DECODE_QUAD) / CONST_NORM\n", + " w[O_NORM + 1] = -(DIGIT_SCALE / 50.0)\n", + " return w\n", + "\n", + "\n", + "def _unit_rms_norm(x: 
torch.Tensor) -> torch.Tensor:\n", + " return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + RMS_EPS)\n", + "\n", + "\n", + "def _forward_pass(seq: torch.Tensor, weights: torch.Tensor) -> torch.Tensor:\n", + " batch_size, seq_len = seq.shape\n", + " device = seq.device\n", + "\n", + " embed_w = weights[O_EMBED : O_EMBED + 2]\n", + " q_w = weights[O_QPROJ : O_QPROJ + 2]\n", + " v_w = weights[O_VPROJ]\n", + " gate_w = weights[O_GATE : O_GATE + 2]\n", + " carry_w = weights[O_CARRY]\n", + " norm_w = weights[O_NORM : O_NORM + 2]\n", + "\n", + " digits = torch.arange(VOCAB_SIZE, device=device, dtype=torch.float32)\n", + " embed_table = torch.stack(\n", + " [embed_w[0] - embed_w[1] * digits * digits, -digits], dim=-1\n", + " ) # [10, 2]\n", + "\n", + " h = embed_table[seq.long()] # [batch, seq_len, 2]\n", + "\n", + " # Pre-attention unit RMSNorm (no learned parameters)\n", + " h_norm = _unit_rms_norm(h)\n", + "\n", + " # Q projection: [h0*qw0, h0*qw1]\n", + " q = torch.stack([h_norm[..., 0] * q_w[0], h_norm[..., 0] * q_w[1]], dim=-1)\n", + "\n", + " # K projection: [h0, 0]\n", + " k = torch.stack([h_norm[..., 0], torch.zeros_like(h_norm[..., 0])], dim=-1)\n", + "\n", + " # V projection: [h1*vw, 0]\n", + " v = torch.stack([h_norm[..., 1] * v_w, torch.zeros_like(h_norm[..., 1])], dim=-1)\n", + "\n", + " # QK norm\n", + " q = _unit_rms_norm(q)\n", + " k = _unit_rms_norm(k)\n", + "\n", + " # RoPE\n", + " positions = torch.arange(seq_len, device=device, dtype=torch.float32)\n", + " angles = positions * OMEGA\n", + " cos_a = torch.cos(angles)\n", + " sin_a = torch.sin(angles)\n", + "\n", + " q_rot = torch.stack(\n", + " [q[..., 0] * cos_a - q[..., 1] * sin_a, q[..., 0] * sin_a + q[..., 1] * cos_a], dim=-1\n", + " )\n", + " k_rot = torch.stack(\n", + " [k[..., 0] * cos_a - k[..., 1] * sin_a, k[..., 0] * sin_a + k[..., 1] * cos_a], dim=-1\n", + " )\n", + "\n", + " # Attention: [batch, 1, seq_len, 2]\n", + " q_rot = q_rot.unsqueeze(1)\n", + " k_rot = 
k_rot.unsqueeze(1)\n", + " v = v.unsqueeze(1)\n", + "\n", + " attn_scores = torch.matmul(q_rot, k_rot.transpose(-2, -1)) * ATTN_SCALE\n", + " causal_mask = torch.triu(\n", + " torch.full((seq_len, seq_len), float(\"-inf\"), device=device), diagonal=1\n", + " )\n", + " attn_scores = attn_scores + causal_mask.unsqueeze(0).unsqueeze(0)\n", + " attn_probs = F.softmax(attn_scores, dim=-1)\n", + " attn_out = torch.matmul(attn_probs, v).squeeze(1) # [batch, seq_len, 2]\n", + "\n", + " # O projection: [0, attn[..., 0]]\n", + " o = torch.stack([torch.zeros_like(attn_out[..., 0]), attn_out[..., 0]], dim=-1)\n", + "\n", + " # Residual\n", + " h = h + o\n", + "\n", + " # Pre-MLP unit RMSNorm\n", + " h_norm2 = _unit_rms_norm(h)\n", + "\n", + " # MLP gate projection\n", + " a_gate = gate_w[0]\n", + " c_gate = gate_w[1]\n", + " g0 = h_norm2[..., 0] * a_gate + h_norm2[..., 1] * c_gate\n", + " g1 = h_norm2[..., 0] * (a_gate - c_gate / EMBED_CONST) + h_norm2[..., 1] * c_gate\n", + " gate = torch.stack([g0, g1], dim=-1)\n", + "\n", + " # MLP carry projection with SwiGLU\n", + " base = h_norm2[..., 0]\n", + " up = base.unsqueeze(-1).expand_as(gate)\n", + " mix = F.silu(gate) * up\n", + " mlp_out = torch.stack([torch.zeros_like(base), carry_w * (mix[..., 1] - mix[..., 0])], dim=-1)\n", + "\n", + " # Residual\n", + " h = h + mlp_out\n", + "\n", + " # Final RMSNorm (with learned weight)\n", + " rms = torch.sqrt(torch.mean(h * h, dim=-1, keepdim=True) + RMS_EPS)\n", + " h = (h / rms) * norm_w\n", + "\n", + " # Output projection (tied with embedding)\n", + " logits = h @ embed_table.T # [batch, seq_len, 10]\n", + " return logits\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Adder Transformer Inference\",\n", + " atol=1e-2,\n", + " rtol=1e-2,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " prompts: torch.Tensor,\n", + " output: torch.Tensor,\n", 
+ " weights: torch.Tensor,\n", + " batch_size: int,\n", + " ):\n", + " assert prompts.shape == (batch_size, PROMPT_LEN)\n", + " assert prompts.dtype == torch.int32\n", + " assert prompts.device.type == \"cuda\"\n", + " assert output.shape == (batch_size, OUTPUT_DIGITS, VOCAB_SIZE)\n", + " assert output.dtype == torch.float32\n", + " assert output.device.type == \"cuda\"\n", + " assert weights.shape == (TOTAL_WEIGHTS,)\n", + " assert weights.dtype == torch.float32\n", + " assert weights.device.type == \"cuda\"\n", + "\n", + " seq = prompts.clone()\n", + " for step in range(OUTPUT_DIGITS):\n", + " logits = _forward_pass(seq, weights)\n", + " last_logits = logits[:, -1, :]\n", + " output[:, step, :] = last_logits\n", + " next_token = last_logits.argmax(dim=-1).to(torch.int32)\n", + " seq = torch.cat([seq, next_token.unsqueeze(1)], dim=1)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"prompts\": (ctypes.POINTER(ctypes.c_int), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"weights\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"batch_size\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " device = \"cuda\"\n", + " pairs = [(3, 5), (99, 1)]\n", + " batch_size = len(pairs)\n", + " prompts = torch.tensor(\n", + " [_encode_pair(a, b) for a, b in pairs],\n", + " device=device,\n", + " dtype=torch.int32,\n", + " )\n", + " weights = _init_weights(device)\n", + " output = torch.zeros(\n", + " batch_size, OUTPUT_DIGITS, VOCAB_SIZE, device=device, dtype=torch.float32\n", + " )\n", + " return {\n", + " \"prompts\": prompts,\n", + " \"output\": output,\n", + " \"weights\": weights,\n", + " \"batch_size\": batch_size,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " device = \"cuda\"\n", + " tests = []\n", + "\n", + " def _make_test(pairs):\n", + " batch_size = len(pairs)\n", + " prompts = 
torch.tensor(\n", + " [_encode_pair(a, b) for a, b in pairs],\n", + " device=device,\n", + " dtype=torch.int32,\n", + " )\n", + " weights = _init_weights(device)\n", + " output = torch.zeros(\n", + " batch_size, OUTPUT_DIGITS, VOCAB_SIZE, device=device, dtype=torch.float32\n", + " )\n", + " return {\n", + " \"prompts\": prompts,\n", + " \"output\": output,\n", + " \"weights\": weights,\n", + " \"batch_size\": batch_size,\n", + " }\n", + "\n", + " # Edge: single pair, both zero\n", + " tests.append(_make_test([(0, 0)]))\n", + "\n", + " # Edge: single pair, max carry propagation\n", + " tests.append(_make_test([(9999999999, 1)]))\n", + "\n", + " # Edge: small batch, simple sums\n", + " tests.append(_make_test([(1, 2), (3, 4)]))\n", + "\n", + " # Power-of-2 batch: 16\n", + " torch.manual_seed(42)\n", + " tests.append(\n", + " _make_test(\n", + " [\n", + " (torch.randint(0, 10**10, (1,)).item(), torch.randint(0, 10**10, (1,)).item())\n", + " for _ in range(16)\n", + " ]\n", + " )\n", + " )\n", + "\n", + " # Power-of-2 batch: 64\n", + " tests.append(\n", + " _make_test(\n", + " [\n", + " (torch.randint(0, 10**10, (1,)).item(), torch.randint(0, 10**10, (1,)).item())\n", + " for _ in range(64)\n", + " ]\n", + " )\n", + " )\n", + "\n", + " # Non-power-of-2: 30\n", + " tests.append(\n", + " _make_test(\n", + " [\n", + " (torch.randint(0, 10**10, (1,)).item(), torch.randint(0, 10**10, (1,)).item())\n", + " for _ in range(30)\n", + " ]\n", + " )\n", + " )\n", + "\n", + " # Non-power-of-2: 100\n", + " tests.append(\n", + " _make_test(\n", + " [\n", + " (torch.randint(0, 10**10, (1,)).item(), torch.randint(0, 10**10, (1,)).item())\n", + " for _ in range(100)\n", + " ]\n", + " )\n", + " )\n", + "\n", + " # Realistic: 1000\n", + " tests.append(\n", + " _make_test(\n", + " [\n", + " (torch.randint(0, 10**10, (1,)).item(), torch.randint(0, 10**10, (1,)).item())\n", + " for _ in range(1000)\n", + " ]\n", + " )\n", + " )\n", + "\n", + " # All zeros\n", + " 
tests.append(_make_test([(0, 0)] * 8))\n", + "\n", + " # Max values\n", + " tests.append(_make_test([(9999999999, 9999999999)] * 4))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " device = \"cuda\"\n", + " batch_size = 100000\n", + " torch.manual_seed(123)\n", + " a_vals = torch.randint(0, 10**10, (batch_size,), dtype=torch.int64)\n", + " b_vals = torch.randint(0, 10**10, (batch_size,), dtype=torch.int64)\n", + " prompts = _encode_pairs_batch(a_vals, b_vals, device)\n", + " weights = _init_weights(device)\n", + " output = torch.zeros(\n", + " batch_size, OUTPUT_DIGITS, VOCAB_SIZE, device=device, dtype=torch.float32\n", + " )\n", + " return {\n", + " \"prompts\": prompts,\n", + " \"output\": output,\n", + " \"weights\": weights,\n", + " \"batch_size\": batch_size,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/78_2d_fft.ipynb b/challenges/colab_exports/medium/78_2d_fft.ipynb new file mode 100644 index 00000000..ee2b8725 --- /dev/null +++ b/challenges/colab_exports/medium/78_2d_fft.ipynb @@ -0,0 +1,474 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Compute the 2D Discrete Fourier Transform (2D DFT) of a complex-valued signal stored on the GPU.\n Given a 2D complex input signal of shape M × N, compute its 2D DFT spectrum\n using the row-column decomposition: apply a 1D DFT along each row, then a 1D DFT along each\n column of the result. All values are 32-bit floating point.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in spectrum
    • \n
    • \n The input and output are stored as 1D arrays of interleaved real and imaginary parts in\n row-major order: element x[m, n] has its real part at index\n 2*(m*N + n) and imaginary part at index 2*(m*N + n) + 1\n
    • \n
    \n\n

    Example

    \n

    \nInput: M = 2, N = 2
    \nSignal $x[m, n]$ (real part):\n$$\n\\begin{bmatrix}\n1.0 & 0.0 \\\\\n0.0 & 0.0\n\\end{bmatrix}\n$$\nSignal $x[m, n]$ (imaginary part):\n$$\n\\begin{bmatrix}\n0.0 & 0.0 \\\\\n0.0 & 0.0\n\\end{bmatrix}\n$$\nOutput:
    \nSpectrum $X[k, l]$ (real part):\n$$\n\\begin{bmatrix}\n1.0 & 1.0 \\\\\n1.0 & 1.0\n\\end{bmatrix}\n$$\nSpectrum $X[k, l]$ (imaginary part):\n$$\n\\begin{bmatrix}\n0.0 & 0.0 \\\\\n0.0 & 0.0\n\\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ M, N ≤ 4096
    • \n
    • Signal values are 32-bit floating point (real and imaginary parts)
    • \n
    • Performance is measured with M = 2,048, N = 2,048
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// signal, spectrum are device pointers\nextern \"C\" void solve(const float* signal, float* spectrum, int M, int N) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# signal, spectrum are tensors on the GPU\n@cute.jit\ndef solve(signal: cute.Tensor, spectrum: cute.Tensor, M: cute.Int32, N: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# signal is a tensor on GPU\n@jax.jit\ndef solve(signal: jax.Array, M: int, N: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# signal, spectrum are device pointers\n@export\ndef solve(\n signal: UnsafePointer[Float32, 
MutExternalOrigin],\n spectrum: UnsafePointer[Float32, MutExternalOrigin],\n M: Int32,\n N: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# signal, spectrum are tensors on the GPU\ndef solve(signal: torch.Tensor, spectrum: torch.Tensor, M: int, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# signal, spectrum are tensors on the GPU\ndef solve(signal: torch.Tensor, spectrum: torch.Tensor, M: int, N: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the 
function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"2D FFT\",\n", + " atol=1e-02,\n", + " rtol=1e-02,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(self, signal: torch.Tensor, spectrum: torch.Tensor, M: int, N: int):\n", + " assert signal.shape == (M * N * 2,)\n", + " assert spectrum.shape == (M * N * 2,)\n", + " assert signal.dtype == torch.float32\n", + " assert spectrum.dtype == torch.float32\n", + " assert signal.device == spectrum.device\n", + "\n", + " sig_ri = signal.view(M, N, 2)\n", + " sig_c = torch.complex(sig_ri[..., 0].contiguous(), sig_ri[..., 1].contiguous())\n", + " spec_c = torch.fft.fft2(sig_c)\n", + " spec_ri = torch.stack((spec_c.real, spec_c.imag), dim=-1).contiguous()\n", + " spectrum.copy_(spec_ri.view(-1))\n", + "\n", + " def get_solve_signature(self) -> 
Dict[str, tuple]:\n", + " return {\n", + " \"signal\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"spectrum\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, N = 2, 2\n", + " signal = torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], device=\"cuda\", dtype=dtype)\n", + " spectrum = torch.empty(M * N * 2, device=\"cuda\", dtype=dtype)\n", + " return {\"signal\": signal, \"spectrum\": spectrum, \"M\": M, \"N\": N}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " cases = []\n", + "\n", + " def make_case(M, N, low=-1.0, high=1.0):\n", + " signal = torch.empty(M * N * 2, device=\"cuda\", dtype=dtype).uniform_(low, high)\n", + " spectrum = torch.empty(M * N * 2, device=\"cuda\", dtype=dtype)\n", + " return {\"signal\": signal, \"spectrum\": spectrum, \"M\": M, \"N\": N}\n", + "\n", + " def make_zero_case(M, N):\n", + " signal = torch.zeros(M * N * 2, device=\"cuda\", dtype=dtype)\n", + " spectrum = torch.empty(M * N * 2, device=\"cuda\", dtype=dtype)\n", + " return {\"signal\": signal, \"spectrum\": spectrum, \"M\": M, \"N\": N}\n", + "\n", + " def make_impulse_case(M, N):\n", + " signal = torch.zeros(M * N * 2, device=\"cuda\", dtype=dtype)\n", + " signal[0] = 1.0\n", + " spectrum = torch.empty(M * N * 2, device=\"cuda\", dtype=dtype)\n", + " return {\"signal\": signal, \"spectrum\": spectrum, \"M\": M, \"N\": N}\n", + "\n", + " # Edge cases: small sizes\n", + " cases.append(make_impulse_case(1, 1))\n", + " cases.append(make_zero_case(2, 2))\n", + " cases.append(make_case(1, 4))\n", + "\n", + " # Power-of-2 sizes\n", + " cases.append(make_case(16, 16))\n", + " cases.append(make_case(32, 64))\n", + "\n", + " # Non-power-of-2 sizes\n", + " cases.append(make_case(3, 5))\n", + " cases.append(make_case(30, 
30))\n", + "\n", + " # Mixed positive/negative values\n", + " cases.append(make_case(100, 200, low=-5.0, high=5.0))\n", + "\n", + " # Realistic sizes\n", + " cases.append(make_case(256, 256))\n", + " cases.append(make_case(512, 512))\n", + "\n", + " return cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " M, N = 2048, 2048\n", + " signal = torch.empty(M * N * 2, device=\"cuda\", dtype=dtype).normal_(0.0, 1.0)\n", + " spectrum = torch.empty(M * N * 2, device=\"cuda\", dtype=dtype)\n", + " return {\"signal\": signal, \"spectrum\": spectrum, \"M\": M, \"N\": N}\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/80_grouped_query_attention.ipynb b/challenges/colab_exports/medium/80_grouped_query_attention.ipynb new file mode 100644 index 00000000..b6d3c6ef --- /dev/null +++ b/challenges/colab_exports/medium/80_grouped_query_attention.ipynb @@ -0,0 +1,560 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \nImplement Grouped Query Attention (GQA), the attention mechanism used in modern large language\nmodels such as LLaMA-3, Mistral, and Gemma. GQA reduces the KV-cache memory footprint during\ninference by sharing key and value heads across groups of query heads. Given query tensor\nQ with num_q_heads heads and key/value tensors K,\nV each with num_kv_heads heads, compute scaled dot-product attention\nwhere every group of num_q_heads / num_kv_heads consecutive query heads attends to\nthe same key and value head. All tensors use float32.\n

    \n\n\n \n \n Grouped Query Attention (num_q_heads=4, num_kv_heads=2, groups=2)\n\n \n Q heads\n \n Q[0]\n \n Q[1]\n \n Q[2]\n \n Q[3]\n\n \n KV heads\n \n K[0], V[0]\n \n K[1], V[1]\n\n \n \n \n\n \n \n \n\n \n group 0\n group 1\n\n \n Q[0], Q[1] attend to K[0], V[0]\n Q[2], Q[3] attend to K[1], V[1]\n scale = 1 / sqrt(head_dim)\n scores = Q @ K^T * scale\n weights = softmax(scores)\n output = weights @ V\n\n \n \n \n \n \n\n\n

    Implementation Requirements

    \n
      \n
    • Implement the function solve(Q, K, V, output, num_q_heads, num_kv_heads, seq_len, head_dim).
    • \n
    • Do not change the function signature or use external libraries beyond the standard GPU frameworks.
    • \n
    • Write the result into the provided output buffer.
    • \n
    • num_q_heads is always divisible by num_kv_heads.
    • \n
    • Use scaled dot-product attention with scale factor 1 / sqrt(head_dim) and a softmax over the key dimension.
    • \n
    \n\n

    Example

    \n

    \n With num_q_heads = 4, num_kv_heads = 2 (groups of 2), seq_len = 3,\n head_dim = 4:\n

    \n

    \n Input:
    \n $Q_0$ (3×4):\n $$\n \\begin{bmatrix}\n 1 & 0 & 0 & 1 \\\\\n 0 & 1 & 1 & 0 \\\\\n 1 & 1 & 0 & 0\n \\end{bmatrix}\n $$\n $Q_1$ (3×4):\n $$\n \\begin{bmatrix}\n 0 & 1 & 0 & 1 \\\\\n 1 & 0 & 1 & 0 \\\\\n 0 & 0 & 1 & 1\n \\end{bmatrix}\n $$\n $Q_2$ (3×4):\n $$\n \\begin{bmatrix}\n -1 & 0 & 0.5 & 0 \\\\\n 0 & -1 & 0 & 0.5 \\\\\n 0.5 & 0 & -1 & 0\n \\end{bmatrix}\n $$\n $Q_3$ (3×4):\n $$\n \\begin{bmatrix}\n 0 & 0.5 & 0 & -1 \\\\\n 0.5 & 0 & 0 & -1 \\\\\n 0 & 0 & 0.5 & 0.5\n \\end{bmatrix}\n $$\n $K_0$ (3×4):\n $$\n \\begin{bmatrix}\n 1 & 0 & 1 & 0 \\\\\n 0 & 1 & 0 & 1 \\\\\n 1 & 1 & 1 & 1\n \\end{bmatrix}\n $$\n $K_1$ (3×4):\n $$\n \\begin{bmatrix}\n 0 & 1 & 0 & -1 \\\\\n -1 & 0 & 1 & 0 \\\\\n 0 & -1 & 0 & 1\n \\end{bmatrix}\n $$\n $V_0$ (3×4):\n $$\n \\begin{bmatrix}\n 1 & 2 & 3 & 4 \\\\\n 5 & 6 & 7 & 8 \\\\\n 9 & 10 & 11 & 12\n \\end{bmatrix}\n $$\n $V_1$ (3×4):\n $$\n \\begin{bmatrix}\n -1 & -2 & -3 & -4 \\\\\n 2 & 3 & 4 & 5 \\\\\n 6 & 7 & 8 & 9\n \\end{bmatrix}\n $$\n Groups: $Q_0, Q_1 \\to K_0, V_0$; \\quad $Q_2, Q_3 \\to K_1, V_1$\n

    \n

    \n Output (values rounded to 2 decimal places):
    \n $\\text{output}_0$ (3×4):\n $$\n \\begin{bmatrix}\n 5.71 & 6.71 & 7.71 & 8.71 \\\\\n 5.71 & 6.71 & 7.71 & 8.71 \\\\\n 5.71 & 6.71 & 7.71 & 8.71\n \\end{bmatrix}\n $$\n $\\text{output}_1$ (3×4):\n $$\n \\begin{bmatrix}\n 6.07 & 7.07 & 8.07 & 9.07 \\\\\n 5.00 & 6.00 & 7.00 & 8.00 \\\\\n 5.71 & 6.71 & 7.71 & 8.71\n \\end{bmatrix}\n $$\n $\\text{output}_2$ (3×4):\n $$\n \\begin{bmatrix}\n 2.24 & 2.76 & 3.27 & 3.79 \\\\\n 3.96 & 4.70 & 5.44 & 6.17 \\\\\n 2.40 & 2.60 & 2.79 & 2.98\n \\end{bmatrix}\n $$\n $\\text{output}_3$ (3×4):\n $$\n \\begin{bmatrix}\n 0.76 & 0.58 & 0.40 & 0.22 \\\\\n 1.17 & 1.08 & 1.00 & 0.91 \\\\\n 2.84 & 3.37 & 3.91 & 4.44\n \\end{bmatrix}\n $$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ num_kv_heads ≤ num_q_heads ≤ 64
    • \n
    • num_q_heads is divisible by num_kv_heads
    • \n
    • 1 ≤ seq_len ≤ 4,096
    • \n
    • 8 ≤ head_dim ≤ 256; head_dim is a multiple of 8
    • \n
    • All tensor values are float32
    • \n
    • Performance is measured with num_q_heads = 32, num_kv_heads = 8, seq_len = 1,024, head_dim = 128
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K, V, output are device pointers\nextern \"C\" void solve(const float* Q, const float* K, const float* V, float* output,\n int num_q_heads, int num_kv_heads, int seq_len, int head_dim) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K, V, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K: cute.Tensor,\n V: cute.Tensor,\n output: cute.Tensor,\n num_q_heads: cute.Int32,\n num_kv_heads: cute.Int32,\n seq_len: cute.Int32,\n head_dim: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K, V are tensors on GPU\n@jax.jit\ndef solve(\n Q: jax.Array,\n K: jax.Array,\n V: jax.Array,\n num_q_heads: int,\n num_kv_heads: int,\n seq_len: int,\n head_dim: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from 
std.gpu.host import DeviceContext\nfrom std.memory import UnsafePointer\n\n\n# Q, K, V, output are device pointers\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K: UnsafePointer[Float32, MutExternalOrigin],\n V: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n num_q_heads: Int32,\n num_kv_heads: Int32,\n seq_len: Int32,\n head_dim: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n num_q_heads: int,\n num_kv_heads: int,\n seq_len: int,\n head_dim: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n num_q_heads: int,\n num_kv_heads: int,\n seq_len: int,\n head_dim: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, 
name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "import math\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Grouped Query Attention\",\n", + " atol=1e-04,\n", + " rtol=1e-04,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K: torch.Tensor,\n", + " V: torch.Tensor,\n", + " 
output: torch.Tensor,\n", + " num_q_heads: int,\n", + " num_kv_heads: int,\n", + " seq_len: int,\n", + " head_dim: int,\n", + " ):\n", + " assert Q.shape == (num_q_heads, seq_len, head_dim)\n", + " assert K.shape == (num_kv_heads, seq_len, head_dim)\n", + " assert V.shape == (num_kv_heads, seq_len, head_dim)\n", + " assert output.shape == (num_q_heads, seq_len, head_dim)\n", + " assert Q.dtype == K.dtype == V.dtype == output.dtype == torch.float32\n", + " assert Q.device.type == \"cuda\"\n", + " assert K.device.type == \"cuda\"\n", + " assert V.device.type == \"cuda\"\n", + " assert output.device.type == \"cuda\"\n", + " assert num_q_heads % num_kv_heads == 0\n", + "\n", + " num_groups = num_q_heads // num_kv_heads\n", + " scale = 1.0 / math.sqrt(head_dim)\n", + "\n", + " # Expand K, V from (num_kv_heads, seq_len, head_dim)\n", + " # to (num_q_heads, seq_len, head_dim) by repeating each KV head num_groups times\n", + " K_expanded = K.repeat_interleave(num_groups, dim=0)\n", + " V_expanded = V.repeat_interleave(num_groups, dim=0)\n", + "\n", + " # Scaled dot-product attention: (num_q_heads, seq_len, seq_len)\n", + " scores = torch.bmm(Q, K_expanded.transpose(1, 2)) * scale\n", + "\n", + " # Softmax over the key dimension\n", + " attn_weights = torch.softmax(scores, dim=-1)\n", + "\n", + " # Weighted sum of values: (num_q_heads, seq_len, head_dim)\n", + " output.copy_(torch.bmm(attn_weights, V_expanded))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"V\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"num_q_heads\": (ctypes.c_int, \"in\"),\n", + " \"num_kv_heads\": (ctypes.c_int, \"in\"),\n", + " \"seq_len\": (ctypes.c_int, \"in\"),\n", + " \"head_dim\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def _make_test_case(self, num_q_heads, 
num_kv_heads, seq_len, head_dim, zero_inputs=False):\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " if zero_inputs:\n", + " Q = torch.zeros(num_q_heads, seq_len, head_dim, device=device, dtype=dtype)\n", + " K = torch.zeros(num_kv_heads, seq_len, head_dim, device=device, dtype=dtype)\n", + " V = torch.zeros(num_kv_heads, seq_len, head_dim, device=device, dtype=dtype)\n", + " else:\n", + " Q = torch.randn(num_q_heads, seq_len, head_dim, device=device, dtype=dtype)\n", + " K = torch.randn(num_kv_heads, seq_len, head_dim, device=device, dtype=dtype)\n", + " V = torch.randn(num_kv_heads, seq_len, head_dim, device=device, dtype=dtype)\n", + " output = torch.zeros(num_q_heads, seq_len, head_dim, device=device, dtype=dtype)\n", + " return {\n", + " \"Q\": Q,\n", + " \"K\": K,\n", + " \"V\": V,\n", + " \"output\": output,\n", + " \"num_q_heads\": num_q_heads,\n", + " \"num_kv_heads\": num_kv_heads,\n", + " \"seq_len\": seq_len,\n", + " \"head_dim\": head_dim,\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " torch.manual_seed(0)\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " num_q_heads = 4\n", + " num_kv_heads = 2\n", + " seq_len = 3\n", + " head_dim = 4\n", + "\n", + " Q = torch.tensor(\n", + " [\n", + " [[1.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]],\n", + " [[0.0, 1.0, 0.0, 1.0], [1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 1.0]],\n", + " [[-1.0, 0.0, 0.5, 0.0], [0.0, -1.0, 0.0, 0.5], [0.5, 0.0, -1.0, 0.0]],\n", + " [[0.0, 0.5, 0.0, -1.0], [0.5, 0.0, 0.0, -1.0], [0.0, 0.0, 0.5, 0.5]],\n", + " ],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " K = torch.tensor(\n", + " [\n", + " [[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0]],\n", + " [[0.0, 1.0, 0.0, -1.0], [-1.0, 0.0, 1.0, 0.0], [0.0, -1.0, 0.0, 1.0]],\n", + " ],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " V = torch.tensor(\n", + " [\n", + " [[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 
[9.0, 10.0, 11.0, 12.0]],\n", + " [[-1.0, -2.0, -3.0, -4.0], [2.0, 3.0, 4.0, 5.0], [6.0, 7.0, 8.0, 9.0]],\n", + " ],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " output = torch.zeros(num_q_heads, seq_len, head_dim, device=device, dtype=dtype)\n", + " return {\n", + " \"Q\": Q,\n", + " \"K\": K,\n", + " \"V\": V,\n", + " \"output\": output,\n", + " \"num_q_heads\": num_q_heads,\n", + " \"num_kv_heads\": num_kv_heads,\n", + " \"seq_len\": seq_len,\n", + " \"head_dim\": head_dim,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " torch.manual_seed(42)\n", + " tests = []\n", + "\n", + " # Edge case: MQA (num_kv_heads=1), single token\n", + " tests.append(self._make_test_case(4, 1, 1, 8))\n", + "\n", + " # Edge case: GQA with groups=2, tiny seq\n", + " tests.append(self._make_test_case(2, 1, 2, 4))\n", + "\n", + " # Zero inputs\n", + " tests.append(self._make_test_case(4, 2, 4, 8, zero_inputs=True))\n", + "\n", + " # Power-of-2: groups=4 (LLaMA-3 style ratio)\n", + " tests.append(self._make_test_case(8, 2, 16, 32))\n", + "\n", + " # Power-of-2: seq_len=32, head_dim=64\n", + " tests.append(self._make_test_case(4, 2, 32, 64))\n", + "\n", + " # Non-power-of-2 seq_len\n", + " tests.append(self._make_test_case(4, 2, 30, 32))\n", + "\n", + " # Non-power-of-2 seq_len, different grouping\n", + " tests.append(self._make_test_case(6, 3, 100, 32))\n", + "\n", + " # GQA groups=8 (Mistral style), seq_len=255\n", + " tests.append(self._make_test_case(8, 1, 255, 64))\n", + "\n", + " # MHA equivalent (num_q_heads == num_kv_heads)\n", + " tests.append(self._make_test_case(8, 8, 64, 32))\n", + "\n", + " # Realistic small inference batch\n", + " tests.append(self._make_test_case(8, 2, 128, 64))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " torch.manual_seed(0)\n", + " # LLaMA-3 8B style: 32 Q heads, 8 KV heads, head_dim=128\n", + " return self._make_test_case(32, 8, 
1024, 128)\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/81_int4_matmul.ipynb b/challenges/colab_exports/medium/81_int4_matmul.ipynb new file mode 100644 index 00000000..70390653 --- /dev/null +++ b/challenges/colab_exports/medium/81_int4_matmul.ipynb @@ -0,0 +1,538 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a weight-only INT4 quantized matrix multiplication (W4A16), a core kernel used in\n modern LLM inference. Given a float16 activation matrix x of shape\n M × K and a weight matrix stored in packed INT4 format, compute the output\n matrix y = x × WT of shape M × N, where\n W is the dequantized float16 weight matrix of shape N × K.\n

    \n\n

    \n Packing format: Each byte of w_q stores two INT4 weights. The\n high nibble (bits 7–4) holds weight w[n, 2i] and the low nibble (bits\n 3–0) holds w[n, 2i+1]. INT4 values are stored unsigned in the range\n [0, 15] with an offset of 8, so the signed weight is nibble − 8,\n giving values in [−8, 7].\n

    \n\n

    \n Dequantization: Weights are dequantized group-wise. Each contiguous block of\n group_size weights along the K dimension shares one float16 scale:\n

    \n
    \nW[n, k] = (w_q_nibble[n, k] - 8) * scales[n, k // group_size]\n
    \n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The final result must be stored in y
    • \n
    \n\n

    Example

    \n

    \n Input (M = 2, N = 4, K = 4, group_size = 2):\n

    \n

    \n Activations $x$ (float16, $2 \\times 4$):\n $$\n \\begin{bmatrix}\n 1.0 & 0.0 & 1.0 & 0.0 \\\\\n 0.0 & 1.0 & 0.0 & 1.0\n \\end{bmatrix}\n $$\n Packed weights $w\\_q$ (uint8, $4 \\times 2$) with signed INT4 values in brackets:\n $$\n \\begin{bmatrix}\n \\texttt{0x99} & \\texttt{0x99} \\\\\n \\texttt{0xAA} & \\texttt{0xAA} \\\\\n \\texttt{0x77} & \\texttt{0x77} \\\\\n \\texttt{0x88} & \\texttt{0x88}\n \\end{bmatrix}\n \\;\\Rightarrow\\;\n W_{\\text{int4}} =\n \\begin{bmatrix}\n 1 & 1 & 1 & 1 \\\\\n 2 & 2 & 2 & 2 \\\\\n -1 & -1 & -1 & -1 \\\\\n 0 & 0 & 0 & 0\n \\end{bmatrix}\n $$\n Scales (float16, $4 \\times 2$, all entries 0.5):\n $$\n \\begin{bmatrix}\n 0.5 & 0.5 \\\\\n 0.5 & 0.5 \\\\\n 0.5 & 0.5 \\\\\n 0.5 & 0.5\n \\end{bmatrix}\n \\;\\Rightarrow\\;\n W_{\\text{dequant}} =\n \\begin{bmatrix}\n 0.5 & 0.5 & 0.5 & 0.5 \\\\\n 1.0 & 1.0 & 1.0 & 1.0 \\\\\n -0.5 & -0.5 & -0.5 & -0.5 \\\\\n 0.0 & 0.0 & 0.0 & 0.0\n \\end{bmatrix}\n $$\n Output $y = x \\times W^T$ (float16, $2 \\times 4$):\n $$\n \\begin{bmatrix}\n 1.0 & 2.0 & -1.0 & 0.0 \\\\\n 1.0 & 2.0 & -1.0 & 0.0\n \\end{bmatrix}\n $$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ M, N ≤ 8,192
    • \n
    • 1 ≤ K ≤ 8,192
    • \n
    • K is divisible by 2 and by group_size
    • \n
    • group_size ∈ {2, 4, 8, 16, 32, 64, 128}
    • \n
    • All tensors are stored in row-major order
    • \n
    • Input dtype: x and scales are float16; w_q is uint8
    • \n
    • Output dtype: y is float16
    • \n
    • Performance is measured with M = 4,096, N = 4,096, K = 4,096, group_size = 128
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n#include \n#include \n\n// x, w_q, scales, y are device pointers\nextern \"C\" void solve(const __half* x, const uint8_t* w_q, const __half* scales, __half* y, int M,\n int N, int K, int group_size) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# x, w_q, scales, y are tensors on the GPU\n@cute.jit\ndef solve(\n x: cute.Tensor,\n w_q: cute.Tensor,\n scales: cute.Tensor,\n y: cute.Tensor,\n M: cute.Int32,\n N: cute.Int32,\n K: cute.Int32,\n group_size: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# x, w_q, scales are tensors on GPU\n@jax.jit\ndef solve(\n x: jax.Array,\n w_q: jax.Array,\n scales: jax.Array,\n M: int,\n N: int,\n K: int,\n group_size: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import 
DeviceContext\nfrom std.memory import UnsafePointer\n\n\n# x, w_q, scales, y are device pointers\n@export\ndef solve(\n x: UnsafePointer[Float16, MutExternalOrigin],\n w_q: UnsafePointer[UInt8, MutExternalOrigin],\n scales: UnsafePointer[Float16, MutExternalOrigin],\n y: UnsafePointer[Float16, MutExternalOrigin],\n M: Int32,\n N: Int32,\n K: Int32,\n group_size: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# x, w_q, scales, y are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n w_q: torch.Tensor,\n scales: torch.Tensor,\n y: torch.Tensor,\n M: int,\n N: int,\n K: int,\n group_size: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# x, w_q, scales, y are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n w_q: torch.Tensor,\n scales: torch.Tensor,\n y: torch.Tensor,\n M: int,\n N: int,\n K: int,\n group_size: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " 
self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"INT4 Weight-Only Quantized MatMul\",\n", + " atol=1e-02,\n", + " rtol=1e-02,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " x: torch.Tensor,\n", + " w_q: torch.Tensor,\n", + " scales: torch.Tensor,\n", + " y: torch.Tensor,\n", + " M: int,\n", + " N: int,\n", + " K: int,\n", + " group_size: 
int,\n", + " ):\n", + " assert x.shape == (M, K)\n", + " assert w_q.shape == (N, K // 2)\n", + " assert scales.shape == (N, K // group_size)\n", + " assert y.shape == (M, N)\n", + " assert x.dtype == torch.float16\n", + " assert w_q.dtype == torch.uint8\n", + " assert scales.dtype == torch.float16\n", + " assert y.dtype == torch.float16\n", + " assert x.device.type == \"cuda\"\n", + " assert w_q.device.type == \"cuda\"\n", + " assert scales.device.type == \"cuda\"\n", + " assert y.device.type == \"cuda\"\n", + "\n", + " # Unpack INT4 weights from packed uint8 bytes.\n", + " # w_q[n, i] stores two weights: w[n, 2*i] in the high nibble (bits 7:4)\n", + " # and w[n, 2*i+1] in the low nibble (bits 3:0).\n", + " # INT4 values are stored unsigned (0\u201315) with an offset of 8,\n", + " # so the signed value is nibble - 8, giving range [-8, 7].\n", + " w_high = ((w_q >> 4) & 0xF).to(torch.int32) - 8 # [N, K//2]\n", + " w_low = (w_q & 0xF).to(torch.int32) - 8 # [N, K//2]\n", + "\n", + " # Interleave high and low nibbles to reconstruct [N, K]\n", + " w_int = torch.stack([w_high, w_low], dim=-1).reshape(N, K) # [N, K]\n", + "\n", + " # Apply group-wise scales: dequantize each group\n", + " n_groups = K // group_size\n", + " w_groups = w_int.reshape(N, n_groups, group_size).float() # [N, n_groups, group_size]\n", + " scales_f = scales.float().unsqueeze(-1) # [N, n_groups, 1]\n", + " w_dequant = (w_groups * scales_f).reshape(N, K) # [N, K]\n", + "\n", + " # MatMul: x [M, K] @ w_dequant.T [K, N] = y [M, N]\n", + " y.copy_((x.float() @ w_dequant.T).half())\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"x\": (ctypes.POINTER(ctypes.c_uint16), \"in\"),\n", + " \"w_q\": (ctypes.POINTER(ctypes.c_uint8), \"in\"),\n", + " \"scales\": (ctypes.POINTER(ctypes.c_uint16), \"in\"),\n", + " \"y\": (ctypes.POINTER(ctypes.c_uint16), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"N\": (ctypes.c_int, \"in\"),\n", + " \"K\": 
(ctypes.c_int, \"in\"),\n", + " \"group_size\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def _make_test_case(self, M: int, N: int, K: int, group_size: int, zero_x: bool = False):\n", + " device = \"cuda\"\n", + " if zero_x:\n", + " x = torch.zeros(M, K, device=device, dtype=torch.float16)\n", + " else:\n", + " x = torch.randn(M, K, device=device, dtype=torch.float16)\n", + " # Random packed INT4 weights: each byte holds two nibbles in [0,15]\n", + " w_q = torch.randint(0, 256, (N, K // 2), dtype=torch.uint8, device=device)\n", + " # Small positive scales to keep magnitudes reasonable\n", + " scales = torch.rand(N, K // group_size, device=device, dtype=torch.float16) * 0.1 + 0.01\n", + " y = torch.empty(M, N, device=device, dtype=torch.float16)\n", + " return {\n", + " \"x\": x,\n", + " \"w_q\": w_q,\n", + " \"scales\": scales,\n", + " \"y\": y,\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"group_size\": group_size,\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " device = \"cuda\"\n", + " M, N, K, group_size = 2, 4, 4, 2\n", + "\n", + " x = torch.tensor(\n", + " [[1.0, 0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]],\n", + " device=device,\n", + " dtype=torch.float16,\n", + " )\n", + " # Packed INT4 weights (high nibble first).\n", + " # Row 0: weights [1,1,1,1] \u2192 nibbles stored as [9,9,9,9] \u2192 bytes [0x99, 0x99] = [153, 153]\n", + " # Row 1: weights [2,2,2,2] \u2192 nibbles [10,10,10,10] \u2192 bytes [0xAA, 0xAA] = [170, 170]\n", + " # Row 2: weights [-1,-1,-1,-1] \u2192 nibbles [7,7,7,7] \u2192 bytes [0x77, 0x77] = [119, 119]\n", + " # Row 3: weights [0,0,0,0] \u2192 nibbles [8,8,8,8] \u2192 bytes [0x88, 0x88] = [136, 136]\n", + " w_q = torch.tensor(\n", + " [[153, 153], [170, 170], [119, 119], [136, 136]],\n", + " dtype=torch.uint8,\n", + " device=device,\n", + " )\n", + " # One scale per group (group_size=2 \u2192 2 groups per row), all 0.5\n", + " scales = torch.full((N, K // group_size), 0.5, 
device=device, dtype=torch.float16)\n", + " y = torch.empty(M, N, device=device, dtype=torch.float16)\n", + "\n", + " return {\n", + " \"x\": x,\n", + " \"w_q\": w_q,\n", + " \"scales\": scales,\n", + " \"y\": y,\n", + " \"M\": M,\n", + " \"N\": N,\n", + " \"K\": K,\n", + " \"group_size\": group_size,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " torch.manual_seed(42)\n", + " tests = []\n", + "\n", + " # Edge cases \u2014 tiny K, small group_size\n", + " tests.append(self._make_test_case(1, 2, 4, 2, zero_x=True))\n", + " tests.append(self._make_test_case(2, 4, 4, 2))\n", + " tests.append(self._make_test_case(3, 5, 8, 4))\n", + "\n", + " # Power-of-2 sizes\n", + " tests.append(self._make_test_case(16, 16, 32, 16))\n", + " tests.append(self._make_test_case(32, 64, 64, 32))\n", + " tests.append(self._make_test_case(64, 128, 128, 64))\n", + "\n", + " # Non-power-of-2 sizes\n", + " tests.append(self._make_test_case(30, 50, 64, 32))\n", + " tests.append(self._make_test_case(100, 200, 128, 64))\n", + " tests.append(self._make_test_case(255, 100, 128, 64))\n", + "\n", + " # Realistic LLM inference sizes\n", + " tests.append(self._make_test_case(128, 256, 512, 128))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " torch.manual_seed(0)\n", + " # Typical LLM weight matrix: 4096\u00d74096 with group_size=128\n", + " return self._make_test_case(4096, 4096, 4096, 128)\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/82_linear_recurrence.ipynb b/challenges/colab_exports/medium/82_linear_recurrence.ipynb new file mode 100644 index 00000000..4f696e83 --- /dev/null +++ b/challenges/colab_exports/medium/82_linear_recurrence.ipynb @@ -0,0 +1,503 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Given two matrices a and x, each of shape [B, L] (batch size × sequence length),\n compute the linear recurrence h of shape [B, L] defined by:\n h[b, 0] = x[b, 0] and h[b, t] = a[b, t] × h[b, t−1] + x[b, t] for t ≥ 1.\n All values are float32. This operation is the core computational primitive of\n State Space Models (SSMs) such as Mamba, S4, and H3.\n

    \n\n\n \n \n Linear Recurrence: h[t] = a[t] \u00b7 h[t-1] + x[t]\n \n \n \n \n \n h[0]\n h[1]\n h[2]\n h[3]\n \n \n \n \n \n \n \n \u00d7a[1]\n \n \u00d7a[2]\n \n \u00d7a[3]\n \n \n \n \n \n x[0]\n x[1]\n x[2]\n x[3]\n \n +\n +\n +\n +\n \n \u2026\n\n\n

    Implementation Requirements

    \n
      \n
    • Use only native features (external libraries are not permitted)
    • \n
    • The solve function signature must remain unchanged
    • \n
    • The result must be stored in the output tensor h
    • \n
    \n\n

    Examples

    \n\n

    Example 1 \u2014 exponential decay (a = 0.5, single impulse):

    \n$$\na = \\begin{bmatrix} 0.5 & 0.5 & 0.5 & 0.5 \\end{bmatrix}, \\quad\nx = \\begin{bmatrix} 1.0 & 0.0 & 0.0 & 0.0 \\end{bmatrix}\n$$\n$$\nh = \\begin{bmatrix} 1.0 & 0.5 & 0.25 & 0.125 \\end{bmatrix}\n$$\n\n

    Example 2 \u2014 prefix sum (a = 1, unit inputs):

    \n$$\na = \\begin{bmatrix} 1.0 & 1.0 & 1.0 & 1.0 \\end{bmatrix}, \\quad\nx = \\begin{bmatrix} 1.0 & 1.0 & 1.0 & 1.0 \\end{bmatrix}\n$$\n$$\nh = \\begin{bmatrix} 1.0 & 2.0 & 3.0 & 4.0 \\end{bmatrix}\n$$\n\n

    Full example with B = 2, L = 4:

    \n$$\na = \\begin{bmatrix} 0.5 & 0.5 & 0.5 & 0.5 \\\\ 1.0 & 1.0 & 1.0 & 1.0 \\end{bmatrix}, \\quad\nx = \\begin{bmatrix} 1.0 & 0.0 & 0.0 & 0.0 \\\\ 1.0 & 1.0 & 1.0 & 1.0 \\end{bmatrix}\n$$\n$$\nh = \\begin{bmatrix} 1.0 & 0.5 & 0.25 & 0.125 \\\\ 1.0 & 2.0 & 3.0 & 4.0 \\end{bmatrix}\n$$\n\n

    Constraints

    \n
      \n
    • 1 ≤ B ≤ 256 (batch size)
    • \n
    • 1 ≤ L ≤ 65,536 (sequence length)
    • \n
    • All values in a and x are float32
    • \n
    • Performance is measured with B = 64, L = 16,384
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// a, x, h are device pointers\nextern \"C\" void solve(const float* a, const float* x, float* h, int B, int L) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# a, x, h are tensors on the GPU\n@cute.jit\ndef solve(a: cute.Tensor, x: cute.Tensor, h: cute.Tensor, B: cute.Int32, L: cute.Int32):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# a, x are tensors on GPU\n@jax.jit\ndef solve(a: jax.Array, x: jax.Array, B: int, L: int) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.gpu import block_dim, block_idx, thread_idx\nfrom std.memory import UnsafePointer\nfrom std.math import ceildiv\n\n\n# a, x, h are device pointers\n@export\ndef solve(\n a: UnsafePointer[Float32, MutExternalOrigin],\n x: 
UnsafePointer[Float32, MutExternalOrigin],\n h: UnsafePointer[Float32, MutExternalOrigin],\n B: Int32,\n L: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# a, x, h are tensors on the GPU\ndef solve(a: torch.Tensor, x: torch.Tensor, h: torch.Tensor, B: int, L: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# a, x, h are tensors on the GPU\ndef solve(a: torch.Tensor, x: torch.Tensor, h: torch.Tensor, B: int, L: int):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " 
Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Linear Recurrence\",\n", + " atol=1e-05,\n", + " rtol=1e-05,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " a: torch.Tensor,\n", + " x: torch.Tensor,\n", + " h: torch.Tensor,\n", + " B: int,\n", + " L: int,\n", + " ):\n", + " assert a.shape == (B, L)\n", + " assert x.shape == (B, L)\n", + " assert h.shape == (B, L)\n", + " assert a.dtype == x.dtype == h.dtype == torch.float32\n", + " assert a.device.type == \"cuda\"\n", + " assert x.device.type == \"cuda\"\n", + " assert h.device.type == \"cuda\"\n", + "\n", + " out = torch.empty_like(x)\n", + " out[:, 0] = x[:, 0]\n", + " for t in range(1, L):\n", + " out[:, t] = a[:, t] * out[:, t - 1] + x[:, t]\n", + " h.copy_(out)\n", + "\n", + " def 
get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"a\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"x\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"h\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"B\": (ctypes.c_int, \"in\"),\n", + " \"L\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def _make_test_case(self, B, L, zero_inputs=False, zero_a=False, unit_a=False):\n", + " device = \"cuda\"\n", + " dtype = torch.float32\n", + " if zero_inputs:\n", + " a = torch.zeros(B, L, device=device, dtype=dtype)\n", + " x = torch.zeros(B, L, device=device, dtype=dtype)\n", + " elif zero_a:\n", + " a = torch.zeros(B, L, device=device, dtype=dtype)\n", + " x = torch.randn(B, L, device=device, dtype=dtype)\n", + " elif unit_a:\n", + " a = torch.ones(B, L, device=device, dtype=dtype)\n", + " x = torch.randn(B, L, device=device, dtype=dtype)\n", + " else:\n", + " a = torch.rand(B, L, device=device, dtype=dtype)\n", + " x = torch.randn(B, L, device=device, dtype=dtype)\n", + " h = torch.empty(B, L, device=device, dtype=dtype)\n", + " return {\"a\": a, \"x\": x, \"h\": h, \"B\": B, \"L\": L}\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " device = \"cuda\"\n", + " dtype = torch.float32\n", + " a = torch.tensor(\n", + " [[0.5, 0.5, 0.5, 0.5], [1.0, 1.0, 1.0, 1.0]],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " x = torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " h = torch.empty(2, 4, device=device, dtype=dtype)\n", + " return {\"a\": a, \"x\": x, \"h\": h, \"B\": 2, \"L\": 4}\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " torch.manual_seed(42)\n", + " tests = []\n", + "\n", + " # Edge case: single element\n", + " tests.append(self._make_test_case(1, 1))\n", + "\n", + " # Edge case: two elements\n", + " tests.append(self._make_test_case(1, 2))\n", + "\n", + " # Zero inputs\n", 
+ " tests.append(self._make_test_case(4, 4, zero_inputs=True))\n", + "\n", + " # a=0 everywhere: h[t] = x[t] (no recurrence)\n", + " tests.append(self._make_test_case(4, 16, zero_a=True))\n", + "\n", + " # a=1 everywhere: h[t] = prefix sum of x\n", + " tests.append(self._make_test_case(4, 16, unit_a=True))\n", + "\n", + " # Power-of-2 sequence length\n", + " tests.append(self._make_test_case(8, 32))\n", + "\n", + " # Power-of-2 sequence length, larger\n", + " tests.append(self._make_test_case(8, 256))\n", + "\n", + " # Non-power-of-2 sequence length\n", + " tests.append(self._make_test_case(4, 30))\n", + "\n", + " # Non-power-of-2 sequence length, larger\n", + " tests.append(self._make_test_case(8, 100))\n", + "\n", + " # Realistic size (SSM hidden state)\n", + " tests.append(self._make_test_case(16, 1024))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " torch.manual_seed(0)\n", + " # B=64 sequences, L=16384 tokens \u2014 typical long-context SSM workload\n", + " return self._make_test_case(64, 16384)\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/84_swiglu_mlp_block.ipynb b/challenges/colab_exports/medium/84_swiglu_mlp_block.ipynb new file mode 100644 index 00000000..577b6914 --- /dev/null +++ b/challenges/colab_exports/medium/84_swiglu_mlp_block.ipynb @@ -0,0 +1,543 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement the SwiGLU MLP block \u2014 the feedforward network used in LLaMA, Mistral, Gemma, and most\n modern large language models. Given an input matrix x of shape\n [M, d_model] and three weight matrices W_gate, W_up\n (each [d_model, d_ffn]), and W_down ([d_ffn, d_model]),\n compute:\n output = (SiLU(x × W_gate) ⊙ (x × W_up)) × W_down,\n where SiLU(z) = z × sigmoid(z) and ⊙ denotes element-wise\n multiplication. All tensors are float32.\n

    \n\n\n \n \n \n \n \n \n\n \n \n x\n [M, d_model]\n\n \n \n \n x · W_gate\n gate projection\n\n \n \n \n x · W_up\n up projection\n\n \n [M, d_ffn]\n [M, d_ffn]\n\n \n \n\n \n \n SiLU\n\n \n \n\n \n \n\n \n \n \n\n \n \n\n \n \n · W_down\n [M, d_ffn]\n\n \n \n\n \n \n output\n [M, d_model]\n\n \n z · sigmoid(z)\n\n\n

    Implementation Requirements

    \n
      \n
    • Implement the solve function with the signature unchanged.
    • \n
    • Do not use external libraries beyond the framework provided.
    • \n
    • Write the result into output in-place.
    • \n
    \n\n

    Example

    \n

    \n Input: M = 2, d_model = 2, d_ffn = 4\n

    \n

    \n $x$ (float32, $2 \\times 2$):\n $$\n x = \\begin{bmatrix} 1.0 & 0.0 \\\\ 0.0 & 1.0 \\end{bmatrix}\n $$\n $W_\\text{gate}$ and $W_\\text{up}$ (both $2 \\times 4$):\n $$\n W_\\text{gate} = W_\\text{up} =\n \\begin{bmatrix}\n 1.0 & 0.0 & 0.0 & 0.0 \\\\\n 0.0 & 1.0 & 0.0 & 0.0\n \\end{bmatrix}\n $$\n $W_\\text{down}$ ($4 \\times 2$):\n $$\n W_\\text{down} =\n \\begin{bmatrix}\n 1.0 & 0.0 \\\\\n 0.0 & 1.0 \\\\\n 0.0 & 0.0 \\\\\n 0.0 & 0.0\n \\end{bmatrix}\n $$\n

    \n

    \n Intermediate steps:\n $$\n \\text{gate} = x \\cdot W_\\text{gate} =\n \\begin{bmatrix} 1.0 & 0.0 & 0.0 & 0.0 \\\\ 0.0 & 1.0 & 0.0 & 0.0 \\end{bmatrix}\n $$\n $$\n \\text{up} = x \\cdot W_\\text{up} =\n \\begin{bmatrix} 1.0 & 0.0 & 0.0 & 0.0 \\\\ 0.0 & 1.0 & 0.0 & 0.0 \\end{bmatrix}\n $$\n $$\n \\text{SiLU}(1.0) = 1.0 \\times \\sigma(1.0) \\approx 0.7311\n $$\n $$\n \\text{hidden} = \\text{SiLU}(\\text{gate}) \\odot \\text{up} =\n \\begin{bmatrix} 0.7311 & 0.0 & 0.0 & 0.0 \\\\ 0.0 & 0.7311 & 0.0 & 0.0 \\end{bmatrix}\n $$\n

    \n

    \n Output:\n $$\n \\text{output} = \\text{hidden} \\cdot W_\\text{down} \\approx\n \\begin{bmatrix} 0.7311 & 0.0 \\\\ 0.0 & 0.7311 \\end{bmatrix}\n $$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ M ≤ 65,536
    • \n
    • 1 ≤ d_model ≤ 8,192
    • \n
    • 1 ≤ d_ffn ≤ 32,768
    • \n
    • All tensors are float32 on the GPU.
    • \n
    • Input values are in the range [-10, 10].
    • \n
    • \n Performance is measured with M = 512, d_model = 4,096,\n d_ffn = 14,336\n
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// x, W_gate, W_up, W_down, output are device pointers\nextern \"C\" void solve(const float* x, const float* W_gate, const float* W_up, const float* W_down,\n float* output, int M, int d_model, int d_ffn) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# x, W_gate, W_up, W_down, output are tensors on the GPU\n@cute.jit\ndef solve(\n x: cute.Tensor,\n W_gate: cute.Tensor,\n W_up: cute.Tensor,\n W_down: cute.Tensor,\n output: cute.Tensor,\n M: cute.Int32,\n d_model: cute.Int32,\n d_ffn: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# x, W_gate, W_up, W_down are tensors on GPU\n@jax.jit\ndef solve(\n x: jax.Array,\n W_gate: jax.Array,\n W_up: jax.Array,\n W_down: jax.Array,\n M: int,\n d_model: int,\n d_ffn: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + 
"%%writefile solution.mojo\n", + "from gpu.host import DeviceContext\nfrom gpu.id import block_dim, block_idx, thread_idx\nfrom memory import UnsafePointer\nfrom math import ceildiv\n\n\n# x, W_gate, W_up, W_down, output are device pointers\n@export\ndef solve(\n x: UnsafePointer[Float32],\n W_gate: UnsafePointer[Float32],\n W_up: UnsafePointer[Float32],\n W_down: UnsafePointer[Float32],\n output: UnsafePointer[Float32],\n M: Int32,\n d_model: Int32,\n d_ffn: Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# x, W_gate, W_up, W_down, output are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n W_gate: torch.Tensor,\n W_up: torch.Tensor,\n W_down: torch.Tensor,\n output: torch.Tensor,\n M: int,\n d_model: int,\n d_ffn: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# x, W_gate, W_up, W_down, output are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n W_gate: torch.Tensor,\n W_up: torch.Tensor,\n W_down: torch.Tensor,\n output: torch.Tensor,\n M: int,\n d_model: int,\n d_ffn: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, 
List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "import torch.nn.functional as F\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"SwiGLU MLP Block\",\n", + " atol=1e-04,\n", + " rtol=1e-04,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " 
self,\n", + " x: torch.Tensor,\n", + " W_gate: torch.Tensor,\n", + " W_up: torch.Tensor,\n", + " W_down: torch.Tensor,\n", + " output: torch.Tensor,\n", + " M: int,\n", + " d_model: int,\n", + " d_ffn: int,\n", + " ):\n", + " assert x.shape == (M, d_model)\n", + " assert W_gate.shape == (d_model, d_ffn)\n", + " assert W_up.shape == (d_model, d_ffn)\n", + " assert W_down.shape == (d_ffn, d_model)\n", + " assert output.shape == (M, d_model)\n", + " assert (\n", + " x.dtype == W_gate.dtype == W_up.dtype == W_down.dtype == output.dtype == torch.float32\n", + " )\n", + " assert x.device.type == \"cuda\"\n", + " assert W_gate.device.type == \"cuda\"\n", + " assert W_up.device.type == \"cuda\"\n", + " assert W_down.device.type == \"cuda\"\n", + " assert output.device.type == \"cuda\"\n", + "\n", + " gate = x @ W_gate # [M, d_ffn]\n", + " up = x @ W_up # [M, d_ffn]\n", + " hidden = F.silu(gate) * up # [M, d_ffn]\n", + " output.copy_(hidden @ W_down) # [M, d_model]\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"x\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"W_gate\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"W_up\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"W_down\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"M\": (ctypes.c_int, \"in\"),\n", + " \"d_model\": (ctypes.c_int, \"in\"),\n", + " \"d_ffn\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def _make_test_case(self, M, d_model, d_ffn, zero_x=False):\n", + " device = \"cuda\"\n", + " dtype = torch.float32\n", + " if zero_x:\n", + " x = torch.zeros(M, d_model, device=device, dtype=dtype)\n", + " else:\n", + " x = torch.randn(M, d_model, device=device, dtype=dtype) * 0.1\n", + " W_gate = torch.randn(d_model, d_ffn, device=device, dtype=dtype) * 0.02\n", + " W_up = torch.randn(d_model, d_ffn, device=device, dtype=dtype) * 0.02\n", + " W_down = torch.randn(d_ffn, d_model, 
device=device, dtype=dtype) * 0.02\n", + " output = torch.empty(M, d_model, device=device, dtype=dtype)\n", + " return {\n", + " \"x\": x,\n", + " \"W_gate\": W_gate,\n", + " \"W_up\": W_up,\n", + " \"W_down\": W_down,\n", + " \"output\": output,\n", + " \"M\": M,\n", + " \"d_model\": d_model,\n", + " \"d_ffn\": d_ffn,\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " device = \"cuda\"\n", + " dtype = torch.float32\n", + " M, d_model, d_ffn = 2, 2, 4\n", + " # x: each row is a basis vector\n", + " x = torch.tensor(\n", + " [[1.0, 0.0], [0.0, 1.0]],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " # W_gate: [d_model=2, d_ffn=4] \u2014 first two columns are identity, rest zeros\n", + " W_gate = torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " # W_up: same layout as W_gate\n", + " W_up = torch.tensor(\n", + " [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " # W_down: [d_ffn=4, d_model=2] \u2014 top 2x2 is identity, rest zeros\n", + " W_down = torch.tensor(\n", + " [[1.0, 0.0], [0.0, 1.0], [0.0, 0.0], [0.0, 0.0]],\n", + " device=device,\n", + " dtype=dtype,\n", + " )\n", + " output = torch.empty(M, d_model, device=device, dtype=dtype)\n", + " return {\n", + " \"x\": x,\n", + " \"W_gate\": W_gate,\n", + " \"W_up\": W_up,\n", + " \"W_down\": W_down,\n", + " \"output\": output,\n", + " \"M\": M,\n", + " \"d_model\": d_model,\n", + " \"d_ffn\": d_ffn,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " torch.manual_seed(42)\n", + " tests = []\n", + "\n", + " # Edge cases: single row\n", + " tests.append(self._make_test_case(1, 4, 8))\n", + "\n", + " # Edge case: two rows\n", + " tests.append(self._make_test_case(2, 4, 8))\n", + "\n", + " # Zero input\n", + " tests.append(self._make_test_case(4, 8, 16, zero_x=True))\n", + "\n", + " # Power-of-2 
sizes\n", + " tests.append(self._make_test_case(16, 32, 64))\n", + "\n", + " # Power-of-2 larger\n", + " tests.append(self._make_test_case(64, 64, 128))\n", + "\n", + " # Non-power-of-2 M\n", + " tests.append(self._make_test_case(30, 32, 64))\n", + "\n", + " # Non-power-of-2 all dims\n", + " tests.append(self._make_test_case(100, 60, 120))\n", + "\n", + " # Non-power-of-2 M, medium size\n", + " tests.append(self._make_test_case(255, 64, 128))\n", + "\n", + " # Realistic small inference batch (LLaMA-style ratios)\n", + " tests.append(self._make_test_case(128, 256, 512))\n", + "\n", + " # Realistic medium inference batch\n", + " tests.append(self._make_test_case(256, 512, 1024))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " torch.manual_seed(0)\n", + " # LLaMA-3 8B style: d_model=4096, d_ffn=14336, M=512 (batch=4 x seq=128)\n", + " return self._make_test_case(512, 4096, 14336)\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/85_lora_linear.ipynb b/challenges/colab_exports/medium/85_lora_linear.ipynb new file mode 100644 index 00000000..ac5fbe25 --- /dev/null +++ b/challenges/colab_exports/medium/85_lora_linear.ipynb @@ -0,0 +1,552 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a LoRA (Low-Rank Adaptation) linear layer forward pass. Given an input matrix\n x of shape batch × d_in, a base weight matrix W of\n shape d_out × d_in, a LoRA down-projection matrix A of shape\n rank × d_in, and a LoRA up-projection matrix B of shape\n d_out × rank, compute\n output = x × Wᵀ + lora_scale × (x × Aᵀ) × Bᵀ.\n All tensors are float32.\n

    \n\n\n \n\n \n \n x\n B×D_in\n\n \n \n \n \n\n \n \n W\n D_out×D_in\n\n \n \n \n x@Wᵗ\n B×D_out\n\n \n \n A\n rank×D_in\n\n \n \n \n x@Aᵗ\n B×rank\n\n \n \n B\n D_out×rank\n\n \n \n\n \n \n \n α×(x@Aᵗ)@Bᵗ\n B×D_out\n\n \n \n +\n\n \n \n \n output\n B×D_out\n\n \n \n \n \n \n\n\n

    Implementation Requirements

    \n
      \n
    • Implement the solve function; do not change its signature.
    • \n
    • Do not use external libraries beyond those provided.
    • \n
    • Write the result into output.
    • \n
    \n\n

    Examples

    \n

    Example 1:

    \n

    \n$$\nx = \\begin{bmatrix} 1 & 0 & -1 & 2 \\\\ 0 & 1 & 1 & -1 \\end{bmatrix},\\quad\nW = \\begin{bmatrix} 1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 1 & 0 \\end{bmatrix},\\quad\nA = \\begin{bmatrix} 1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\end{bmatrix},\\quad\nB = \\begin{bmatrix} 1 & 0 \\\\ 0 & 1 \\\\ 0 & 0 \\end{bmatrix}\n$$\n

    \n

    With lora_scale = 0.5:

    \n

    \n$$\n\\text{output} = x W^T + 0.5 \\cdot (x A^T) B^T\n= \\begin{bmatrix} 1 & 0 & -1 \\\\ 0 & 1 & 1 \\end{bmatrix}\n+ 0.5 \\cdot \\begin{bmatrix} 1 & 0 \\\\ 0 & 1 \\end{bmatrix} \\begin{bmatrix} 1 & 0 & 0 \\\\ 0 & 1 & 0 \\end{bmatrix}\n= \\begin{bmatrix} 1.5 & 0 & -1 \\\\ 0 & 1.5 & 1 \\end{bmatrix}\n$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ batch ≤ 1,024
    • \n
    • 1 ≤ d_in, d_out ≤ 8,192
    • \n
    • 1 ≤ rank ≤ 256; rank < min(d_in, d_out)
    • \n
    • All tensors are float32 on GPU.
    • \n
    • Performance is measured with batch = 256, d_in = 4,096, d_out = 4,096, rank = 64
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// x, W, A, B, output are device pointers\nextern \"C\" void solve(const float* x, const float* W, const float* A, const float* B, float* output,\n int batch, int d_in, int d_out, int rank, float lora_scale) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# x, W, A, B, output are tensors on the GPU\n@cute.jit\ndef solve(\n x: cute.Tensor,\n W: cute.Tensor,\n A: cute.Tensor,\n B: cute.Tensor,\n output: cute.Tensor,\n batch: cute.Int32,\n d_in: cute.Int32,\n d_out: cute.Int32,\n rank: cute.Int32,\n lora_scale: cute.Float32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# x, W, A, B are tensors on GPU\n@jax.jit\ndef solve(\n x: jax.Array,\n W: jax.Array,\n A: jax.Array,\n B: jax.Array,\n batch: int,\n d_in: int,\n d_out: int,\n rank: int,\n lora_scale: float,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + 
"outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.memory import UnsafePointer\n\n\n# x, W, A, B, output are device pointers\n@export\ndef solve(\n x: UnsafePointer[Float32, MutExternalOrigin],\n W: UnsafePointer[Float32, MutExternalOrigin],\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n batch: Int32,\n d_in: Int32,\n d_out: Int32,\n rank: Int32,\n lora_scale: Float32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# x, W, A, B, output are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n W: torch.Tensor,\n A: torch.Tensor,\n B: torch.Tensor,\n output: torch.Tensor,\n batch: int,\n d_in: int,\n d_out: int,\n rank: int,\n lora_scale: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# x, W, A, B, output are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n W: torch.Tensor,\n A: torch.Tensor,\n B: torch.Tensor,\n output: torch.Tensor,\n batch: int,\n d_in: int,\n d_out: int,\n rank: int,\n lora_scale: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge 
Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"LoRA Linear\",\n", + " atol=1e-04,\n", + " rtol=1e-04,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " 
    def reference_impl(
        self,
        x: torch.Tensor,
        W: torch.Tensor,
        A: torch.Tensor,
        B: torch.Tensor,
        output: torch.Tensor,
        batch: int,
        d_in: int,
        d_out: int,
        rank: int,
        lora_scale: float,
    ):
        """Reference LoRA-augmented linear layer.

        Computes ``output = x @ W^T + lora_scale * (x @ A^T) @ B^T`` and writes
        the result into ``output`` in place. All tensors must be float32 CUDA
        tensors with the shapes asserted below.
        """
        assert x.shape == (batch, d_in)
        assert W.shape == (d_out, d_in)
        assert A.shape == (rank, d_in)
        assert B.shape == (d_out, rank)
        assert output.shape == (batch, d_out)
        assert x.dtype == W.dtype == A.dtype == B.dtype == output.dtype == torch.float32
        assert x.device.type == "cuda"
        assert W.device.type == "cuda"
        assert A.device.type == "cuda"
        assert B.device.type == "cuda"
        assert output.device.type == "cuda"

        # Base linear: output = x @ W^T
        base = torch.mm(x, W.t())

        # LoRA path: delta = lora_scale * (x @ A^T) @ B^T
        lora_hidden = torch.mm(x, A.t())  # (batch, rank)
        delta = torch.mm(lora_hidden, B.t())  # (batch, d_out)

        output.copy_(base + lora_scale * delta)

    def get_solve_signature(self) -> Dict[str, tuple]:
        """Ctypes signature of solve(): argument name -> (ctype, "in"/"out")."""
        return {
            "x": (ctypes.POINTER(ctypes.c_float), "in"),
            "W": (ctypes.POINTER(ctypes.c_float), "in"),
            "A": (ctypes.POINTER(ctypes.c_float), "in"),
            "B": (ctypes.POINTER(ctypes.c_float), "in"),
            "output": (ctypes.POINTER(ctypes.c_float), "out"),
            "batch": (ctypes.c_int, "in"),
            "d_in": (ctypes.c_int, "in"),
            "d_out": (ctypes.c_int, "in"),
            "rank": (ctypes.c_int, "in"),
            "lora_scale": (ctypes.c_float, "in"),
        }

    def _make_test_case(self, batch, d_in, d_out, rank, lora_scale=0.5, zero_x=False):
        """Build one randomized test-case dict for the given problem sizes."""
        dtype = torch.float32
        device = "cuda"
        if zero_x:
            x = torch.zeros(batch, d_in, device=device, dtype=dtype)
        else:
            x = torch.randn(batch, d_in, device=device, dtype=dtype)
        W = torch.randn(d_out, d_in, device=device, dtype=dtype) * 0.02
        A = torch.randn(rank, d_in, device=device, dtype=dtype) * 0.02
        # NOTE(review): B is all-zeros (the conventional LoRA init), so the LoRA
        # delta vanishes for every case generated here and lora_scale is not
        # exercised; only the hand-built "Negative inputs" case in
        # generate_functional_test uses a nonzero B. Confirm this is intended.
        B = torch.zeros(d_out, rank, device=device, dtype=dtype)
        output = torch.zeros(batch, d_out, device=device, dtype=dtype)
        return {
            "x": x,
            "W": W,
            "A": A,
            "B": B,
            "output": output,
            "batch": batch,
            "d_in": d_in,
            "d_out": d_out,
            "rank": rank,
            "lora_scale": lora_scale,
        }

    def generate_example_test(self) -> Dict[str, Any]:
        """Small hand-crafted example (batch=2, d_in=4, d_out=3, rank=2)."""
        dtype = torch.float32
        device = "cuda"
        x = torch.tensor([[1.0, 0.0, -1.0, 2.0], [0.0, 1.0, 1.0, -1.0]], device=device, dtype=dtype)
        W = torch.tensor(
            [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],
            device=device,
            dtype=dtype,
        )
        A = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=device, dtype=dtype)
        B = torch.tensor(
            [[1.0, 0.0], [0.0, 1.0], [0.0, 0.0]],
            device=device,
            dtype=dtype,
        )
        output = torch.zeros(2, 3, device=device, dtype=dtype)
        return {
            "x": x,
            "W": W,
            "A": A,
            "B": B,
            "output": output,
            "batch": 2,
            "d_in": 4,
            "d_out": 3,
            "rank": 2,
            "lora_scale": 0.5,
        }

    def generate_functional_test(self) -> List[Dict[str, Any]]:
        """Deterministic (seeded) functional test suite covering edge cases."""
        torch.manual_seed(42)
        tests = []

        # Edge case: batch=1, tiny dimensions
        tests.append(self._make_test_case(1, 4, 4, 1))

        # Edge case: zero input
        tests.append(self._make_test_case(2, 8, 8, 2, zero_x=True))

        # Edge case: rank=1 (minimum LoRA rank)
        tests.append(self._make_test_case(4, 16, 16, 1))

        # Power-of-2 dimensions
        tests.append(self._make_test_case(16, 64, 64, 8))

        # Power-of-2, non-square
        tests.append(self._make_test_case(32, 128, 64, 16))

        # Non-power-of-2 dimensions
        tests.append(self._make_test_case(30, 100, 100, 4))

        # Non-power-of-2, mixed
        tests.append(self._make_test_case(7, 255, 128, 8))

        # Realistic small: LLM feed-forward style
        tests.append(self._make_test_case(64, 512, 512, 16, lora_scale=0.125))

        # Negative inputs
        tests.append(
            {
                "x": torch.full((4, 32), -1.0, device="cuda", dtype=torch.float32),
                "W": torch.randn(32, 32, device="cuda", dtype=torch.float32) * 0.02,
                "A": torch.randn(8, 32, device="cuda", dtype=torch.float32) * 0.02,
                "B": torch.randn(32, 8, device="cuda", dtype=torch.float32) * 0.02,
                "output": torch.zeros(4, 32, device="cuda", dtype=torch.float32),
                "batch": 4,
                "d_in": 32,
                "d_out": 32,
                "rank": 8,
                "lora_scale": 1.0,
            }
        )

        # Larger realistic: transformer hidden size
        tests.append(self._make_test_case(128, 1024, 1024, 32, lora_scale=0.0625))

        return tests

    def generate_performance_test(self) -> Dict[str, Any]:
        """Single large case used for timing."""
        torch.manual_seed(0)
        # LLaMA-style: d_in=d_out=4096, rank=64, batch=256
        return self._make_test_case(256, 4096, 4096, 64, lora_scale=0.015625)


ch = Challenge()


import os
import time
import ctypes
import torch

class Evaluate:
    """Runs a user solution against the challenge's reference implementation."""

    @staticmethod
    def eval_cuda(ch):
        """Compile solution.cu with nvcc into a uniquely named .so and test it via ctypes."""
        # 1. Compile a fresh uniquely named library
        so_filename = f'solution_func_{int(time.time())}.so'
        os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')
        lib = ctypes.CDLL(f'./{so_filename}')

        # 2. Extract signature and set argtypes
        signature = ch.get_solve_signature()
        lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]

        Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))

    @staticmethod
    def eval_python(ch):
        """Load solution.py as a module and test its solve() function."""
        import importlib.util
        import sys

        spec = importlib.util.spec_from_file_location("solution", "solution.py")
        solution = importlib.util.module_from_spec(spec)
        sys.modules["solution"] = solution
        spec.loader.exec_module(solution)

        signature = ch.get_solve_signature()
        Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))

    @staticmethod
    def _run_python(solution, kwargs):
        """Invoke the Python solve() and synchronize CUDA so results are visible."""
        solution.solve(**kwargs)
        if torch.cuda.is_available():
            torch.cuda.synchronize()

    @staticmethod
    def eval_mojo(ch):
        """Stub: Mojo evaluation is not wired up in this notebook."""
        print("Mojo evaluation is currently executed via a separate runner or wrapper.")
        print("Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,")
        print("or run an external python bridge. This is a stub.")

    @staticmethod
    def _build_cuda_args(kwargs, signature):
        """Convert kwargs to ctypes arguments (tensors are passed as raw data pointers)."""
        cuda_args = []
        for k, (arg_type, dir_type) in signature.items():
            val = kwargs[k]
            if isinstance(val, torch.Tensor):
                cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))
            else:
                cuda_args.append(arg_type(val))
        return cuda_args

    @staticmethod
    def _run_tests(ch, signature, run_fn):
        """Run all functional tests, comparing "out" tensors with torch.allclose; stops at the first failure."""
        print("=== Running Functional Tests ===")
        functional_tests = ch.generate_functional_test()
        all_passed = True

        for i, test in enumerate(functional_tests):
            ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}
            test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}

            # Run Reference
            ch.reference_impl(**ref_kwargs)

            # Run implementation
            run_fn(test_kwargs)
            if torch.cuda.is_available():
                torch.cuda.synchronize()

            # Verify outputs
            match = True
            for k, (_, dir_type) in signature.items():
                if dir_type == "out":
                    if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):
                        match = False
                        print(f"❌ Test {i+1}/{len(functional_tests)} Failed on output '{k}'")
                        break

            if match:
                print(f"✅ Test {i+1}/{len(functional_tests)} Passed")
            else:
                all_passed = False
                break

        if all_passed:
            print("\n🎉 All functional tests passed!")
            return True
        else:
            return False
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/87_speculative_decoding_verification.ipynb b/challenges/colab_exports/medium/87_speculative_decoding_verification.ipynb new file mode 100644 index 00000000..15e7d975 --- /dev/null +++ b/challenges/colab_exports/medium/87_speculative_decoding_verification.ipynb @@ -0,0 +1,681 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement the token verification step of speculative decoding. A draft model proposes $T$ tokens;\n the target model evaluates them in one forward pass and accepts or rejects each. Given $B$\n sequences, produce the verified output tokens. Probability tensors are float32;\n token tensors are int32.\n

    \n\n

    \n Notation for each sequence $b$, at each draft position $i = 0, \\ldots, T{-}1$:\n

    \n
      \n
    • $t_i = \\texttt{draft_tokens}[b, i]$ — the token proposed by the draft model
    • \n
    • $p_i(v) = \\texttt{draft_probs}[b, i, v]$ — draft model's probability for token $v$
    • \n
    • $q_i(v) = \\texttt{target_probs}[b, i, v]$ — target model's probability for token $v$
    • \n
    • $u_i = \\texttt{uniform_samples}[b, i]$ — pre-generated $U[0,1)$ sample for position $i$
    • \n
    \n\n\n \n\n \n pos 0\n pos 1\n pos 2\n pos 3\n\n \n draft\n \n t₀\n \n t₁\n \n t₂\n \n t₃\n\n \n probs\n \n p(t₀) = 0.60\n q(t₀) = 0.50\n\n \n p(t₁) = 0.50\n q(t₁) = 0.20\n\n \n not reached\n\n \n not reached\n\n \n α, test\n \n α = .50/.60 = .83\n u=0.1 < .83 ✓\n\n \n α = .20/.50 = .40\n u=0.7 ≥ .40 ✗\n\n \n skipped\n\n \n skipped\n\n \n \n reject at pos 1 → stop, resample from adj(v) = max(0, q(v) − p(v))\n normalize adj, inverse-CDF sample using u[b, T] → replacement token t₁′\n\n \n output\n \n t₀\n \n t₁′\n \n 0\n \n 0\n\n \n p = draft prob\n q = target prob\n α = min(1, q/p)\n ■ accepted\n ■ resampled\n ■ pad\n\n \n If all T tokens accepted: sample bonus token from q at last position using u[b, T]\n\n\n

    \n For each sequence $b$, process positions $i = 0, 1, \\ldots, T{-}1$ left-to-right:\n

    \n
      \n
1. Compute acceptance probability: $\displaystyle \alpha_i = \min\!\left(1,\; \frac{q_i(t_i)}{p_i(t_i)}\right)$
2. If $u_i < \alpha_i$: accept $t_i$, continue to position $i{+}1$.
3. If $u_i \ge \alpha_i$: reject, stop. Sample replacement from:
   $$\text{adj}(v) = \frac{\max(0,\; q_i(v) - p_i(v))}{\sum_{v'} \max(0,\; q_i(v') - p_i(v'))}$$
   using inverse CDF with $r = \texttt{uniform_samples}[b, T]$. If $\text{adj}$ is all zeros, use uniform $1/V$.
4. If all $T$ tokens accepted: sample a bonus token from $q_{T-1}$ using $\texttt{uniform_samples}[b, T]$.
    \n

    \n Write results into output_tokens[b, :] (shape $[B, T{+}1]$): accepted/resampled tokens\n fill positions $0$ through the accepted count (inclusive), remaining positions are zero.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Implement solve(draft_tokens, draft_probs, target_probs, uniform_samples, output_tokens, B, T, V).
    • \n
    • Do not change the function signature or use external libraries beyond the standard GPU frameworks.
    • \n
    • Write results into the provided output_tokens buffer (shape [B, T+1], int32).
    • \n
    • Memory layout is row-major: draft_probs[b, i, v] is at offset b*T*V + i*V + v.
    • \n
    • \n Inverse CDF sampling: given distribution $\\text{adj}$ (already normalized), find the\n smallest index $k$ where $\\sum_{v=0}^{k} \\text{adj}(v) \\ge r$, where\n $r = \\texttt{uniform_samples}[b, T]$. Clamp the result to $[0, V-1]$.\n
    • \n
    • \n If the adjusted distribution is all zeros (i.e., $q_i \\le p_i$ everywhere), fall back to\n the uniform distribution over $V$ tokens.\n
    • \n
    \n\n

    Example

    \n

    \n Input: $B = 1,\\; T = 3,\\; V = 4$\n

    \n

    \n $\\text{draft_tokens} = [1, 2, 0]$\n

    \n

    \n Draft probabilities $p_i$ and target probabilities $q_i$ per position:\n $$\n p_0 = \\begin{bmatrix} 0.10 & 0.60 & 0.20 & 0.10 \\end{bmatrix}, \\quad\n q_0 = \\begin{bmatrix} 0.10 & 0.50 & 0.20 & 0.20 \\end{bmatrix}\n $$\n $$\n p_1 = \\begin{bmatrix} 0.10 & 0.20 & 0.50 & 0.20 \\end{bmatrix}, \\quad\n q_1 = \\begin{bmatrix} 0.30 & 0.20 & 0.20 & 0.30 \\end{bmatrix}\n $$\n $$\n \\text{uniform_samples} = \\begin{bmatrix} 0.50 & 0.70 & 0.30 & 0.90 \\end{bmatrix}\n $$\n

    \n

    \n Position 0 (draft token = 1):\n $\\alpha_0 = \\min\\!\\left(1,\\, \\frac{q_0(1)}{p_0(1)}\\right) = \\min\\!\\left(1,\\, \\frac{0.50}{0.60}\\right) \\approx 0.833$.\n Since $u_0 = 0.50 < 0.833$, accept token 1.\n

    \n

    \n Position 1 (draft token = 2):\n $\\alpha_1 = \\min\\!\\left(1,\\, \\frac{q_1(2)}{p_1(2)}\\right) = \\min\\!\\left(1,\\, \\frac{0.20}{0.50}\\right) = 0.40$.\n Since $u_1 = 0.70 \\ge 0.40$, reject. Resample from adjusted distribution:\n $$\n \\text{adj}(v) = \\max(0,\\, q_1(v) - p_1(v)) = [0.20,\\, 0,\\, 0,\\, 0.10]\n $$\n $$\n \\text{normalized} = \\left[\\tfrac{2}{3},\\, 0,\\, 0,\\, \\tfrac{1}{3}\\right], \\quad\n \\text{CDF} = [0.667,\\, 0.667,\\, 0.667,\\, 1.0]\n $$\n With $r = \\text{uniform_samples}[0, T] = 0.90$, inverse CDF gives token 3.\n

    \n

    \n Output:\n $$\\text{output_tokens} = \\begin{bmatrix} 1 & 3 & 0 & 0 \\end{bmatrix}$$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ B ≤ 256
    • \n
    • 1 ≤ T ≤ 16
    • \n
    • 2 ≤ V ≤ 131,072
    • \n
    • draft_probs[b, i, :] and target_probs[b, i, :] are valid probability distributions (non-negative, sum to 1)
    • \n
    • draft_probs[b, i, draft_tokens[b, i]] > 0 for all b, i
    • \n
    • uniform_samples values are in $[0, 1)$
    • \n
    • All floating-point tensors use float32; token tensors use int32
    • \n
    • Performance is measured with B = 64, T = 8, V = 32,768
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// draft_tokens, draft_probs, target_probs, uniform_samples, output_tokens are device pointers\nextern \"C\" void solve(const int* draft_tokens, const float* draft_probs, const float* target_probs,\n const float* uniform_samples, int* output_tokens, int B, int T, int V) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# draft_tokens, draft_probs, target_probs, uniform_samples, output_tokens are tensors on the GPU\n@cute.jit\ndef solve(\n draft_tokens: cute.Tensor,\n draft_probs: cute.Tensor,\n target_probs: cute.Tensor,\n uniform_samples: cute.Tensor,\n output_tokens: cute.Tensor,\n B: cute.Int32,\n T: cute.Int32,\n V: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# draft_tokens, draft_probs, target_probs, uniform_samples are tensors on GPU\n@jax.jit\ndef solve(\n draft_tokens: jax.Array,\n draft_probs: jax.Array,\n target_probs: jax.Array,\n uniform_samples: jax.Array,\n B: int,\n T: int,\n V: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 
MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from gpu.host import DeviceContext\nfrom memory import UnsafePointer\n\n# draft_tokens, draft_probs, target_probs, uniform_samples, output_tokens are device pointers\n@export\ndef solve(\n draft_tokens: UnsafePointer[Int32],\n draft_probs: UnsafePointer[Float32],\n target_probs: UnsafePointer[Float32],\n uniform_samples: UnsafePointer[Float32],\n output_tokens: UnsafePointer[Int32],\n B: Int32,\n T: Int32,\n V: Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# draft_tokens, draft_probs, target_probs, uniform_samples, output_tokens are tensors on the GPU\ndef solve(\n draft_tokens: torch.Tensor,\n draft_probs: torch.Tensor,\n target_probs: torch.Tensor,\n uniform_samples: torch.Tensor,\n output_tokens: torch.Tensor,\n B: int,\n T: int,\n V: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# draft_tokens, draft_probs, target_probs, uniform_samples, output_tokens are tensors on the GPU\ndef solve(\n draft_tokens: torch.Tensor,\n draft_probs: torch.Tensor,\n target_probs: torch.Tensor,\n uniform_samples: torch.Tensor,\n output_tokens: torch.Tensor,\n B: int,\n T: int,\n V: int,\n):\n pass\n" + ] + }, + { + "cell_type": 
# --- Core Challenge Base ---
from abc import ABC, abstractmethod
from typing import Any, Dict, List


class ChallengeBase(ABC):
    """Abstract base for a LeetGPU challenge definition.

    Subclasses provide a reference implementation, the ctypes signature of the
    user's ``solve`` entry point, and generators for example, functional, and
    performance test cases.
    """

    def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):
        # atol/rtol are the tolerances the evaluator feeds to torch.allclose.
        self.name = name
        self.atol = atol
        self.rtol = rtol
        self.num_gpus = num_gpus
        self.access_tier = access_tier

    @abstractmethod
    def reference_impl(self, *args, **kwargs):
        """
        Reference solution implementation.
        """
        pass

    @abstractmethod
    def get_solve_signature(self) -> Dict[str, Any]:
        """
        Get the function signature for solution.

        Returns:
            Mapping from argument name to a ``(ctypes type, direction)`` tuple,
            where direction is ``"in"`` or ``"out"``.
        """
        pass

    @abstractmethod
    def generate_example_test(self) -> Dict[str, Any]:
        """
        Generate an example test case for this problem.

        Returns:
            Dictionary with test case parameters
        """
        # Fixed: was annotated List[Dict[str, Any]], but the docstring and all
        # concrete implementations return a single test-case dictionary.
        pass

    @abstractmethod
    def generate_functional_test(self) -> List[Dict[str, Any]]:
        """
        Generate functional test cases for this problem.

        Returns:
            List of test case dictionaries
        """
        pass

    @abstractmethod
    def generate_performance_test(self) -> Dict[str, Any]:
        """
        Generate a performance test case for this problem.

        Returns:
            Dictionary with test case parameters
        """
        # Fixed: was annotated List[Dict[str, Any]]; implementations return one dict.
        pass
# --- Challenge Logic ---
import ctypes
from typing import Any, Dict, List

import torch


class Challenge(ChallengeBase):
    """Speculative-decoding token verification: accept/reject draft tokens, resample on rejection."""

    def __init__(self):
        super().__init__(
            name="Speculative Decoding Verification",
            atol=1e-05,
            rtol=1e-05,
            num_gpus=1,
            access_tier="free",
        )

    def reference_impl(
        self,
        draft_tokens: torch.Tensor,
        draft_probs: torch.Tensor,
        target_probs: torch.Tensor,
        uniform_samples: torch.Tensor,
        output_tokens: torch.Tensor,
        B: int,
        T: int,
        V: int,
    ):
        """Sequential reference for speculative-decoding verification.

        Per sequence b: accept draft token i while u_i < min(1, q/p); on the
        first rejection, resample from clamp(q - p, 0) (normalized, with a
        uniform fallback if all-zero) via inverse CDF using
        uniform_samples[b, T], then stop. If all T tokens are accepted, sample
        a bonus token from the last target distribution. Positions never
        written remain 0.
        """
        assert draft_tokens.shape == (B, T)
        assert draft_probs.shape == (B, T, V)
        assert target_probs.shape == (B, T, V)
        assert uniform_samples.shape == (B, T + 1)
        assert output_tokens.shape == (B, T + 1)
        assert draft_tokens.dtype == torch.int32
        assert draft_probs.dtype == torch.float32
        assert target_probs.dtype == torch.float32
        assert uniform_samples.dtype == torch.float32
        assert output_tokens.dtype == torch.int32
        assert draft_tokens.device.type == "cuda"
        assert draft_probs.device.type == "cuda"
        assert target_probs.device.type == "cuda"
        assert uniform_samples.device.type == "cuda"
        assert output_tokens.device.type == "cuda"

        output_tokens.fill_(0)

        for b in range(B):
            for i in range(T):
                tok = int(draft_tokens[b, i].item())
                p = draft_probs[b, i, tok].item()
                q = target_probs[b, i, tok].item()
                # Constraints guarantee p > 0 for the drafted token.
                alpha = min(1.0, q / p)

                if uniform_samples[b, i].item() < alpha:
                    output_tokens[b, i] = tok
                else:
                    adjusted = torch.clamp(target_probs[b, i] - draft_probs[b, i], min=0.0)
                    total = adjusted.sum().item()
                    if total > 0.0:
                        adjusted = adjusted / total
                    else:
                        # q <= p everywhere: fall back to uniform over V tokens.
                        adjusted = (
                            torch.ones(V, dtype=torch.float32, device=draft_tokens.device) / V
                        )
                    cdf = torch.cumsum(adjusted, dim=0)
                    r = float(uniform_samples[b, T].item())
                    # searchsorted (left) yields the smallest k with cdf[k] >= r.
                    new_tok = int(torch.searchsorted(cdf.contiguous(), r).item())
                    output_tokens[b, i] = min(new_tok, V - 1)
                    break
            else:
                # for/else: loop finished without break -> all T accepted; bonus token.
                cdf = torch.cumsum(target_probs[b, T - 1], dim=0)
                r = float(uniform_samples[b, T].item())
                bonus_tok = int(torch.searchsorted(cdf.contiguous(), r).item())
                output_tokens[b, T] = min(bonus_tok, V - 1)

    def get_solve_signature(self) -> Dict[str, tuple]:
        """Ctypes signature of solve(): argument name -> (ctype, "in"/"out")."""
        return {
            "draft_tokens": (ctypes.POINTER(ctypes.c_int), "in"),
            "draft_probs": (ctypes.POINTER(ctypes.c_float), "in"),
            "target_probs": (ctypes.POINTER(ctypes.c_float), "in"),
            "uniform_samples": (ctypes.POINTER(ctypes.c_float), "in"),
            "output_tokens": (ctypes.POINTER(ctypes.c_int), "out"),
            "B": (ctypes.c_int, "in"),
            "T": (ctypes.c_int, "in"),
            "V": (ctypes.c_int, "in"),
        }

    def _make_sparse_probs(self, B, T, V, K, device):
        """Generate sparse probability distributions: only K tokens have nonzero probability.

        Using sparse distributions ensures that the adjusted distribution clamp(q-p, 0)
        has at most 2K nonzero entries, making CDF summation numerically exact regardless
        of summation order. This prevents floating-point sensitivity for large V.
        """
        K = min(K, V)
        flat = B * T
        # For each (b, i), sample K distinct token indices
        idx = torch.stack([torch.randperm(V, device=device)[:K] for _ in range(flat)])
        idx = idx.view(B, T, K)
        # Random weights summing to 1
        weights = torch.rand(B, T, K, device=device)
        weights = weights / weights.sum(dim=-1, keepdim=True)
        # Scatter into full V-dimensional probability vector
        probs = torch.zeros(B, T, V, device=device)
        probs.scatter_(2, idx, weights)
        return probs, idx

    def _make_test_case(self, B, T, V, seed=42):
        """Seeded random mixed accept/reject case built from sparse p and q."""
        torch.manual_seed(seed)
        device = "cuda"

        # K=64 active tokens per position: enough diversity while keeping the adjusted
        # distribution sparse (at most 128 nonzero entries), ensuring CDF sums are
        # independent of floating-point summation order.
        K = min(64, V)
        draft_probs, draft_idx = self._make_sparse_probs(B, T, V, K, device)
        target_probs, _ = self._make_sparse_probs(B, T, V, K, device)

        # Sample draft tokens from the active K tokens
        weights = draft_probs.gather(2, draft_idx)  # (B, T, K)
        flat_w = weights.view(B * T, K)
        chosen = torch.multinomial(flat_w, 1).view(B, T)  # index within the K tokens
        draft_tokens = draft_idx.gather(2, chosen.unsqueeze(-1)).squeeze(-1).to(torch.int32)

        uniform_samples = torch.rand(B, T + 1, device=device)
        output_tokens = torch.zeros(B, T + 1, device=device, dtype=torch.int32)

        return {
            "draft_tokens": draft_tokens,
            "draft_probs": draft_probs,
            "target_probs": target_probs,
            "uniform_samples": uniform_samples,
            "output_tokens": output_tokens,
            "B": B,
            "T": T,
            "V": V,
        }

    def _make_accept_all_case(self, B, T, V, seed=42):
        """All draft tokens accepted: target_probs == draft_probs so alpha == 1 everywhere."""
        torch.manual_seed(seed)
        device = "cuda"

        K = min(64, V)
        draft_probs, draft_idx = self._make_sparse_probs(B, T, V, K, device)
        target_probs = draft_probs.clone()  # alpha = min(1, q/p) = 1 → always accept

        weights = draft_probs.gather(2, draft_idx)
        flat_w = weights.view(B * T, K)
        chosen = torch.multinomial(flat_w, 1).view(B, T)
        draft_tokens = draft_idx.gather(2, chosen.unsqueeze(-1)).squeeze(-1).to(torch.int32)

        # All acceptance samples set to 0 (< 1.0 = alpha) to guarantee acceptance
        uniform_samples = torch.zeros(B, T + 1, device=device)
        uniform_samples[:, T] = torch.rand(B, device=device)  # bonus sampling sample

        output_tokens = torch.zeros(B, T + 1, device=device, dtype=torch.int32)

        return {
            "draft_tokens": draft_tokens,
            "draft_probs": draft_probs,
            "target_probs": target_probs,
            "uniform_samples": uniform_samples,
            "output_tokens": output_tokens,
            "B": B,
            "T": T,
            "V": V,
        }

    def _make_reject_first_case(self, B, T, V, seed=42):
        """First draft token always rejected: draft_probs high, target low for that token."""
        torch.manual_seed(seed)
        device = "cuda"

        draft_probs = torch.softmax(torch.randn(B, T, V, device=device), dim=-1)
        target_probs = torch.softmax(torch.randn(B, T, V, device=device), dim=-1)

        flat = draft_probs.view(B * T, V)
        draft_tokens = torch.multinomial(flat, 1).view(B, T).to(torch.int32)

        # Force rejection at position 0 for every sequence:
        # set alpha[b,0] very small and uniform_sample[b,0] high enough to reject
        for b in range(B):
            tok = int(draft_tokens[b, 0].item())
            # Make draft prob ~0.9 for the chosen token (high p)
            draft_probs[b, 0] = torch.full((V,), 0.1 / max(V - 1, 1), device=device)
            draft_probs[b, 0, tok] = 0.9
            draft_probs[b, 0] = draft_probs[b, 0] / draft_probs[b, 0].sum()
            # Make target prob ~1/V for the same token (low q)
            target_probs[b, 0] = torch.ones(V, device=device) / V

        uniform_samples = torch.rand(B, T + 1, device=device)
        # Force uniform[b, 0] = 0.99 > alpha (which is ~1/V / 0.9 ≈ small)
        uniform_samples[:, 0] = 0.99

        output_tokens = torch.zeros(B, T + 1, device=device, dtype=torch.int32)

        return {
            "draft_tokens": draft_tokens,
            "draft_probs": draft_probs,
            "target_probs": target_probs,
            "uniform_samples": uniform_samples,
            "output_tokens": output_tokens,
            "B": B,
            "T": T,
            "V": V,
        }

    def generate_example_test(self) -> Dict[str, Any]:
        """Hand-crafted example matching the challenge description (accept, reject, resample)."""
        device = "cuda"

        # B=1, T=3, V=4: position 0 accepted, position 1 rejected, token resampled
        draft_tokens = torch.tensor([[1, 2, 0]], device=device, dtype=torch.int32)

        draft_probs = torch.tensor(
            [
                [
                    [0.10, 0.60, 0.20, 0.10],  # pos 0: draft_tokens[0,0]=1, p=0.60
                    [0.10, 0.20, 0.50, 0.20],  # pos 1: draft_tokens[0,1]=2, p=0.50
                    [0.40, 0.20, 0.20, 0.20],  # pos 2: draft_tokens[0,2]=0, p=0.40
                ]
            ],
            device=device,
            dtype=torch.float32,
        )

        target_probs = torch.tensor(
            [
                [
                    [0.10, 0.50, 0.20, 0.20],  # pos 0: q=0.50, alpha=min(1,0.50/0.60)=0.833
                    [0.30, 0.20, 0.20, 0.30],  # pos 1: q=0.20, alpha=min(1,0.20/0.50)=0.400
                    [0.30, 0.20, 0.30, 0.20],  # pos 2: not reached
                ]
            ],
            device=device,
            dtype=torch.float32,
        )

        # uniform_samples[0, 0]=0.50 < 0.833 → ACCEPT token 1
        # uniform_samples[0, 1]=0.70 > 0.400 → REJECT token 2
        # adjusted = clamp([0.20, 0, -0.30, 0.10], min=0) = [0.20, 0, 0, 0.10]
        # normalized CDF = [0.667, 0.667, 0.667, 1.0]
        # uniform_samples[0, T=3]=0.90 → searchsorted → token 3
        # output_tokens[0] = [1, 3, 0, 0]
        uniform_samples = torch.tensor(
            [[0.50, 0.70, 0.30, 0.90]], device=device, dtype=torch.float32
        )

        output_tokens = torch.zeros(1, 4, device=device, dtype=torch.int32)

        return {
            "draft_tokens": draft_tokens,
            "draft_probs": draft_probs,
            "target_probs": target_probs,
            "uniform_samples": uniform_samples,
            "output_tokens": output_tokens,
            "B": 1,
            "T": 3,
            "V": 4,
        }

    def generate_functional_test(self) -> List[Dict[str, Any]]:
        """Seeded suite covering forced-reject, all-accept, and mixed cases."""
        tests = []

        # Edge: T=1, rejected immediately
        tests.append(self._make_reject_first_case(1, 1, 4, seed=1))

        # Edge: T=1, all accepted (bonus token sampled)
        tests.append(self._make_accept_all_case(1, 1, 4, seed=2))

        # Edge: T=2, first rejected
        tests.append(self._make_reject_first_case(1, 2, 8, seed=3))

        # Edge: T=4, all accepted
        tests.append(self._make_accept_all_case(2, 4, 8, seed=4))

        # Zero uniform_samples acceptance values → force rejection at pos 0 (unless alpha=1)
        tests.append(self._make_reject_first_case(4, 4, 16, seed=5))

        # Power-of-2 vocab, mixed acceptance
        tests.append(self._make_test_case(4, 8, 64, seed=10))

        # Larger vocab, mixed acceptance
        tests.append(self._make_test_case(8, 8, 256, seed=20))

        # Non-power-of-2 vocab
        tests.append(self._make_test_case(4, 6, 30, seed=30))

        # All sequences accept all tokens (bonus sampling)
        tests.append(self._make_accept_all_case(8, 8, 128, seed=40))

        # Realistic small batch
        tests.append(self._make_test_case(16, 8, 1000, seed=50))

        return tests

    def generate_performance_test(self) -> Dict[str, Any]:
        """Single large case used for timing."""
        torch.manual_seed(0)
        # B=64 sequences, T=8 draft tokens, V=32768 (Mistral/LLaMA-2 vocab size)
        return self._make_test_case(64, 8, 32768, seed=0)


ch = Challenge()


import os
import time
import ctypes
import torch

class Evaluate:
    """Runs a user solution against the challenge's reference implementation."""

    @staticmethod
    def eval_cuda(ch):
        """Compile solution.cu with nvcc into a uniquely named .so and test it via ctypes."""
        # 1. Compile a fresh uniquely named library
        so_filename = f'solution_func_{int(time.time())}.so'
        os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')
        lib = ctypes.CDLL(f'./{so_filename}')

        # 2. Extract signature and set argtypes
        signature = ch.get_solve_signature()
        lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]

        Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))

    @staticmethod
    def eval_python(ch):
        """Load solution.py as a module and test its solve() function."""
        import importlib.util
        import sys

        spec = importlib.util.spec_from_file_location("solution", "solution.py")
        solution = importlib.util.module_from_spec(spec)
        sys.modules["solution"] = solution
        spec.loader.exec_module(solution)

        signature = ch.get_solve_signature()
        Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))

    @staticmethod
    def _run_python(solution, kwargs):
        """Invoke the Python solve() and synchronize CUDA so results are visible."""
        solution.solve(**kwargs)
        if torch.cuda.is_available():
            torch.cuda.synchronize()

    @staticmethod
    def eval_mojo(ch):
        """Stub: Mojo evaluation is not wired up in this notebook."""
        print("Mojo evaluation is currently executed via a separate runner or wrapper.")
        print("Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,")
        print("or run an external python bridge. This is a stub.")

    @staticmethod
    def _build_cuda_args(kwargs, signature):
        """Convert kwargs to ctypes arguments (tensors are passed as raw data pointers)."""
        cuda_args = []
        for k, (arg_type, dir_type) in signature.items():
            val = kwargs[k]
            if isinstance(val, torch.Tensor):
                cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))
            else:
                cuda_args.append(arg_type(val))
        return cuda_args

    @staticmethod
    def _run_tests(ch, signature, run_fn):
        """Run all functional tests, comparing "out" tensors with torch.allclose; stops at the first failure."""
        print("=== Running Functional Tests ===")
        functional_tests = ch.generate_functional_test()
        all_passed = True

        for i, test in enumerate(functional_tests):
            ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}
            test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}

            # Run Reference
            ch.reference_impl(**ref_kwargs)

            # Run implementation
            run_fn(test_kwargs)
            if torch.cuda.is_available():
                torch.cuda.synchronize()

            # Verify outputs
            match = True
            for k, (_, dir_type) in signature.items():
                if dir_type == "out":
                    if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):
                        match = False
                        print(f"❌ Test {i+1}/{len(functional_tests)} Failed on output '{k}'")
                        break

            if match:
                print(f"✅ Test {i+1}/{len(functional_tests)} Passed")
            else:
                all_passed = False
                break

        if all_passed:
            print("\n🎉 All functional tests passed!")
            return True
        else:
            return False
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/90_causal_depthwise_conv1d.ipynb b/challenges/colab_exports/medium/90_causal_depthwise_conv1d.ipynb new file mode 100644 index 00000000..ac7eb888 --- /dev/null +++ b/challenges/colab_exports/medium/90_causal_depthwise_conv1d.ipynb @@ -0,0 +1,568 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement a causal depthwise 1D convolution over a batched sequence tensor\n x of shape (B, L, D), producing an output of the same shape.\n In a depthwise convolution, each channel d is convolved independently using its\n own kernel weight[d, :] \u2014 there is no mixing across channels.\n The convolution is causal: output position l may only depend on\n input positions 0, 1, …, l (past and present), never future positions.\n This operation is a key component of state-space models such as Mamba, where it is applied\n before the selective scan to mix local context within each feature channel.\n

    \n\n\n \n \n \n \n \n\n \n \n\n \n Causal Depthwise Conv1d (K=3, one channel shown)\n\n \n x[d]\n\n \n \n x\u2080\n\n \n x\u2081\n\n \n x\u2082\n\n \n x\u2083\n\n \n x\u2084\n\n \n x\u2085\n\n \n w[d]\n \n w\u2080\n \n w\u2081\n \n w\u2082\n\n \n kernel at l=4: reads x\u2082,x\u2083,x\u2084\n\n \n \n\n \n y[d]\n\n \n \n y\u2080\n\n \n y\u2081\n\n \n y\u2082\n\n \n y\u2083\n\n \n y\u2084\n\n \n y\u2085\n\n \n \n y[d,l] = bias[d] + \u03a3 w[d,k] \u00b7 x[d, l\u2212k] (x[d,l\u2212k] = 0 if l\u2212k < 0)\n \n\n\n

    \n Formally, for each batch element b, sequence position l, and channel d:\n

    \n\n$$\n\\text{output}[b,\\, l,\\, d]\n= \\text{bias}[d]\n+ \\sum_{k=0}^{K-1} \\text{weight}[d,\\, k] \\cdot x[b,\\, l - k,\\, d]\n$$\n\n

    \n where positions l − k < 0 are treated as zero (zero-pad the left boundary).\n The tensor layout is channels-last: x[b, l, d] is stored at offset\n b × L × D + l × D + d.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • The solve function signature must remain unchanged
    • \n
    • The result must be written into the output tensor
    • \n
    • Use only native features (external libraries are not permitted)
    • \n
    • Input positions before the start of the sequence (i.e. indices l − k < 0) must be treated as zero
    • \n
    \n\n

    Example

    \n\n

    With B = 1, L = 4, D = 2, K = 3:

    \n\n
    \nx      = [[[1.0, 2.0],    # l=0\n           [3.0, 4.0],    # l=1\n           [5.0, 6.0],    # l=2\n           [7.0, 8.0]]]   # l=3   shape (1, 4, 2)\n\nweight = [[ 1.0,  0.0, -1.0],   # channel d=0\n          [ 1.0,  1.0,  1.0]]   # channel d=1   shape (2, 3)\n\nbias   = [0.0, 0.0]\n\noutput = [[[1.0,  2.0],   # l=0: d0: 1*1=1          d1: 1*2=2\n           [3.0,  6.0],   # l=1: d0: 3*1+1*0=3      d1: 4*1+2*1=6\n           [4.0, 12.0],   # l=2: d0: 5*1+3*0+1*(-1)=4  d1: 6+4+2=12\n           [4.0, 18.0]]]  # l=3: d0: 7*1+5*0+3*(-1)=4  d1: 8+6+4=18\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ B ≤ 16 (batch size)
    • \n
    • 1 ≤ L ≤ 8,192 (sequence length)
    • \n
    • 1 ≤ D ≤ 8,192 (number of channels)
    • \n
    • 1 ≤ K ≤ 8 (kernel size; typically 3 or 4 in practice)
    • \n
    • All tensors use 32-bit floating point
    • \n
    • Tensor x and output use channels-last layout: shape (B, L, D)
    • \n
    • Performance is measured with B = 8, L = 2,048, D = 4,096, K = 4
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// x, weight, bias, output are device pointers\nextern \"C\" void solve(const float* x, const float* weight, const float* bias, float* output, int B,\n int L, int D, int K) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# x, weight, bias, output are tensors on the GPU\n@cute.jit\ndef solve(\n x: cute.Tensor,\n weight: cute.Tensor,\n bias: cute.Tensor,\n output: cute.Tensor,\n B: cute.Int32,\n L: cute.Int32,\n D: cute.Int32,\n K: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# x, weight, bias are tensors on GPU\n@jax.jit\ndef solve(\n x: jax.Array, weight: jax.Array, bias: jax.Array, B: int, L: int, D: int, K: int\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from gpu.host import DeviceContext\nfrom memory import 
UnsafePointer\n\n# x, weight, bias, output are device pointers\n@export\ndef solve(\n x: UnsafePointer[Float32],\n weight: UnsafePointer[Float32],\n bias: UnsafePointer[Float32],\n output: UnsafePointer[Float32],\n B: Int32,\n L: Int32,\n D: Int32,\n K: Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# x, weight, bias, output are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n weight: torch.Tensor,\n bias: torch.Tensor,\n output: torch.Tensor,\n B: int,\n L: int,\n D: int,\n K: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# x, weight, bias, output are tensors on the GPU\ndef solve(\n x: torch.Tensor,\n weight: torch.Tensor,\n bias: torch.Tensor,\n output: torch.Tensor,\n B: int,\n L: int,\n D: int,\n K: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " 
self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "import torch.nn.functional as F\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Causal Depthwise Conv1d\",\n", + " atol=1e-04,\n", + " rtol=1e-04,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " x: torch.Tensor,\n", + " weight: torch.Tensor,\n", + " bias: torch.Tensor,\n", + " output: torch.Tensor,\n", + " B: int,\n", + " L: int,\n", + " D: int,\n", + " K: int,\n", + " ):\n", + " assert x.shape == (B, L, D)\n", + " assert weight.shape == 
(D, K)\n", + " assert bias.shape == (D,)\n", + " assert output.shape == (B, L, D)\n", + " assert x.dtype == weight.dtype == bias.dtype == output.dtype == torch.float32\n", + " assert x.device.type == \"cuda\"\n", + " assert weight.device.type == \"cuda\"\n", + " assert bias.device.type == \"cuda\"\n", + " assert output.device.type == \"cuda\"\n", + "\n", + " # Reshape to (B, D, L) for conv1d\n", + " x_t = x.permute(0, 2, 1).contiguous() # (B, D, L)\n", + "\n", + " # Causal padding: pad K-1 zeros on the left so each output position\n", + " # only sees current and past input positions\n", + " x_padded = F.pad(x_t, (K - 1, 0)) # (B, D, L + K - 1)\n", + "\n", + " # Depthwise conv: weight (D, K) -> (D, 1, K), groups=D\n", + " # Flip the kernel so weight[d, 0] applies to the current position (l-0)\n", + " # and weight[d, K-1] applies to the oldest position (l-(K-1)).\n", + " # F.conv1d uses cross-correlation (no implicit flip), so we flip explicitly.\n", + " w = weight.flip(1).unsqueeze(1) # (D, 1, K)\n", + " result = F.conv1d(x_padded, w, bias=bias, groups=D) # (B, D, L)\n", + "\n", + " output.copy_(result.permute(0, 2, 1)) # (B, L, D)\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"x\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"weight\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"bias\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"B\": (ctypes.c_int, \"in\"),\n", + " \"L\": (ctypes.c_int, \"in\"),\n", + " \"D\": (ctypes.c_int, \"in\"),\n", + " \"K\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " B, L, D, K = 1, 4, 2, 3\n", + " x = torch.tensor(\n", + " [[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]],\n", + " device=\"cuda\",\n", + " dtype=torch.float32,\n", + " )\n", + " weight = torch.tensor(\n", + " [[1.0, 0.0, -1.0], [1.0, 1.0, 1.0]], device=\"cuda\", 
dtype=torch.float32\n", + " )\n", + " bias = torch.zeros(D, device=\"cuda\", dtype=torch.float32)\n", + " output = torch.empty(B, L, D, device=\"cuda\", dtype=torch.float32)\n", + " return {\n", + " \"x\": x,\n", + " \"weight\": weight,\n", + " \"bias\": bias,\n", + " \"output\": output,\n", + " \"B\": B,\n", + " \"L\": L,\n", + " \"D\": D,\n", + " \"K\": K,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " dtype = torch.float32\n", + " test_cases = []\n", + "\n", + " def make_case(B, L, D, K, x_vals=None, w_vals=None, b_vals=None):\n", + " if x_vals is not None:\n", + " x = torch.tensor(x_vals, device=\"cuda\", dtype=dtype)\n", + " else:\n", + " x = torch.randn(B, L, D, device=\"cuda\", dtype=dtype)\n", + " if w_vals is not None:\n", + " weight = torch.tensor(w_vals, device=\"cuda\", dtype=dtype)\n", + " else:\n", + " weight = torch.randn(D, K, device=\"cuda\", dtype=dtype)\n", + " if b_vals is not None:\n", + " bias = torch.tensor(b_vals, device=\"cuda\", dtype=dtype)\n", + " else:\n", + " bias = torch.randn(D, device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(B, L, D, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"x\": x,\n", + " \"weight\": weight,\n", + " \"bias\": bias,\n", + " \"output\": output,\n", + " \"B\": B,\n", + " \"L\": L,\n", + " \"D\": D,\n", + " \"K\": K,\n", + " }\n", + "\n", + " # Example test (matches generate_example_test)\n", + " test_cases.append(\n", + " make_case(\n", + " 1,\n", + " 4,\n", + " 2,\n", + " 3,\n", + " x_vals=[[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]],\n", + " w_vals=[[1.0, 0.0, -1.0], [1.0, 1.0, 1.0]],\n", + " b_vals=[0.0, 0.0],\n", + " )\n", + " )\n", + "\n", + " # Edge cases: minimal sizes\n", + " test_cases.append(make_case(1, 1, 1, 1)) # single element, kernel=1\n", + " test_cases.append(make_case(1, 2, 1, 2)) # L < K, so first output is partial\n", + " test_cases.append(make_case(2, 3, 4, 3)) # small batch, B=2\n", + "\n", + " # Zero inputs\n", + " 
x_zero = torch.zeros(1, 8, 4, device=\"cuda\", dtype=dtype)\n", + " w_zero = torch.randn(4, 3, device=\"cuda\", dtype=dtype)\n", + " b_zero = torch.randn(4, device=\"cuda\", dtype=dtype)\n", + " test_cases.append(\n", + " {\n", + " \"x\": x_zero,\n", + " \"weight\": w_zero,\n", + " \"bias\": b_zero,\n", + " \"output\": torch.empty(1, 8, 4, device=\"cuda\", dtype=dtype),\n", + " \"B\": 1,\n", + " \"L\": 8,\n", + " \"D\": 4,\n", + " \"K\": 3,\n", + " }\n", + " )\n", + "\n", + " # Negative values\n", + " test_cases.append(make_case(1, 16, 8, 4))\n", + "\n", + " # Power-of-2 sizes\n", + " test_cases.append(make_case(2, 32, 16, 4))\n", + " test_cases.append(make_case(4, 64, 32, 4))\n", + "\n", + " # Non-power-of-2 sizes\n", + " test_cases.append(make_case(3, 30, 12, 3))\n", + " test_cases.append(make_case(2, 100, 24, 4))\n", + "\n", + " # Realistic inference size (Mamba-like small)\n", + " test_cases.append(make_case(2, 256, 128, 4))\n", + "\n", + " return test_cases\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " B, L, D, K = 8, 2048, 4096, 4\n", + " dtype = torch.float32\n", + " x = torch.randn(B, L, D, device=\"cuda\", dtype=dtype)\n", + " weight = torch.randn(D, K, device=\"cuda\", dtype=dtype)\n", + " bias = torch.randn(D, device=\"cuda\", dtype=dtype)\n", + " output = torch.empty(B, L, D, device=\"cuda\", dtype=dtype)\n", + " return {\n", + " \"x\": x,\n", + " \"weight\": weight,\n", + " \"bias\": bias,\n", + " \"output\": output,\n", + " \"B\": B,\n", + " \"L\": L,\n", + " \"D\": D,\n", + " \"K\": K,\n", + " }\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. 
Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. 
This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on 
configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/challenges/colab_exports/medium/92_decaying_causal_attention.ipynb b/challenges/colab_exports/medium/92_decaying_causal_attention.ipynb new file mode 100644 index 00000000..5191732f --- /dev/null +++ b/challenges/colab_exports/medium/92_decaying_causal_attention.ipynb @@ -0,0 +1,527 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "config_cell" + }, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "desc_cell" + }, + "source": [ + "

    \n Implement decaying causal attention. Given query matrix Q, key matrix K,\n and value matrix V, each of shape seq_len × d_model, and a scalar\n decay factor gamma ∈ (0, 1], compute the unnormalized causal attention output\n where position n attends to all past positions m ≤ n with weight\n gamman−m:\n

    \n

    \n $$\n \\text{output}[n] = \\sum_{m=0}^{n} \\gamma^{n-m} \\cdot \\frac{Q[n] \\cdot K[m]}{\\sqrt{d_{\\text{model}}}} \\cdot V[m]\n $$\n

    \n

    \n Unlike standard softmax attention, there is no normalization \u2014 the weights decay geometrically from\n the current position backward. This is the parallel form of the Retention mechanism (RetNet), used\n as a recurrence-friendly alternative to attention in sequence models.\n

    \n\n\n \n\n \n Causal Decay Mask D[n,m] = γ^(n−m)\n\n \n m=0\n m=1\n m=2\n m=3\n\n \n n=0\n n=1\n n=2\n n=3\n\n \n \n 1\n \n \n \n\n \n \n γ\n \n 1\n \n \n\n \n \n γ²\n \n γ\n \n 1\n \n\n \n \n γ³\n \n γ²\n \n γ\n \n 1\n\n \n \n\n \n Computation\n\n \n \n \n \n \n\n \n \n Q [S, D]\n query\n\n \n K [S, D]\n key\n\n \n V [S, D]\n value\n\n \n \n \n\n \n QKᵀ / √D\n attn scores [S,S]\n\n \n \n ⊙ decay mask\n\n \n weighted [S,S]\n lower triangular\n\n \n \n \n @\n\n \n output [S, D]\n weighted @ V\n\n\n

    Implementation Requirements

    \n
      \n
    • Implement the solve function; do not change its signature.
    • \n
    • Do not use external libraries beyond those provided.
    • \n
    • Write the result into output.
    • \n
    \n\n

    Example

    \n

    Example 1 \u2014 with seq_len = 2, d_model = 4, gamma = 0.5:

    \n

    \n$$\nQ = \\begin{bmatrix} 1 & 1 & 0 & 0 \\\\ 1 & 1 & 0 & 0 \\end{bmatrix}, \\quad\nK = \\begin{bmatrix} 1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\end{bmatrix}, \\quad\nV = \\begin{bmatrix} 4 & 8 & 12 & 16 \\\\ 4 & 8 & 12 & 16 \\end{bmatrix}\n$$\n

    \n

    \n Attention scores $QK^\\top / \\sqrt{4}$:\n $$\n A = \\begin{bmatrix} 0.5 & 0.5 \\\\ 0.5 & 0.5 \\end{bmatrix}\n $$\n Causal decay mask $D_{nm} = 0.5^{n-m}$ for $n \\ge m$, else $0$:\n $$\n D = \\begin{bmatrix} 1 & 0 \\\\ 0.5 & 1 \\end{bmatrix}\n $$\n Weighted attention $A \\odot D$:\n $$\n \\begin{bmatrix} 0.5 & 0 \\\\ 0.25 & 0.5 \\end{bmatrix}\n $$\n Output $(A \\odot D)\\,V$:\n $$\n \\text{output} = \\begin{bmatrix} 2 & 4 & 6 & 8 \\\\ 3 & 6 & 9 & 12 \\end{bmatrix}\n $$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ seq_len ≤ 8,192
    • \n
    • 1 ≤ d_model ≤ 256
    • \n
    • 0 < gamma ≤ 1
    • \n
    • All tensors are float32 on GPU.
    • \n
    • Performance is measured with seq_len = 4,096, d_model = 64
    • \n
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K, V, output are device pointers\nextern \"C\" void solve(const float* Q, const float* K, const float* V, float* output, int seq_len,\n int d_model, float gamma) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K, V, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K: cute.Tensor,\n V: cute.Tensor,\n output: cute.Tensor,\n seq_len: cute.Int32,\n d_model: cute.Int32,\n gamma: cute.Float32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K, V are tensors on GPU\n@jax.jit\ndef solve(\n Q: jax.Array,\n K: jax.Array,\n V: jax.Array,\n seq_len: int,\n d_model: int,\n gamma: float,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.memory import UnsafePointer\n\n\n# Q, 
K, V, output are device pointers\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K: UnsafePointer[Float32, MutExternalOrigin],\n V: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n seq_len: Int32,\n d_model: Int32,\n gamma: Float32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n seq_len: int,\n d_model: int,\n gamma: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K, V, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K: torch.Tensor,\n V: torch.Tensor,\n output: torch.Tensor,\n seq_len: int,\n d_model: int,\n gamma: float,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " 
self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "import math\n", + "from typing import Any, Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"Decaying Causal Attention\",\n", + " atol=1e-03,\n", + " rtol=1e-03,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K: torch.Tensor,\n", + " V: torch.Tensor,\n", + " output: torch.Tensor,\n", + " seq_len: int,\n", + " d_model: int,\n", + " gamma: float,\n", + " ):\n", + " assert Q.shape == (seq_len, d_model)\n", + " 
assert K.shape == (seq_len, d_model)\n", + " assert V.shape == (seq_len, d_model)\n", + " assert output.shape == (seq_len, d_model)\n", + " assert Q.dtype == K.dtype == V.dtype == output.dtype == torch.float32\n", + " assert Q.device.type == \"cuda\"\n", + " assert K.device.type == \"cuda\"\n", + " assert V.device.type == \"cuda\"\n", + " assert output.device.type == \"cuda\"\n", + "\n", + " scale = math.sqrt(d_model)\n", + " positions = torch.arange(seq_len, device=Q.device, dtype=Q.dtype)\n", + " # distances[n, m] = n - m; negative means m is in the future relative to n\n", + " distances = positions.unsqueeze(1) - positions.unsqueeze(0)\n", + " # causal: zero out future positions; clamp avoids overflow in gamma**negative\n", + " causal = (distances >= 0).to(Q.dtype)\n", + " decay_mask = torch.pow(gamma, distances.clamp(min=0)) * causal\n", + " attn = torch.matmul(Q, K.T) / scale\n", + " output.copy_(torch.matmul(attn * decay_mask, V))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"V\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"seq_len\": (ctypes.c_int, \"in\"),\n", + " \"d_model\": (ctypes.c_int, \"in\"),\n", + " \"gamma\": (ctypes.c_float, \"in\"),\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " # Orthogonal K rows \u2192 QK^T / sqrt(4) = [[0.5, 0.5], [0.5, 0.5]].\n", + " # With gamma=0.5 decay mask [[1, 0], [0.5, 1]], weighted attn = [[0.5, 0], [0.25, 0.5]].\n", + " # Output row 0 = 0.5 * V[0]; row 1 = 0.25 * V[0] + 0.5 * V[1] = [3, 6, 9, 12].\n", + " Q = torch.tensor([[1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0]], device=device, dtype=dtype)\n", + " K = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]], device=device, 
dtype=dtype)\n", + " V = torch.tensor(\n", + " [[4.0, 8.0, 12.0, 16.0], [4.0, 8.0, 12.0, 16.0]], device=device, dtype=dtype\n", + " )\n", + " output = torch.zeros(2, 4, device=device, dtype=dtype)\n", + " return {\"Q\": Q, \"K\": K, \"V\": V, \"output\": output, \"seq_len\": 2, \"d_model\": 4, \"gamma\": 0.5}\n", + "\n", + " def _make_test_case(\n", + " self,\n", + " seq_len: int,\n", + " d_model: int,\n", + " gamma: float = 0.9,\n", + " zero_qk: bool = False,\n", + " negative: bool = False,\n", + " ) -> Dict[str, Any]:\n", + " dtype = torch.float32\n", + " device = \"cuda\"\n", + " if zero_qk:\n", + " Q = torch.zeros(seq_len, d_model, device=device, dtype=dtype)\n", + " K = torch.zeros(seq_len, d_model, device=device, dtype=dtype)\n", + " V = torch.randn(seq_len, d_model, device=device, dtype=dtype)\n", + " elif negative:\n", + " Q = torch.randn(seq_len, d_model, device=device, dtype=dtype).neg()\n", + " K = torch.randn(seq_len, d_model, device=device, dtype=dtype).neg()\n", + " V = torch.randn(seq_len, d_model, device=device, dtype=dtype).neg()\n", + " else:\n", + " Q = torch.randn(seq_len, d_model, device=device, dtype=dtype)\n", + " K = torch.randn(seq_len, d_model, device=device, dtype=dtype)\n", + " V = torch.randn(seq_len, d_model, device=device, dtype=dtype)\n", + " output = torch.zeros(seq_len, d_model, device=device, dtype=dtype)\n", + " return {\n", + " \"Q\": Q,\n", + " \"K\": K,\n", + " \"V\": V,\n", + " \"output\": output,\n", + " \"seq_len\": seq_len,\n", + " \"d_model\": d_model,\n", + " \"gamma\": gamma,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " torch.manual_seed(42)\n", + " tests = []\n", + "\n", + " # Edge: single token (only self-attention possible)\n", + " tests.append(self._make_test_case(1, 4, gamma=0.9))\n", + "\n", + " # Edge: two tokens (matches example structure)\n", + " tests.append(self._make_test_case(2, 4, gamma=0.5))\n", + "\n", + " # Edge: gamma=1.0 \u2014 no decay, equal weight to 
all past positions\n", + " tests.append(self._make_test_case(4, 8, gamma=1.0))\n", + "\n", + " # Edge: small gamma \u2014 very sharp recency bias\n", + " tests.append(self._make_test_case(4, 8, gamma=0.1))\n", + "\n", + " # Zero Q and K: all attention scores are zero \u2192 output must be all zeros\n", + " tests.append(self._make_test_case(8, 16, gamma=0.9, zero_qk=True))\n", + "\n", + " # All-negative Q, K, V\n", + " tests.append(self._make_test_case(16, 16, gamma=0.8, negative=True))\n", + "\n", + " # Power-of-2 sequence length\n", + " tests.append(self._make_test_case(32, 32, gamma=0.9))\n", + "\n", + " # Power-of-2, larger\n", + " tests.append(self._make_test_case(64, 64, gamma=0.8))\n", + "\n", + " # Non-power-of-2 sequence length\n", + " tests.append(self._make_test_case(30, 32, gamma=0.95))\n", + "\n", + " # Non-power-of-2, larger realistic size\n", + " tests.append(self._make_test_case(100, 64, gamma=0.9))\n", + "\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " torch.manual_seed(0)\n", + " # Typical LLM head: seq_len=4096, head_dim=64\n", + " return self._make_test_case(4096, 64, gamma=0.9)\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. 
import os
import time
import ctypes
import torch

class Evaluate:
    """Compiles/loads a user solution and verifies it against the reference."""

    @staticmethod
    def eval_cuda(ch):
        """Build solution.cu with nvcc and run the functional tests via ctypes.

        The shared library gets a unique, timestamped name so a stale build
        from an earlier run in the same Colab session is never reloaded.
        """
        so_filename = f'solution_func_{int(time.time())}.so'
        status = os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')
        if status != 0:
            # Previously a failed compile fell through to ctypes.CDLL and
            # surfaced as a confusing OSError; fail fast with the real cause.
            raise RuntimeError(f'nvcc failed with exit status {status}')
        lib = ctypes.CDLL(f'./{so_filename}')

        # Extract signature and set argtypes
        signature = ch.get_solve_signature()
        lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]

        Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))

    @staticmethod
    def eval_python(ch):
        """Import solution.py and run the functional tests against its solve()."""
        import importlib.util
        import sys

        spec = importlib.util.spec_from_file_location("solution", "solution.py")
        solution = importlib.util.module_from_spec(spec)
        sys.modules["solution"] = solution
        spec.loader.exec_module(solution)

        signature = ch.get_solve_signature()
        Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))

    @staticmethod
    def _run_python(solution, kwargs):
        # Synchronize so async CUDA kernels finish before outputs are compared.
        solution.solve(**kwargs)
        if torch.cuda.is_available():
            torch.cuda.synchronize()

    @staticmethod
    def eval_mojo(ch):
        """Placeholder: Mojo solutions are evaluated by an external runner."""
        print("Mojo evaluation is currently executed via a separate runner or wrapper.")
        print("Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,")
        print("or run an external python bridge. This is a stub.")

    @staticmethod
    def _build_cuda_args(kwargs, signature):
        """Convert test kwargs into ctypes arguments following ``signature``.

        Tensors are passed as raw data pointers cast to the declared pointer
        type; scalars are wrapped in their declared ctypes type.
        """
        cuda_args = []
        for k, (arg_type, dir_type) in signature.items():
            val = kwargs[k]
            if isinstance(val, torch.Tensor):
                cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))
            else:
                cuda_args.append(arg_type(val))
        return cuda_args

    @staticmethod
    def _run_tests(ch, signature, run_fn):
        """Run every functional test, comparing 'out' tensors with allclose.

        Stops at the first failing test. Returns True when all tests pass,
        False otherwise.
        """
        print("=== Running Functional Tests ===")
        functional_tests = ch.generate_functional_test()
        all_passed = True

        for i, test in enumerate(functional_tests):
            # Clone tensors so reference and candidate never share storage.
            ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}
            test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}

            # Run Reference
            ch.reference_impl(**ref_kwargs)

            # Run implementation
            run_fn(test_kwargs)
            if torch.cuda.is_available():
                torch.cuda.synchronize()

            # Verify outputs
            match = True
            for k, (_, dir_type) in signature.items():
                if dir_type == "out":
                    if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):
                        match = False
                        print(f"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'")
                        break

            if match:
                print(f"\u2705 Test {i+1}/{len(functional_tests)} Passed")
            else:
                all_passed = False
                break

        if all_passed:
            print("\n\U0001f389 All functional tests passed!")
            return True
        else:
            return False
# Select and run the evaluator that matches the configured language.
_PYTHON_BACKENDS = ('pytorch', 'triton', 'jax', 'cute')

if EVAL_LANG == 'cuda':
    Evaluate.eval_cuda(ch)
elif EVAL_LANG in _PYTHON_BACKENDS:
    Evaluate.eval_python(ch)
elif EVAL_LANG == 'mojo':
    Evaluate.eval_mojo(ch)
else:
    print(f"Unknown language {EVAL_LANG}")

# Release the Colab GPU runtime when we are done with it.
if SAVE_GPU:
    from google.colab import runtime
    runtime.unassign()

    \n Implement the forward pass of a State Space Model (SSM) selective scan, the core operation in\n Mamba-style sequence models. Given an input sequence u, time-step parameters\n delta, state-transition matrix A, input projection B,\n output projection C, and skip-connection weights skip, compute the\n output sequence y in float32.\n

    \n\n\n \n \n \n \n h\u2080\n\n \n h\u2081\n\n \n h\u2082\n\n \n h\u2083\n\n \n \n \n \n \u0100\n \u0100\n \u0100\n\n \n \n \n \n \n B\u0304u\u2080\n B\u0304u\u2081\n B\u0304u\u2082\n B\u0304u\u2083\n\n \n \n \n \n \n y\u2080\n y\u2081\n y\u2082\n y\u2083\n\n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n\n\n

    Implementation Requirements

    \n

    \n Implement the function solve(u, delta, A, B, C, skip, y, batch, seq_len, d_model, d_state)\n with the signature unchanged. Do not use external libraries beyond the allowed framework.\n Write the result into the pre-allocated output tensor y.\n

    \n

    \n For each batch b, position t, and channel d, the computation is:\n

    \n

    \n $$\n \\bar{A}_{b,t,d,n} = \\exp(\\Delta_{b,t,d} \\cdot A_{d,n})\n $$\n $$\n \\bar{B}_{b,t,d,n} = \\Delta_{b,t,d} \\cdot B_{b,t,n}\n $$\n $$\n h_{b,t,d,n} = \\bar{A}_{b,t,d,n} \\cdot h_{b,t-1,d,n} + \\bar{B}_{b,t,d,n} \\cdot u_{b,t,d}\n $$\n $$\n y_{b,t,d} = \\sum_{n} C_{b,t,n} \\cdot h_{b,t,d,n} + \\text{skip}_d \\cdot u_{b,t,d}\n $$\n

    \n

    \n The initial hidden state $h_{b,-1,d,n} = 0$ for all $b, d, n$.\n All channels d are independent: they share the same B and C\n projections but have separate state-transition rows in A.\n

    \n\n

    Example

    \n
    \nInput:\n  u     = [[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.0, 0.0]]]  shape (1,4,2)\n  delta = [[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]]  shape (1,4,2)\n  A     = [[-0.5, -1.0], [-0.5, -1.0]]                         shape (2,2)\n  B     = [[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.5, 0.5]]]  shape (1,4,2)\n  C     = [[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.5, 0.5]]]  shape (1,4,2)\n  skip  = [0.0, 0.0]                                            shape (2,)\n  batch=1, seq_len=4, d_model=2, d_state=2\n\nDerivation (delta=1 everywhere, so A_bar_dn = exp(A_dn)):\n  A_bar[d=0] = [exp(-0.5), exp(-1.0)] \u2248 [0.607, 0.368]\n  A_bar[d=1] = [exp(-0.5), exp(-1.0)] \u2248 [0.607, 0.368]\n\n  Hidden state h has shape (d_model=2, d_state=2); initial h = zeros.\n  t=0: h = [[1.000, 0.000], [0.000, 0.000]]  \u2192  y[0,0] = [1.000, 0.000]\n  t=1: h = [[0.607, 0.000], [0.000, 1.000]]  \u2192  y[0,1] = [0.000, 1.000]\n  t=2: h = [[1.368, 1.000], [1.000, 1.368]]  \u2192  y[0,2] = [2.368, 2.368]\n  t=3: h = [[0.830, 0.368], [0.607, 0.503]]  \u2192  y[0,3] = [0.599, 0.555]\n\nOutput:\n  y = [[[1.000, 0.000], [0.000, 1.000], [2.368, 2.368], [0.599, 0.555]]]\n
    \n\n

    Constraints

    \n
      \n
    • 1 ≤ batch ≤ 16
    • 1 ≤ seq_len ≤ 8,192
    • 1 ≤ d_model ≤ 2,048
    • 1 ≤ d_state ≤ 64
    • All entries of delta are positive
    • All entries of A are negative (ensuring A_bar ∈ (0, 1))
    • All tensors are float32 on the GPU
    • Performance is measured with batch = 4, seq_len = 4,096, d_model = 512, d_state = 16
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n#include \n\n// u, delta, A, B, C, skip, y are device pointers\nextern \"C\" void solve(const float* u, const float* delta, const float* A, const float* B,\n const float* C, const float* skip, float* y, int batch, int seq_len,\n int d_model, int d_state) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# u, delta, A, B, C, skip, y are tensors on the GPU\n@cute.jit\ndef solve(\n u: cute.Tensor,\n delta: cute.Tensor,\n A: cute.Tensor,\n B: cute.Tensor,\n C: cute.Tensor,\n skip: cute.Tensor,\n y: cute.Tensor,\n batch: cute.Uint32,\n seq_len: cute.Uint32,\n d_model: cute.Uint32,\n d_state: cute.Uint32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# u, delta, A, B, C, skip are tensors on GPU\n@jax.jit\ndef solve(\n u: jax.Array,\n delta: jax.Array,\n A: jax.Array,\n B: jax.Array,\n C: jax.Array,\n skip: jax.Array,\n batch: int,\n seq_len: int,\n d_model: int,\n d_state: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.memory import UnsafePointer\n\n\n# u, delta, A, B, C, skip, y are device pointers\n@export\ndef solve(\n u: UnsafePointer[Float32, MutExternalOrigin],\n delta: UnsafePointer[Float32, MutExternalOrigin],\n A: UnsafePointer[Float32, MutExternalOrigin],\n B: UnsafePointer[Float32, MutExternalOrigin],\n C: UnsafePointer[Float32, MutExternalOrigin],\n skip: UnsafePointer[Float32, MutExternalOrigin],\n y: UnsafePointer[Float32, MutExternalOrigin],\n batch: Int32,\n seq_len: Int32,\n d_model: Int32,\n d_state: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# u, delta, A, B, C, skip, y are tensors on the GPU\ndef solve(\n u: torch.Tensor,\n delta: torch.Tensor,\n A: torch.Tensor,\n B: torch.Tensor,\n C: torch.Tensor,\n skip: torch.Tensor,\n y: torch.Tensor,\n batch: int,\n seq_len: int,\n d_model: int,\n d_state: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# u, delta, A, B, C, skip, y are tensors on the GPU\ndef solve(\n u: torch.Tensor,\n delta: torch.Tensor,\n A: torch.Tensor,\n B: torch.Tensor,\n C: torch.Tensor,\n skip: torch.Tensor,\n y: torch.Tensor,\n batch: int,\n seq_len: int,\n d_model: int,\n d_state: 
# --- Core Challenge Base ---
from abc import ABC, abstractmethod
from typing import Any, Dict, List


class ChallengeBase(ABC):
    """Abstract contract every GPU challenge must implement."""

    def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):
        self.name = name
        self.atol = atol
        self.rtol = rtol
        self.num_gpus = num_gpus
        self.access_tier = access_tier

    @abstractmethod
    def reference_impl(self, *args, **kwargs):
        """Ground-truth implementation used to validate submissions."""

    @abstractmethod
    def get_solve_signature(self) -> Dict[str, Any]:
        """Return the ctypes argument specification for solve().

        Returns:
            Dictionary with argtypes and restype for ctypes.
        """

    @abstractmethod
    def generate_example_test(self) -> List[Dict[str, Any]]:
        """Return the worked example test case from the problem statement."""

    @abstractmethod
    def generate_functional_test(self) -> List[Dict[str, Any]]:
        """Return the list of functional (correctness) test cases."""

    @abstractmethod
    def generate_performance_test(self) -> List[Dict[str, Any]]:
        """Return the single large benchmark test case."""
# --- Challenge Logic ---
import ctypes
from typing import Any, Dict, List

import torch


class Challenge(ChallengeBase):
    """SSM selective scan challenge (forward pass of a Mamba-style scan)."""

    def __init__(self):
        super().__init__(
            name="SSM Selective Scan",
            atol=1e-03,
            rtol=1e-03,
            num_gpus=1,
            access_tier="free",
        )

    def reference_impl(
        self,
        u: torch.Tensor,
        delta: torch.Tensor,
        A: torch.Tensor,
        B: torch.Tensor,
        C: torch.Tensor,
        skip: torch.Tensor,
        y: torch.Tensor,
        batch: int,
        seq_len: int,
        d_model: int,
        d_state: int,
    ):
        """Sequential reference scan; writes the result into ``y`` in place.

        Per time step t:
            A_bar = exp(delta_t * A)           (batch, d_model, d_state)
            B_bar = delta_t * B_t              (batch, d_model, d_state)
            h     = A_bar * h + B_bar * u_t
            y_t   = sum_n C_t[n] * h[..., n] + skip * u_t
        """
        assert u.shape == (batch, seq_len, d_model)
        assert delta.shape == (batch, seq_len, d_model)
        assert A.shape == (d_model, d_state)
        assert B.shape == (batch, seq_len, d_state)
        assert C.shape == (batch, seq_len, d_state)
        assert skip.shape == (d_model,)
        assert y.shape == (batch, seq_len, d_model)
        # Fix: also check the output dtype (y was missing from this chain
        # even though its shape and device are asserted below).
        assert (
            u.dtype == delta.dtype == A.dtype == B.dtype == C.dtype == skip.dtype == y.dtype == torch.float32
        )
        assert u.device.type == "cuda"
        assert delta.device.type == "cuda"
        assert A.device.type == "cuda"
        assert B.device.type == "cuda"
        assert C.device.type == "cuda"
        assert skip.device.type == "cuda"
        assert y.device.type == "cuda"

        # Hidden state: (batch, d_model, d_state), initialised to zero.
        h = torch.zeros(batch, d_model, d_state, device=u.device, dtype=u.dtype)

        for t in range(seq_len):
            delta_t = delta[:, t, :]  # (batch, d_model)
            u_t = u[:, t, :]  # (batch, d_model)

            # Discretize: A_bar = exp(delta_t * A)
            # delta_t: (batch, d_model) -> (batch, d_model, 1)
            # A: (d_model, d_state) -> (1, d_model, d_state)
            A_bar = torch.exp(delta_t.unsqueeze(-1) * A.unsqueeze(0))  # (batch, d_model, d_state)

            # B_bar = delta_t * B_t
            # B[:, t, :]: (batch, d_state) -> (batch, 1, d_state)
            B_bar = delta_t.unsqueeze(-1) * B[:, t, :].unsqueeze(1)  # (batch, d_model, d_state)

            # State update: h = A_bar * h + B_bar * u_t
            h = A_bar * h + B_bar * u_t.unsqueeze(-1)  # (batch, d_model, d_state)

            # Output: y_t = C_t @ h + skip * u_t (contract over d_state)
            C_t = C[:, t, :]  # (batch, d_state)
            y_t = torch.einsum("bn,bdn->bd", C_t, h) + skip * u_t  # (batch, d_model)
            y[:, t, :] = y_t

    def get_solve_signature(self) -> Dict[str, tuple]:
        """ctypes signature of solve(); 'out' entries are the verified buffers."""
        return {
            "u": (ctypes.POINTER(ctypes.c_float), "in"),
            "delta": (ctypes.POINTER(ctypes.c_float), "in"),
            "A": (ctypes.POINTER(ctypes.c_float), "in"),
            "B": (ctypes.POINTER(ctypes.c_float), "in"),
            "C": (ctypes.POINTER(ctypes.c_float), "in"),
            "skip": (ctypes.POINTER(ctypes.c_float), "in"),
            "y": (ctypes.POINTER(ctypes.c_float), "out"),
            "batch": (ctypes.c_int, "in"),
            "seq_len": (ctypes.c_int, "in"),
            "d_model": (ctypes.c_int, "in"),
            "d_state": (ctypes.c_int, "in"),
        }

    def _make_test_case(self, batch, seq_len, d_model, d_state, zero_u=False, zero_delta=False):
        """Build one randomized GPU test case dict keyed by solve() argument name."""
        device = "cuda"
        dtype = torch.float32
        if zero_u:
            u = torch.zeros(batch, seq_len, d_model, device=device, dtype=dtype)
        else:
            u = torch.randn(batch, seq_len, d_model, device=device, dtype=dtype)
        if zero_delta:
            delta = torch.zeros(batch, seq_len, d_model, device=device, dtype=dtype)
        else:
            # delta must be positive
            delta = torch.rand(batch, seq_len, d_model, device=device, dtype=dtype) + 0.01
        # A must be negative for stability (eigenvalues < 0)
        A = -torch.rand(d_model, d_state, device=device, dtype=dtype) - 0.01
        B = torch.randn(batch, seq_len, d_state, device=device, dtype=dtype)
        C = torch.randn(batch, seq_len, d_state, device=device, dtype=dtype)
        skip = torch.rand(d_model, device=device, dtype=dtype)
        y = torch.empty(batch, seq_len, d_model, device=device, dtype=dtype)
        return {
            "u": u,
            "delta": delta,
            "A": A,
            "B": B,
            "C": C,
            "skip": skip,
            "y": y,
            "batch": batch,
            "seq_len": seq_len,
            "d_model": d_model,
            "d_state": d_state,
        }

    def generate_example_test(self) -> Dict[str, Any]:
        """The fixed worked example from the problem statement."""
        torch.manual_seed(0)
        device = "cuda"
        dtype = torch.float32
        batch, seq_len, d_model, d_state = 1, 4, 2, 2
        u = torch.tensor(
            [[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.0, 0.0]]],
            device=device,
            dtype=dtype,
        )
        delta = torch.tensor(
            [[[1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]],
            device=device,
            dtype=dtype,
        )
        A = torch.tensor([[-0.5, -1.0], [-0.5, -1.0]], device=device, dtype=dtype)
        B = torch.tensor(
            [[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.5, 0.5]]],
            device=device,
            dtype=dtype,
        )
        C = torch.tensor(
            [[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.5, 0.5]]],
            device=device,
            dtype=dtype,
        )
        skip = torch.tensor([0.0, 0.0], device=device, dtype=dtype)
        y = torch.empty(batch, seq_len, d_model, device=device, dtype=dtype)
        return {
            "u": u,
            "delta": delta,
            "A": A,
            "B": B,
            "C": C,
            "skip": skip,
            "y": y,
            "batch": batch,
            "seq_len": seq_len,
            "d_model": d_model,
            "d_state": d_state,
        }

    def generate_functional_test(self) -> List[Dict[str, Any]]:
        """Edge cases plus power-of-2 and non-power-of-2 sizes."""
        torch.manual_seed(42)
        tests = []

        # Edge case: single token
        tests.append(self._make_test_case(1, 1, 1, 4))

        # Edge case: tiny dimensions
        tests.append(self._make_test_case(1, 2, 2, 2))

        # Edge case: zero input (output should be skip * 0 = 0)
        tests.append(self._make_test_case(1, 4, 4, 4, zero_u=True))

        # Edge case: zero delta (A_bar=1, B_bar=0, so state stays zero, output = skip * u)
        tests.append(self._make_test_case(2, 4, 4, 4, zero_delta=True))

        # Power-of-2 lengths
        tests.append(self._make_test_case(2, 16, 8, 4))
        tests.append(self._make_test_case(2, 64, 16, 8))

        # Non-power-of-2
        tests.append(self._make_test_case(2, 30, 12, 4))
        tests.append(self._make_test_case(3, 100, 24, 8))

        # Typical d_state=16 (common Mamba setting)
        tests.append(self._make_test_case(2, 128, 32, 16))

        # Realistic size
        tests.append(self._make_test_case(4, 256, 64, 16))

        return tests

    def generate_performance_test(self) -> Dict[str, Any]:
        """Benchmark shape: batch=4, seq_len=4096, d_model=512, d_state=16."""
        torch.manual_seed(0)
        # Memory: u+delta+y ~ 3 * 4*4096*512*4 = 96MB; A+B+C+skip small
        # Total << 1GB, comfortably fits 5x in 16GB T4
        return self._make_test_case(4, 4096, 512, 16)


ch = Challenge()
import os
import time
import ctypes
import torch

class Evaluate:
    """Compiles/loads a user solution and verifies it against the reference."""

    @staticmethod
    def eval_cuda(ch):
        """Build solution.cu with nvcc and run the functional tests via ctypes.

        The shared library gets a unique, timestamped name so a stale build
        from an earlier run in the same Colab session is never reloaded.
        """
        so_filename = f'solution_func_{int(time.time())}.so'
        status = os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')
        if status != 0:
            # Previously a failed compile fell through to ctypes.CDLL and
            # surfaced as a confusing OSError; fail fast with the real cause.
            raise RuntimeError(f'nvcc failed with exit status {status}')
        lib = ctypes.CDLL(f'./{so_filename}')

        # Extract signature and set argtypes
        signature = ch.get_solve_signature()
        lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]

        Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))

    @staticmethod
    def eval_python(ch):
        """Import solution.py and run the functional tests against its solve()."""
        import importlib.util
        import sys

        spec = importlib.util.spec_from_file_location("solution", "solution.py")
        solution = importlib.util.module_from_spec(spec)
        sys.modules["solution"] = solution
        spec.loader.exec_module(solution)

        signature = ch.get_solve_signature()
        Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))

    @staticmethod
    def _run_python(solution, kwargs):
        # Synchronize so async CUDA kernels finish before outputs are compared.
        solution.solve(**kwargs)
        if torch.cuda.is_available():
            torch.cuda.synchronize()

    @staticmethod
    def eval_mojo(ch):
        """Placeholder: Mojo solutions are evaluated by an external runner."""
        print("Mojo evaluation is currently executed via a separate runner or wrapper.")
        print("Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,")
        print("or run an external python bridge. This is a stub.")

    @staticmethod
    def _build_cuda_args(kwargs, signature):
        """Convert test kwargs into ctypes arguments following ``signature``.

        Tensors are passed as raw data pointers cast to the declared pointer
        type; scalars are wrapped in their declared ctypes type.
        """
        cuda_args = []
        for k, (arg_type, dir_type) in signature.items():
            val = kwargs[k]
            if isinstance(val, torch.Tensor):
                cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))
            else:
                cuda_args.append(arg_type(val))
        return cuda_args

    @staticmethod
    def _run_tests(ch, signature, run_fn):
        """Run every functional test, comparing 'out' tensors with allclose.

        Stops at the first failing test. Returns True when all tests pass,
        False otherwise.
        """
        print("=== Running Functional Tests ===")
        functional_tests = ch.generate_functional_test()
        all_passed = True

        for i, test in enumerate(functional_tests):
            # Clone tensors so reference and candidate never share storage.
            ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}
            test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}

            # Run Reference
            ch.reference_impl(**ref_kwargs)

            # Run implementation
            run_fn(test_kwargs)
            if torch.cuda.is_available():
                torch.cuda.synchronize()

            # Verify outputs
            match = True
            for k, (_, dir_type) in signature.items():
                if dir_type == "out":
                    if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):
                        match = False
                        print(f"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'")
                        break

            if match:
                print(f"\u2705 Test {i+1}/{len(functional_tests)} Passed")
            else:
                all_passed = False
                break

        if all_passed:
            print("\n\U0001f389 All functional tests passed!")
            return True
        else:
            return False
# Select and run the evaluator that matches the configured language.
_PYTHON_BACKENDS = ('pytorch', 'triton', 'jax', 'cute')

if EVAL_LANG == 'cuda':
    Evaluate.eval_cuda(ch)
elif EVAL_LANG in _PYTHON_BACKENDS:
    Evaluate.eval_python(ch)
elif EVAL_LANG == 'mojo':
    Evaluate.eval_mojo(ch)
else:
    print(f"Unknown language {EVAL_LANG}")

# Release the Colab GPU runtime when we are done with it.
if SAVE_GPU:
    from google.colab import runtime
    runtime.unassign()

    \nImplement decode-phase multi-head attention where the key and value caches are stored as\nint8 with per-token scale factors. This memory layout halves KV-cache bandwidth\nversus float32 and is used in production LLM serving systems such as TensorRT-LLM\nand vLLM. Given a query tensor Q for a single new token, int8 key cache\nK_int8, int8 value cache V_int8, and per-token scales\nk_scale and v_scale, dequantize the caches and compute scaled\ndot-product attention to produce output. All non-integer tensors use\nfloat32.\n

    \n\n

    Implementation Requirements

    \n
      \n
    • Implement the function solve(Q, K_int8, V_int8, k_scale, v_scale, output, num_heads, seq_len, head_dim).
    • Do not change the function signature or use external libraries beyond the standard GPU frameworks.
    • Write the result into the provided output buffer.
    • Dequantize using per-token scales: K_float[h, s, d] = K_int8[h, s, d] × k_scale[h, s] (and analogously for V).
    • Use scaled dot-product attention with scale factor 1 / sqrt(head_dim) and a softmax over the sequence dimension.
    \n\n

    Example

    \n

    \n With num_heads = 1, seq_len = 3, head_dim = 4:\n

    \n

    \n Input:
    \n $Q$ (1×4):\n $$\n \\begin{bmatrix} 1 & 1 & 1 & 1 \\end{bmatrix}\n $$\n $K\\_int8$ (1×3×4):\n $$\n \\begin{bmatrix} 10 & 0 & 0 & 0 \\\\ 0 & 10 & 0 & 0 \\\\ 0 & 0 & 10 & 0 \\end{bmatrix}\n $$\n $k\\_scale$ (1×3): $\\begin{bmatrix} 0.1 & 0.1 & 0.1 \\end{bmatrix}$\n  ⇒ \n $K\\_float$ (1×3×4):\n $$\n \\begin{bmatrix} 1 & 0 & 0 & 0 \\\\ 0 & 1 & 0 & 0 \\\\ 0 & 0 & 1 & 0 \\end{bmatrix}\n $$\n $V\\_int8$ (1×3×4):\n $$\n \\begin{bmatrix} 10 & 20 & 30 & 40 \\\\ 50 & 60 & 70 & 80 \\\\ 90 & 100 & 110 & 120 \\end{bmatrix}\n $$\n $v\\_scale$ (1×3): $\\begin{bmatrix} 0.1 & 0.1 & 0.1 \\end{bmatrix}$\n  ⇒ \n $V\\_float$ (1×3×4):\n $$\n \\begin{bmatrix} 1 & 2 & 3 & 4 \\\\ 5 & 6 & 7 & 8 \\\\ 9 & 10 & 11 & 12 \\end{bmatrix}\n $$\n

    \n

    \n Scores = $Q \\cdot K\\_float^T / \\sqrt{4}$ = $\\begin{bmatrix} 0.5 & 0.5 & 0.5 \\end{bmatrix}$,\n so softmax weights = $\\begin{bmatrix} 1/3 & 1/3 & 1/3 \\end{bmatrix}$.\n

    \n

    \n Output (1×4):\n $$\n \\begin{bmatrix} 5.00 & 6.00 & 7.00 & 8.00 \\end{bmatrix}\n $$\n

    \n\n

    Constraints

    \n
      \n
    • 1 ≤ num_heads ≤ 64
    • 1 ≤ seq_len ≤ 32,768
    • 8 ≤ head_dim ≤ 256; head_dim is a multiple of 8
    • K_int8 and V_int8 values are in $[-128, 127]$
    • All scale values are positive float32
    • Performance is measured with num_heads = 32, seq_len = 8,192, head_dim = 128
    \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUDA" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cu", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.cu\n", + "#include \n\n// Q, K_int8, V_int8, k_scale, v_scale, output are device pointers\nextern \"C\" void solve(const float* Q, const int8_t* K_int8, const int8_t* V_int8,\n const float* k_scale, const float* v_scale, float* output, int num_heads,\n int seq_len, int head_dim) {}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CUTE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_cute_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import cutlass\nimport cutlass.cute as cute\n\n\n# Q, K_int8, V_int8, k_scale, v_scale, output are tensors on the GPU\n@cute.jit\ndef solve(\n Q: cute.Tensor,\n K_int8: cute.Tensor,\n V_int8: cute.Tensor,\n k_scale: cute.Tensor,\n v_scale: cute.Tensor,\n output: cute.Tensor,\n num_heads: cute.Int32,\n seq_len: cute.Int32,\n head_dim: cute.Int32,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# JAX" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_jax_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import jax\nimport jax.numpy as jnp\n\n\n# Q, K_int8, V_int8, k_scale, v_scale are tensors on GPU\n@jax.jit\ndef solve(\n Q: jax.Array,\n K_int8: jax.Array,\n V_int8: jax.Array,\n k_scale: jax.Array,\n v_scale: jax.Array,\n num_heads: int,\n seq_len: int,\n head_dim: int,\n) -> jax.Array:\n # return output tensor directly\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# MOJO" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": { + "id": "starter_mojo", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.mojo\n", + "from std.gpu.host import DeviceContext\nfrom std.memory import UnsafePointer\n\n\n# Q, K_int8, V_int8, k_scale, v_scale, output are device pointers\n@export\ndef solve(\n Q: UnsafePointer[Float32, MutExternalOrigin],\n K_int8: UnsafePointer[Int8, MutExternalOrigin],\n V_int8: UnsafePointer[Int8, MutExternalOrigin],\n k_scale: UnsafePointer[Float32, MutExternalOrigin],\n v_scale: UnsafePointer[Float32, MutExternalOrigin],\n output: UnsafePointer[Float32, MutExternalOrigin],\n num_heads: Int32,\n seq_len: Int32,\n head_dim: Int32,\n) raises:\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Torch" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_pytorch_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\n\n\n# Q, K_int8, V_int8, k_scale, v_scale, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K_int8: torch.Tensor,\n V_int8: torch.Tensor,\n k_scale: torch.Tensor,\n v_scale: torch.Tensor,\n output: torch.Tensor,\n num_heads: int,\n seq_len: int,\n head_dim: int,\n):\n pass\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Triton" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "starter_triton_py", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "%%writefile solution.py\n", + "import torch\nimport triton\nimport triton.language as tl\n\n\n# Q, K_int8, V_int8, k_scale, v_scale, output are tensors on the GPU\ndef solve(\n Q: torch.Tensor,\n K_int8: torch.Tensor,\n V_int8: torch.Tensor,\n k_scale: torch.Tensor,\n v_scale: torch.Tensor,\n output: torch.Tensor,\n num_heads: int,\n seq_len: int,\n head_dim: int,\n):\n pass\n" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluate Setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "challenge_logic" + }, + "outputs": [], + "source": [ + "# --- Core Challenge Base ---\n", + "from abc import ABC, abstractmethod\n", + "from typing import Any, Dict, List\n", + "\n", + "\n", + "class ChallengeBase(ABC):\n", + " def __init__(self, name: str, atol: float, rtol: float, num_gpus: int, access_tier: str):\n", + " self.name = name\n", + " self.atol = atol\n", + " self.rtol = rtol\n", + " self.num_gpus = num_gpus\n", + " self.access_tier = access_tier\n", + "\n", + " @abstractmethod\n", + " def reference_impl(self, *args, **kwargs):\n", + " \"\"\"\n", + " Reference solution implementation.\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def get_solve_signature(self) -> Dict[str, Any]:\n", + " \"\"\"\n", + " Get the function signature for solution.\n", + "\n", + " Returns:\n", + " Dictionary with argtypes and restype for ctypes\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_example_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate an example test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate functional test cases for this problem.\n", + "\n", + " Returns:\n", + " List of test case dictionaries\n", + " \"\"\"\n", + " pass\n", + "\n", + " @abstractmethod\n", + " def generate_performance_test(self) -> List[Dict[str, Any]]:\n", + " \"\"\"\n", + " Generate a performance test case for this problem.\n", + "\n", + " Returns:\n", + " Dictionary with test case parameters\n", + " \"\"\"\n", + " pass\n", + "\n", + "\n", + "# --- Challenge Logic ---\n", + "import ctypes\n", + "import math\n", + "from typing import Any, 
Dict, List\n", + "\n", + "import torch\n", + "\n", + "\n", + "class Challenge(ChallengeBase):\n", + " def __init__(self):\n", + " super().__init__(\n", + " name=\"INT8 KV-Cache Attention\",\n", + " atol=1e-03,\n", + " rtol=1e-03,\n", + " num_gpus=1,\n", + " access_tier=\"free\",\n", + " )\n", + "\n", + " def reference_impl(\n", + " self,\n", + " Q: torch.Tensor,\n", + " K_int8: torch.Tensor,\n", + " V_int8: torch.Tensor,\n", + " k_scale: torch.Tensor,\n", + " v_scale: torch.Tensor,\n", + " output: torch.Tensor,\n", + " num_heads: int,\n", + " seq_len: int,\n", + " head_dim: int,\n", + " ):\n", + " assert Q.shape == (num_heads, head_dim)\n", + " assert K_int8.shape == (num_heads, seq_len, head_dim)\n", + " assert V_int8.shape == (num_heads, seq_len, head_dim)\n", + " assert k_scale.shape == (num_heads, seq_len)\n", + " assert v_scale.shape == (num_heads, seq_len)\n", + " assert output.shape == (num_heads, head_dim)\n", + " assert Q.dtype == torch.float32\n", + " assert K_int8.dtype == torch.int8\n", + " assert V_int8.dtype == torch.int8\n", + " assert k_scale.dtype == torch.float32\n", + " assert v_scale.dtype == torch.float32\n", + " assert output.dtype == torch.float32\n", + " assert Q.device.type == \"cuda\"\n", + " assert K_int8.device.type == \"cuda\"\n", + " assert V_int8.device.type == \"cuda\"\n", + " assert k_scale.device.type == \"cuda\"\n", + " assert v_scale.device.type == \"cuda\"\n", + " assert output.device.type == \"cuda\"\n", + "\n", + " # Dequantize: K_float[h, s, d] = K_int8[h, s, d] * k_scale[h, s]\n", + " K_float = K_int8.float() * k_scale.unsqueeze(-1) # [num_heads, seq_len, head_dim]\n", + " V_float = V_int8.float() * v_scale.unsqueeze(-1) # [num_heads, seq_len, head_dim]\n", + "\n", + " # Scaled dot-product attention: Q [num_heads, head_dim] attends to all seq_len positions\n", + " scale = 1.0 / math.sqrt(head_dim)\n", + " # scores: [num_heads, 1, seq_len]\n", + " scores = torch.bmm(Q.unsqueeze(1), K_float.transpose(1, 2)) * scale\n", + " 
weights = torch.softmax(scores, dim=-1) # [num_heads, 1, seq_len]\n", + "\n", + " # Weighted sum of V: [num_heads, 1, seq_len] @ [num_heads, seq_len, head_dim]\n", + " out = torch.bmm(weights, V_float) # [num_heads, 1, head_dim]\n", + " output.copy_(out.squeeze(1))\n", + "\n", + " def get_solve_signature(self) -> Dict[str, tuple]:\n", + " return {\n", + " \"Q\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"K_int8\": (ctypes.POINTER(ctypes.c_int8), \"in\"),\n", + " \"V_int8\": (ctypes.POINTER(ctypes.c_int8), \"in\"),\n", + " \"k_scale\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"v_scale\": (ctypes.POINTER(ctypes.c_float), \"in\"),\n", + " \"output\": (ctypes.POINTER(ctypes.c_float), \"out\"),\n", + " \"num_heads\": (ctypes.c_int, \"in\"),\n", + " \"seq_len\": (ctypes.c_int, \"in\"),\n", + " \"head_dim\": (ctypes.c_int, \"in\"),\n", + " }\n", + "\n", + " def _make_test_case(self, num_heads, seq_len, head_dim, zero_q=False, seed=None):\n", + " device = \"cuda\"\n", + " if seed is not None:\n", + " torch.manual_seed(seed)\n", + " if zero_q:\n", + " Q = torch.zeros(num_heads, head_dim, dtype=torch.float32, device=device)\n", + " else:\n", + " Q = torch.randn(num_heads, head_dim, dtype=torch.float32, device=device)\n", + " K_int8 = torch.randint(\n", + " -128, 128, (num_heads, seq_len, head_dim), dtype=torch.int8, device=device\n", + " )\n", + " V_int8 = torch.randint(\n", + " -128, 128, (num_heads, seq_len, head_dim), dtype=torch.int8, device=device\n", + " )\n", + " k_scale = torch.rand(num_heads, seq_len, dtype=torch.float32, device=device) * 0.1 + 0.01\n", + " v_scale = torch.rand(num_heads, seq_len, dtype=torch.float32, device=device) * 0.1 + 0.01\n", + " output = torch.empty(num_heads, head_dim, dtype=torch.float32, device=device)\n", + " return {\n", + " \"Q\": Q,\n", + " \"K_int8\": K_int8,\n", + " \"V_int8\": V_int8,\n", + " \"k_scale\": k_scale,\n", + " \"v_scale\": v_scale,\n", + " \"output\": output,\n", + " \"num_heads\": num_heads,\n", + " 
\"seq_len\": seq_len,\n", + " \"head_dim\": head_dim,\n", + " }\n", + "\n", + " def generate_example_test(self) -> Dict[str, Any]:\n", + " device = \"cuda\"\n", + " num_heads, seq_len, head_dim = 1, 3, 4\n", + " Q = torch.tensor([[1.0, 1.0, 1.0, 1.0]], dtype=torch.float32, device=device)\n", + " K_int8 = torch.tensor(\n", + " [[[10, 0, 0, 0], [0, 10, 0, 0], [0, 0, 10, 0]]], dtype=torch.int8, device=device\n", + " )\n", + " V_int8 = torch.tensor(\n", + " [[[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]],\n", + " dtype=torch.int8,\n", + " device=device,\n", + " )\n", + " k_scale = torch.tensor([[0.1, 0.1, 0.1]], dtype=torch.float32, device=device)\n", + " v_scale = torch.tensor([[0.1, 0.1, 0.1]], dtype=torch.float32, device=device)\n", + " output = torch.empty(num_heads, head_dim, dtype=torch.float32, device=device)\n", + " return {\n", + " \"Q\": Q,\n", + " \"K_int8\": K_int8,\n", + " \"V_int8\": V_int8,\n", + " \"k_scale\": k_scale,\n", + " \"v_scale\": v_scale,\n", + " \"output\": output,\n", + " \"num_heads\": num_heads,\n", + " \"seq_len\": seq_len,\n", + " \"head_dim\": head_dim,\n", + " }\n", + "\n", + " def generate_functional_test(self) -> List[Dict[str, Any]]:\n", + " tests = []\n", + " # Edge: single key in cache\n", + " tests.append(self._make_test_case(1, 1, 8, seed=0))\n", + " # Edge: two keys\n", + " tests.append(self._make_test_case(1, 2, 8, seed=1))\n", + " # Edge: four keys, two heads\n", + " tests.append(self._make_test_case(2, 4, 8, seed=2))\n", + " # Zero query (uniform softmax weights)\n", + " tests.append(self._make_test_case(1, 8, 16, zero_q=True, seed=3))\n", + " # Power-of-2 seq_len\n", + " tests.append(self._make_test_case(4, 16, 64, seed=4))\n", + " tests.append(self._make_test_case(8, 64, 64, seed=5))\n", + " # Non-power-of-2\n", + " tests.append(self._make_test_case(2, 30, 64, seed=6))\n", + " tests.append(self._make_test_case(4, 100, 64, seed=7))\n", + " # Realistic sizes\n", + " tests.append(self._make_test_case(16, 512, 64, 
seed=8))\n", + " tests.append(self._make_test_case(32, 256, 128, seed=9))\n", + " return tests\n", + "\n", + " def generate_performance_test(self) -> Dict[str, Any]:\n", + " return self._make_test_case(32, 8192, 128, seed=42)\n", + "\n", + "\n", + "ch = Challenge()\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "evaluator", + "cellView": "form", + "collapsed": true + }, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import ctypes\n", + "import torch\n", + "\n", + "class Evaluate:\n", + " @staticmethod\n", + " def eval_cuda(ch):\n", + " # 1. Compile a fresh uniquely named library\n", + " so_filename = f'solution_func_{int(time.time())}.so'\n", + " os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}')\n", + " lib = ctypes.CDLL(f'./{so_filename}')\n", + " \n", + " # 2. Extract signature and set argtypes\n", + " signature = ch.get_solve_signature()\n", + " lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()]\n", + " \n", + " Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature)))\n", + "\n", + " @staticmethod\n", + " def eval_python(ch):\n", + " import importlib.util\n", + " import sys\n", + " \n", + " spec = importlib.util.spec_from_file_location(\"solution\", \"solution.py\")\n", + " solution = importlib.util.module_from_spec(spec)\n", + " sys.modules[\"solution\"] = solution\n", + " spec.loader.exec_module(solution)\n", + " \n", + " signature = ch.get_solve_signature()\n", + " Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs))\n", + "\n", + " @staticmethod\n", + " def _run_python(solution, kwargs):\n", + " solution.solve(**kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + "\n", + " @staticmethod\n", + " def eval_mojo(ch):\n", + " print(\"Mojo evaluation is currently executed via a separate runner or wrapper.\")\n", + " 
print(\"Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,\")\n", + " print(\"or run an external python bridge. This is a stub.\")\n", + "\n", + " @staticmethod\n", + " def _build_cuda_args(kwargs, signature):\n", + " cuda_args = []\n", + " for k, (arg_type, dir_type) in signature.items():\n", + " val = kwargs[k]\n", + " if isinstance(val, torch.Tensor):\n", + " cuda_args.append(ctypes.cast(val.data_ptr(), arg_type))\n", + " else:\n", + " cuda_args.append(arg_type(val))\n", + " return cuda_args\n", + "\n", + " @staticmethod\n", + " def _run_tests(ch, signature, run_fn):\n", + " print(\"=== Running Functional Tests ===\")\n", + " functional_tests = ch.generate_functional_test()\n", + " all_passed = True\n", + " \n", + " for i, test in enumerate(functional_tests):\n", + " ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()}\n", + " \n", + " # Run Reference\n", + " ch.reference_impl(**ref_kwargs)\n", + " \n", + " # Run implementation\n", + " run_fn(test_kwargs)\n", + " if torch.cuda.is_available():\n", + " torch.cuda.synchronize()\n", + " \n", + " # Verify outputs\n", + " match = True\n", + " for k, (_, dir_type) in signature.items():\n", + " if dir_type == \"out\":\n", + " if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol):\n", + " match = False\n", + " print(f\"\u274c Test {i+1}/{len(functional_tests)} Failed on output '{k}'\")\n", + " break\n", + " \n", + " if match:\n", + " print(f\"\u2705 Test {i+1}/{len(functional_tests)} Passed\")\n", + " else:\n", + " all_passed = False\n", + " break\n", + " \n", + " if all_passed:\n", + " print(\"\\n\ud83c\udf89 All functional tests passed!\")\n", + " return True\n", + " else:\n", + " return False\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Evaluation code" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "metadata": { + "id": "disconnect" + }, + "outputs": [], + "source": [ + "# Run the evaluator based on configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + } + ] +} \ No newline at end of file diff --git a/scripts/migrate_to_colab.py b/scripts/migrate_to_colab.py new file mode 100644 index 00000000..a952d99a --- /dev/null +++ b/scripts/migrate_to_colab.py @@ -0,0 +1,320 @@ +import os +import glob +import json +import re + +def generate_notebook(challenge_dir, output_dir): + parts = challenge_dir.strip('/').split('/') + level = parts[-2] + name = parts[-1] + + # Check if files exist + challenge_py_path = os.path.join(challenge_dir, "challenge.py") + challenge_html_path = os.path.join(challenge_dir, "challenge.html") + starter_dir = os.path.join(challenge_dir, "starter") + + if not os.path.exists(challenge_py_path) or not os.path.exists(challenge_html_path): + return + + print(f"Migrating {level} {name}...") + + cells = [] + + # 1. Config cell + cells.append({ + "cell_type": "code", + "execution_count": None, + "metadata": {"id": "config_cell"}, + "outputs": [], + "source": [ + "# Change this to your preferred framework (e.g., 'cuda', 'pytorch', 'triton', 'jax', 'mojo')\n", + "EVAL_LANG = 'cuda'\n", + "\n", + "SAVE_GPU = True\n" + ] + }) + + # 2. 
Markdown description + with open(challenge_html_path, 'r', encoding='utf-8') as f: + html_content = f.read() + + # Replace LaTeX delimiters with standard markdown/mathjax delimiters + html_content = html_content.replace("\\(", "$").replace("\\)", "$") + html_content = html_content.replace("\\[", "$$").replace("\\]", "$$") + + cells.append({ + "cell_type": "markdown", + "metadata": {"id": "desc_cell"}, + "source": [html_content] + }) + + # 3. Starter templates (hidden cells) + if os.path.exists(starter_dir): + for starter_file in sorted(os.listdir(starter_dir)): + if starter_file.startswith("starter."): + ext = starter_file[len("starter."):] + + # Header mapping + header_map = { + "cu": "# CUDA", + "cute.py": "# CUTE", + "jax.py": "# JAX", + "mojo": "# MOJO", + "pytorch.py": "# Torch", + "triton.py": "# Triton" + } + header_text = header_map.get(ext, f"# {ext.upper()}") + + cells.append({ + "cell_type": "markdown", + "metadata": {}, + "source": [header_text] + }) + + # Default output filenames + out_filename = "solution.cu" if ext == "cu" else f"solution.{ext}" + if out_filename.endswith(".py"): + out_filename = "solution.py" + + with open(os.path.join(starter_dir, starter_file), 'r', encoding='utf-8') as f: + starter_content = f.read() + + cells.append({ + "cell_type": "code", + "execution_count": None, + "metadata": { + "id": f"starter_{ext.replace('.', '_')}", + "cellView": "form", + "collapsed": True + }, + "outputs": [], + "source": [ + f"%%writefile {out_filename}\n", + starter_content + ] + }) + + # 4. 
Challenge Base & Challenge logic + base_py_path = os.path.join("challenges", "core", "challenge_base.py") + with open(base_py_path, 'r', encoding='utf-8') as f: + base_content = f.read() + + with open(challenge_py_path, 'r', encoding='utf-8') as f: + challenge_content = f.read() + + # Remove the import statement + challenge_content = re.sub(r"from core\.challenge_base import ChallengeBase\n?", "", challenge_content) + + combined_challenge = ( + "# --- Core Challenge Base ---\n" + + base_content + "\n\n" + + "# --- Challenge Logic ---\n" + + challenge_content + "\n\n" + + "ch = Challenge()\n" + ) + + cells.append({ + "cell_type": "markdown", + "metadata": {}, + "source": ["# Evaluate Setup"] + }) + + cells.append({ + "cell_type": "code", + "execution_count": None, + "metadata": {"id": "challenge_logic"}, + "outputs": [], + "source": [line + "\n" for line in combined_challenge.split("\n")] + }) + + # 5. Evaluator script + eval_script = """import os +import time +import ctypes +import torch + +class Evaluate: + @staticmethod + def eval_cuda(ch): + # 1. Compile a fresh uniquely named library + so_filename = f'solution_func_{int(time.time())}.so' + os.system(f'nvcc -shared -Xcompiler -fPIC -O3 solution.cu -o {so_filename}') + lib = ctypes.CDLL(f'./{so_filename}') + + # 2. 
Extract signature and set argtypes + signature = ch.get_solve_signature() + lib.solve.argtypes = [arg_info[0] for arg_info in signature.values()] + + Evaluate._run_tests(ch, signature, lambda kwargs: lib.solve(*Evaluate._build_cuda_args(kwargs, signature))) + + @staticmethod + def eval_python(ch): + import importlib.util + import sys + + spec = importlib.util.spec_from_file_location("solution", "solution.py") + solution = importlib.util.module_from_spec(spec) + sys.modules["solution"] = solution + spec.loader.exec_module(solution) + + signature = ch.get_solve_signature() + Evaluate._run_tests(ch, signature, lambda kwargs: Evaluate._run_python(solution, kwargs)) + + @staticmethod + def _run_python(solution, kwargs): + solution.solve(**kwargs) + if torch.cuda.is_available(): + torch.cuda.synchronize() + + @staticmethod + def eval_mojo(ch): + print("Mojo evaluation is currently executed via a separate runner or wrapper.") + print("Ensure you have the mojo compiler installed and use 'mojo build solution.mojo' + ctypes/ffi,") + print("or run an external python bridge. 
This is a stub.") + + @staticmethod + def _build_cuda_args(kwargs, signature): + cuda_args = [] + for k, (arg_type, dir_type) in signature.items(): + val = kwargs[k] + if isinstance(val, torch.Tensor): + cuda_args.append(ctypes.cast(val.data_ptr(), arg_type)) + else: + cuda_args.append(arg_type(val)) + return cuda_args + + @staticmethod + def _run_tests(ch, signature, run_fn): + print("=== Running Functional Tests ===") + functional_tests = ch.generate_functional_test() + all_passed = True + + for i, test in enumerate(functional_tests): + ref_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()} + test_kwargs = {k: (v.clone() if isinstance(v, torch.Tensor) else v) for k, v in test.items()} + + # Run Reference + ch.reference_impl(**ref_kwargs) + + # Run implementation + run_fn(test_kwargs) + if torch.cuda.is_available(): + torch.cuda.synchronize() + + # Verify outputs + match = True + for k, (_, dir_type) in signature.items(): + if dir_type == "out": + if not torch.allclose(ref_kwargs[k], test_kwargs[k], atol=ch.atol, rtol=ch.rtol): + match = False + print(f"āŒ Test {i+1}/{len(functional_tests)} Failed on output '{k}'") + break + + if match: + print(f"āœ… Test {i+1}/{len(functional_tests)} Passed") + else: + all_passed = False + break + + if all_passed: + print("\\nšŸŽ‰ All functional tests passed!") + return True + else: + return False +""" + cells.append({ + "cell_type": "code", + "execution_count": None, + "metadata": {"id": "evaluator", "cellView": "form", "collapsed": True}, + "outputs": [], + "source": [line + "\n" for line in eval_script.split("\n")] + }) + + cells.append({ + "cell_type": "markdown", + "metadata": {}, + "source": ["# Evaluation code"] + }) + + # 6. 
Run and Disconnect runtime cell + cells.append({ + "cell_type": "code", + "execution_count": None, + "metadata": {"id": "disconnect"}, + "outputs": [], + "source": [ + "# Run the evaluator based on configuration\n", + "if EVAL_LANG == 'cuda':\n", + " Evaluate.eval_cuda(ch)\n", + "elif EVAL_LANG in ['pytorch', 'triton', 'jax', 'cute']:\n", + " Evaluate.eval_python(ch)\n", + "elif EVAL_LANG == 'mojo':\n", + " Evaluate.eval_mojo(ch)\n", + "else:\n", + " print(f\"Unknown language {EVAL_LANG}\")\n", + "\n", + "# Disconnect runtime to save Colab resources\n", + "if SAVE_GPU:\n", + " from google.colab import runtime\n", + " runtime.unassign()\n" + ] + }) + + notebook = { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "gpuType": "T4", + "provenance": [] + } + }, + "cells": cells + } + + out_dir_level = os.path.join(output_dir, level) + out_file = os.path.join(out_dir_level, f"{name}.ipynb") + os.makedirs(out_dir_level, exist_ok=True) + with open(out_file, 'w', encoding='utf-8') as f: + json.dump(notebook, f, indent=2) + print(f"Saved to {out_file}") + +if __name__ == "__main__": + out_dir = "challenges/colab_exports" + + exported_notebooks = [] + + challenges_glob = glob.glob("challenges/*/*") + for challenge_dir in challenges_glob: + if os.path.isdir(challenge_dir) and "colab_exports" not in challenge_dir: + generate_notebook(challenge_dir, out_dir) + parts = challenge_dir.strip('/').split('/') + level = parts[-2] + name = parts[-1] + if os.path.exists(os.path.join(out_dir, level, f"{name}.ipynb")): + exported_notebooks.append((level, name)) + + # Create README.md + readme_path = os.path.join(out_dir, "README.md") + with open(readme_path, "w", encoding="utf-8") as f: + f.write("# LeetGPU Colab Notebooks\n\n") + f.write("Click the badges below to open the challenges directly in Google Colab.\n\n") + + # Group by level + grouped = {} + for level, name in exported_notebooks: + if level not in grouped: + grouped[level] = [] + 
grouped[level].append(name) + + # Define specific sort order for levels + level_order = {"easy": 1, "medium": 2, "hard": 3} + for level in sorted(grouped.keys(), key=lambda x: level_order.get(x, 99)): + f.write(f"## {level.capitalize()}\n\n") + for name in sorted(grouped[level]): + colab_link = f"https://colab.research.google.com/github/lekhit/leetgpu-challenges/blob/main/challenges/colab_exports/{level}/{name}.ipynb" + badge = f"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)]({colab_link})" + f.write(f"- {badge} **{name}**\n") + f.write("\n") + print(f"Generated {readme_path}")