Skip to main content

Documentation Index

Fetch the complete documentation index at: https://resources.devweekends.com/llms.txt

Use this file to discover all available pages before exploring further.

The CP Toolkit

This is your battle-tested arsenal of code snippets. Every template here has been used in actual contests. Copy, paste, adapt, and win.
Pro Tip: Create a local file with all these snippets. Before each contest, have it open in a separate tab. During contest, Ctrl+F the pattern you need.

Lightning Round Snippets

Fast I/O Template

// Contest boilerplate: one mega-include, short type aliases, fast buffered I/O.
#include <bits/stdc++.h>
using namespace std;

#define ll long long
#define vi vector<int>
#define vll vector<ll>
#define pii pair<int, int>
#define pll pair<ll, ll>
#define all(x) (x).begin(), (x).end()  // begin/end pair for <algorithm> calls
#define sz(x) (int)(x).size()          // signed size: avoids signed/unsigned compare bugs

const int MOD = 1e9 + 7;  // common prime modulus
const int INF = 1e9;      // "infinity" that still fits in int without overflow on +1
const ll LINF = 1e18;     // long long infinity

// One test case goes here.
void solve() {
    // Your solution here
}

int main() {
    // Unsync C++ streams from C stdio and untie cin from cout:
    // makes cin/cout fast enough for ~10^6 tokens of input.
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    
    int t;
    cin >> t;  // number of test cases
    while (t--) {
        solve();
    }
    
    return 0;
}

Math Snippets

GCD and LCM

// Iterative Euclid. lcm divides before multiplying so a*b never overflows first.
long long gcd(long long a, long long b) { while (b) { long long t = a % b; a = b; b = t; } return a; }
long long lcm(long long a, long long b) { return a / gcd(a, b) * b; }

// GCC provides __gcd as an extension; since C++17 the standard also has
// std::gcd and std::lcm in <numeric>:
// ll g = __gcd(a, b);

Modular Arithmetic

const long long MOD = 1e9 + 7;

// Normalize x into [0, MOD); correct for negative x as well.
long long mod(long long x) {
    long long r = x % MOD;
    return r < 0 ? r + MOD : r;
}
long long add(long long a, long long b) { return mod(mod(a) + mod(b)); }
long long sub(long long a, long long b) { return mod(mod(a) - mod(b)); }
// Safe: both operands are < MOD (~1e9), so the product fits in long long.
long long mul(long long a, long long b) { return mod(mod(a) * mod(b)); }

// Binary exponentiation: base^exp mod m in O(log exp).
// BUG FIX: the old version reduced through the global mul(), which is
// hard-wired to MOD, so any call with a custom modulus m returned wrong
// answers (e.g. power(2, 10, 1000) gave 1024, not 24). All arithmetic now
// uses m itself. Safe for m up to ~3e9 (intermediate products fit in long long).
long long power(long long base, long long exp, long long m = 1000000007LL) {
    long long result = 1;
    base %= m;
    if (base < 0) base += m;  // keep base non-negative for negative inputs
    while (exp > 0) {
        if (exp & 1) result = result * base % m;
        base = base * base % m;
        exp >>= 1;
    }
    return result;
}

// Modular inverse via Fermat's little theorem: a^(MOD-2) mod MOD.
// Requires MOD prime and a not a multiple of MOD.
ll modInverse(ll a) { return power(a, MOD - 2); }
// Modular division: a / b == a * b^(-1) (mod MOD).
ll divide(ll a, ll b) { return mul(a, modInverse(b)); }

nCr with Precomputation

const int MAXN = 2e5 + 5;
ll fact[MAXN], inv_fact[MAXN];

// Fill factorial and inverse-factorial tables in O(MAXN + log MOD):
// one modular inverse at the top, then roll it back with inv_fact[i-1] = inv_fact[i] * i.
void precompute() {
    fact[0] = 1;
    for (int i = 1; i < MAXN; i++)
        fact[i] = mul(fact[i - 1], i);

    inv_fact[MAXN - 1] = modInverse(fact[MAXN - 1]);
    for (int i = MAXN - 1; i > 0; i--)
        inv_fact[i - 1] = mul(inv_fact[i], i);
}

// Binomial coefficient C(n, r) mod MOD in O(1) after precompute().
ll nCr(int n, int r) {
    if (r < 0 || r > n) return 0;  // out-of-range choices contribute nothing
    ll denom = mul(inv_fact[r], inv_fact[n - r]);
    return mul(fact[n], denom);
}

Prime Check and Sieve

// Deterministic trial division with the 6k±1 optimization: O(sqrt(n)).
bool isPrime(long long n) {
    if (n < 4) return n >= 2;  // handles negatives, 0, 1, and primes 2, 3
    if (n % 2 == 0 || n % 3 == 0) return false;
    // Every prime > 3 is of the form 6k-1 or 6k+1, so test only those.
    for (long long d = 5; d * d <= n; d += 6)
        if (n % d == 0 || n % (d + 2) == 0) return false;
    return true;
}

// Sieve of Eratosthenes
// Sieve of Eratosthenes: all primes below MAXN in O(n log log n).
const int MAXN = 1e6 + 5;
bool is_prime[MAXN];
vector<int> primes;

void sieve() {
    for (int i = 0; i < MAXN; i++) is_prime[i] = (i >= 2);
    for (int p = 2; p < MAXN; p++) {
        if (!is_prime[p]) continue;
        primes.push_back(p);
        // Start crossing out at p*p: smaller multiples already have a smaller factor.
        // long long index guards against overflow when p*p exceeds int range.
        for (long long m = (long long)p * p; m < MAXN; m += p)
            is_prime[m] = false;
    }
}

Array/Vector Snippets

Prefix Sum

// Build prefix sum: prefix[i] = a[0] + ... + a[i-1], so prefix[0] = 0.
// (n and array a are assumed to be defined by the surrounding code.)
vector<ll> prefix(n + 1, 0);
for (int i = 0; i < n; i++)
    prefix[i + 1] = prefix[i] + a[i];

// Range sum over [l, r] (0-indexed, inclusive) in O(1) after the O(n) build.
ll rangeSum(int l, int r) {
    return prefix[r + 1] - prefix[l];
}

Coordinate Compression

// Coordinate compression: replace each value by its rank among the distinct
// values, preserving order. Useful when values are huge but only their
// relative order matters (e.g. as BIT / segment-tree indices).
vector<int> a(n);
// ... read a

vector<int> sorted_a = a;
sort(all(sorted_a));
// unique() shifts duplicates to the tail; erase() drops them.
sorted_a.erase(unique(all(sorted_a)), sorted_a.end());

// Compress: each value becomes its index in the sorted, de-duplicated list.
for (int& x : a) {
    x = lower_bound(all(sorted_a), x) - sorted_a.begin();
}
// Now a[i] ∈ [0, unique_count - 1]

Kadane’s Algorithm (Max Subarray Sum)

// O(n) -- the classic "extend or restart" trick
// At each position, decide: is it better to extend the current subarray
// by adding x, or start a fresh subarray beginning at x?
long long maxSubarraySum(vector<long long>& a) {
    long long best = LLONG_MIN;  // global best; LLONG_MIN handles all-negative input
    long long run = 0;           // best sum of a subarray ending at the current element
    for (long long x : a) {
        run += x;
        if (x > run) run = x;    // restarting at x beats extending the old run
        if (run > best) best = run;
    }
    return best;
}
// Edge case: if all elements are negative, this returns the largest
// (least negative) single element, which is correct.

Count Inversions (Merge Sort)

// Merge the two sorted halves arr[l..mid] and arr[mid+1..r] in place,
// counting cross-half inversions (pairs with left value > right value).
long long merge_count(vector<int>& arr, int l, int mid, int r) {
    vector<int> lo(arr.begin() + l, arr.begin() + mid + 1);
    vector<int> hi(arr.begin() + mid + 1, arr.begin() + r + 1);

    long long inv = 0;
    int a = 0, b = 0, out = l;
    int an = (int)lo.size(), bn = (int)hi.size();

    while (a < an && b < bn) {
        if (lo[a] <= hi[b]) {
            arr[out++] = lo[a++];
        } else {
            inv += an - a;  // hi[b] jumps ahead of every remaining left element
            arr[out++] = hi[b++];
        }
    }
    while (a < an) arr[out++] = lo[a++];
    while (b < bn) arr[out++] = hi[b++];

    return inv;
}

ll countInversions(vector<int>& arr, int l, int r) {
    ll inv = 0;
    if (l < r) {
        int mid = l + (r - l) / 2;
        inv += countInversions(arr, l, mid);
        inv += countInversions(arr, mid + 1, r);
        inv += merge_count(arr, l, mid, r);
    }
    return inv;
}

Binary Search Templates

Lower Bound (First ≥ target)

// First index with a[i] >= target (a sorted ascending); a.size() if none.
int lowerBound(vector<int>& a, int target) {
    int left = 0, right = (int)a.size();
    while (left < right) {
        int m = left + (right - left) / 2;
        if (a[m] >= target) right = m;  // m could be the answer: keep it in range
        else left = m + 1;              // everything up to m is too small
    }
    return left;
}

Upper Bound (First > target)

// First index with a[i] > target (a sorted ascending); a.size() if none.
int upperBound(vector<int>& a, int target) {
    int left = 0, right = (int)a.size();
    while (left < right) {
        int m = left + (right - left) / 2;
        if (a[m] > target) right = m;  // strictly greater: m may be the answer
        else left = m + 1;             // a[m] <= target can never be it
    }
    return left;
}

Binary Search on Answer

// Check if 'mid' is achievable
bool canAchieve(int mid) {
    // Problem-specific logic
    return true;
}

// Binary search over the answer space. Requires canAchieve to be monotonic
// (false...false true...true); finds the smallest value for which it is true.
int binarySearchAnswer() {
    int lo = 0, hi = 1e9;  // Adjust bounds
    
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        if (canAchieve(mid)) hi = mid;  // Find minimum
        // if (canAchieve(mid)) lo = mid + 1;  // Find maximum (then return lo - 1)
        else lo = mid + 1;
    }
    return lo;
}

Binary Search on Real Numbers

// Binary search on reals: a fixed iteration count sidesteps epsilon-comparison
// pitfalls. 100 halvings shrink the interval by a factor of 2^100 (~1e30).
double binarySearchReal(double lo, double hi) {
    for (int step = 100; step > 0; step--) {
        double mid = 0.5 * (lo + hi);
        if (check(mid)) hi = mid;
        else lo = mid;
    }
    return lo;
}

Graph Snippets

Graph Representation

int n, m;  // nodes, edges (nodes assumed 1-indexed)
vector<vector<int>> adj(n + 1);  // Adjacency list
// NOTE(review): size adj only after n has been read; as written, n is still 0 here.

// For weighted graphs:
vector<vector<pair<int, int>>> adj(n + 1);  // {neighbor, weight}

// Read edges:
for (int i = 0; i < m; i++) {
    int u, v;
    cin >> u >> v;
    adj[u].push_back(v);
    adj[v].push_back(u);  // Remove for directed graph
}

DFS

vector<bool> visited(n + 1, false);

// Recursive depth-first traversal from 'node': O(V + E) over a whole run.
// Watch recursion depth -- paths ~10^5+ long can overflow the stack on some judges.
void dfs(int node) {
    visited[node] = true;
    // Process node here
    
    for (int next : adj[node])
        if (!visited[next])
            dfs(next);
}

BFS (Shortest Path Unweighted)

// BFS from 'start': returns edge-count distances; -1 marks unreachable nodes.
vector<int> bfs(int start) {
    vector<int> dist(n + 1, -1);  // -1 == not yet visited
    queue<int> frontier;
    
    dist[start] = 0;
    frontier.push(start);
    
    while (!frontier.empty()) {
        int cur = frontier.front();
        frontier.pop();
        
        for (int next : adj[cur]) {
            if (dist[next] != -1) continue;  // already reached, never by a longer path
            dist[next] = dist[cur] + 1;
            frontier.push(next);
        }
    }
    return dist;
}

Dijkstra’s Algorithm

vector<ll> dijkstra(int start) {
    vector<ll> dist(n + 1, LINF);
    priority_queue<pll, vector<pll>, greater<pll>> pq;
    
    dist[start] = 0;
    pq.push({0, start});
    
    while (!pq.empty()) {
        auto [d, u] = pq.top();
        pq.pop();
        
        if (d > dist[u]) continue;
        
        for (auto [v, w] : adj[u]) {
            if (dist[u] + w < dist[v]) {
                dist[v] = dist[u] + w;
                pq.push({dist[v], v});
            }
        }
    }
    return dist;
}

Union-Find (DSU)

// Disjoint Set Union with path compression + union by rank.
// Amortized near-O(1) per operation (inverse Ackermann).
class DSU {
public:
    vector<int> parent, rank_;
    
    DSU(int n) : parent(n + 1), rank_(n + 1, 0) {
        for (int i = 0; i <= n; i++) parent[i] = i;  // everyone starts alone
    }
    
    // Representative of x's set; flattens the whole path onto the root.
    int find(int x) {
        int root = x;
        while (parent[root] != root) root = parent[root];
        while (parent[x] != root) {  // second pass: compress
            int next = parent[x];
            parent[x] = root;
            x = next;
        }
        return root;
    }
    
    // Merge the sets holding x and y; false if they were already one set.
    bool unite(int x, int y) {
        int rx = find(x), ry = find(y);
        if (rx == ry) return false;
        
        if (rank_[rx] < rank_[ry]) swap(rx, ry);  // attach shorter under taller
        parent[ry] = rx;
        if (rank_[rx] == rank_[ry]) rank_[rx]++;
        return true;
    }
    
    bool connected(int x, int y) { return find(x) == find(y); }
};

Topological Sort (Kahn’s Algorithm)

// Kahn's algorithm: topological order of a 1-indexed DAG in O(V + E).
// Returns fewer than n nodes iff the graph has a cycle.
vector<int> toposort(int n, vector<vector<int>>& adj) {
    vector<int> indeg(n + 1, 0);
    for (int u = 1; u <= n; u++)
        for (int v : adj[u])
            indeg[v]++;
    
    queue<int> ready;  // nodes with no remaining prerequisites
    for (int u = 1; u <= n; u++)
        if (indeg[u] == 0)
            ready.push(u);
    
    vector<int> order;
    while (!ready.empty()) {
        int u = ready.front();
        ready.pop();
        order.push_back(u);
        
        for (int v : adj[u])
            if (--indeg[v] == 0)
                ready.push(v);
    }
    
    // order.size() == n means a valid topological order exists.
    return order;
}

Tree Snippets

Tree Diameter (Two BFS)

// BFS helper: {farthest node from 'start', its distance in edges}.
pair<int, int> farthest(int start) {
    vector<int> dist(n + 1, -1);
    queue<int> q;
    dist[start] = 0;
    q.push(start);
    
    int bestNode = start, bestDist = 0;
    while (!q.empty()) {
        int cur = q.front();
        q.pop();
        
        for (int next : adj[cur]) {
            if (dist[next] != -1) continue;
            dist[next] = dist[cur] + 1;
            q.push(next);
            
            if (dist[next] > bestDist) {
                bestDist = dist[next];
                bestNode = next;
            }
        }
    }
    return {bestNode, bestDist};
}

// Double-BFS diameter: the farthest node from any node is an endpoint of some
// diameter; a second sweep from that endpoint measures the full length.
int treeDiameter() {
    int endpoint = farthest(1).first;
    return farthest(endpoint).second;
}

LCA with Binary Lifting

const int LOG = 20;  // 2^20 > 10^6: enough levels for typical constraints
vector<int> depth(n + 1);
vector<vector<int>> up(n + 1, vector<int>(LOG));  // up[v][i] = 2^i-th ancestor of v

// Root the tree and fill depth[] and the binary-lifting table.
// NOTE(review): presumably called as dfs_lca(root, root) so the root's
// ancestor chain points at itself -- confirm against the caller.
void dfs_lca(int node, int parent) {
    up[node][0] = parent;
    // Build this node's lifting row before recursing: each entry composes two
    // half-jumps, and ancestors' rows are complete because DFS visits them first.
    for (int i = 1; i < LOG; i++)
        up[node][i] = up[up[node][i-1]][i-1];
    
    for (int child : adj[node]) {
        if (child != parent) {
            depth[child] = depth[node] + 1;
            dfs_lca(child, node);
        }
    }
}

int lca(int a, int b) {
    if (depth[a] < depth[b]) swap(a, b);
    
    int diff = depth[a] - depth[b];
    for (int i = 0; i < LOG; i++)
        if ((diff >> i) & 1)
            a = up[a][i];
    
    if (a == b) return a;
    
    for (int i = LOG - 1; i >= 0; i--)
        if (up[a][i] != up[b][i]) {
            a = up[a][i];
            b = up[b][i];
        }
    
    return up[a][0];
}

DP Snippets

0/1 Knapsack

// Space-optimized 1D DP: O(n*W) time, O(W) space
int knapsack(vector<int>& weight, vector<int>& value, int W) {
    int n = weight.size();
    vector<int> dp(W + 1, 0);  // dp[c] = best value at capacity c with items so far
    
    for (int i = 0; i < n; i++) {
        int wt = weight[i], val = value[i];
        // Capacity runs DOWNWARD so dp[c - wt] still excludes item i;
        // a forward loop would let the same item be taken repeatedly
        // (that forward variant IS unbounded knapsack).
        for (int c = W; c >= wt; c--)
            dp[c] = max(dp[c], dp[c - wt] + val);
    }
    return dp[W];
}
// For UNBOUNDED knapsack (unlimited copies), iterate w FORWARD instead.

Longest Increasing Subsequence (LIS)

// O(n log n) using the patience sorting technique
// dp[i] = smallest possible tail element of an increasing subsequence of length i+1
// This is NOT the actual LIS -- it tracks tail values for efficient extension.
// Length of the longest strictly increasing subsequence, O(n log n).
int lis(vector<int>& a) {
    // tails[k] = smallest possible tail of an increasing subsequence of length k+1.
    // tails is always sorted, which is what makes binary search valid.
    vector<int> tails;
    
    for (int x : a) {
        auto pos = lower_bound(tails.begin(), tails.end(), x);
        if (pos == tails.end())
            tails.push_back(x);  // x extends the longest subsequence found so far
        else
            *pos = x;            // x lowers the tail for that length
    }
    return tails.size();
}
// For strictly increasing: use lower_bound (as above)
// For non-decreasing: use upper_bound instead

Coin Change (Min Coins)

// Fewest coins summing to 'amount' with unlimited supply; -1 if impossible.
// Unbounded-knapsack shape: the FORWARD capacity loop allows coin reuse.
int coinChange(vector<int>& coins, int amount) {
    vector<int> dp(amount + 1, INF);
    dp[0] = 0;  // zero coins make amount zero
    
    for (int c : coins)
        for (int v = c; v <= amount; v++)
            if (dp[v - c] + 1 < dp[v])
                dp[v] = dp[v - c] + 1;
    
    return dp[amount] == INF ? -1 : dp[amount];
}

Edit Distance

// Levenshtein distance: minimum insertions/deletions/replacements turning a
// into b. O(n*m) time and space.
int editDistance(string& a, string& b) {
    int n = a.size(), m = b.size();
    vector<vector<int>> dp(n + 1, vector<int>(m + 1));
    
    for (int i = 0; i <= n; i++) dp[i][0] = i;  // delete all of a's prefix
    for (int j = 0; j <= m; j++) dp[0][j] = j;  // insert all of b's prefix
    
    for (int i = 1; i <= n; i++) {
        for (int j = 1; j <= m; j++) {
            if (a[i-1] == b[j-1]) {
                dp[i][j] = dp[i-1][j-1];  // characters agree: no edit needed
                continue;
            }
            int best = dp[i-1][j-1];                     // replace
            if (dp[i-1][j] < best) best = dp[i-1][j];    // delete from a
            if (dp[i][j-1] < best) best = dp[i][j-1];    // insert into a
            dp[i][j] = best + 1;
        }
    }
    return dp[n][m];
}

Data Structure Snippets

Segment Tree (Point Update, Range Query)

// Segment tree for range-sum queries with point assignment.
// Internal nodes are 1-based; children of 'idx' are 2*idx and 2*idx+1.
// build: O(n); update/query: O(log n).
class SegmentTree {
    vector<long long> tree;
    int n;
    
    void build(vector<long long>& a, int idx, int lo, int hi) {
        if (lo == hi) {
            tree[idx] = a[lo];
            return;
        }
        int mid = (lo + hi) / 2;
        build(a, 2 * idx, lo, mid);
        build(a, 2 * idx + 1, mid + 1, hi);
        tree[idx] = tree[2 * idx] + tree[2 * idx + 1];
    }
    
    void update(int idx, int lo, int hi, int pos, long long val) {
        if (lo == hi) {
            tree[idx] = val;
            return;
        }
        int mid = (lo + hi) / 2;
        if (pos <= mid) update(2 * idx, lo, mid, pos, val);
        else update(2 * idx + 1, mid + 1, hi, pos, val);
        tree[idx] = tree[2 * idx] + tree[2 * idx + 1];  // recompute on the way up
    }
    
    long long query(int idx, int lo, int hi, int l, int r) {
        if (r < lo || hi < l) return 0;            // disjoint: identity for sum
        if (l <= lo && hi <= r) return tree[idx];  // fully covered
        int mid = (lo + hi) / 2;
        return query(2 * idx, lo, mid, l, r) +
               query(2 * idx + 1, mid + 1, hi, l, r);
    }
    
public:
    SegmentTree(vector<long long>& a) {
        n = a.size();
        tree.assign(4 * n, 0);  // 4n is a safe bound on the node count
        build(a, 1, 0, n - 1);
    }
    
    // Set a[idx] = val.
    void update(int idx, long long val) { update(1, 0, n - 1, idx, val); }
    // Sum of a[l..r], inclusive, 0-indexed.
    long long query(int l, int r) { return query(1, 0, n - 1, l, r); }
};

Binary Indexed Tree (Fenwick Tree)

// Fenwick / Binary Indexed Tree: prefix sums with point updates.
// Public API is 0-indexed (internally 1-based). update/query are O(log n);
// tree[i] covers a block whose length is i's lowest set bit.
class BIT {
    // FIX: 'n' is declared before 'tree'. Members are initialized in
    // DECLARATION order, so the original (tree before n, but init list
    // "n(n), tree(n + 1, 0)") only worked because the constructor parameter
    // shadowed the member -- a classic -Wreorder trap.
    int n;
    vector<long long> tree;
    
public:
    BIT(int n) : n(n), tree(n + 1, 0) {}
    
    // Add 'delta' at index i (0-based).
    void update(int i, long long delta) {
        for (++i; i <= n; i += i & (-i))  // hop to the next covering block
            tree[i] += delta;
    }
    
    // Sum of [0, i].
    long long query(int i) {
        long long sum = 0;
        for (++i; i > 0; i -= i & (-i))  // strip the lowest set bit each step
            sum += tree[i];
        return sum;
    }
    
    // Sum of [l, r].
    long long query(int l, int r) {
        return query(r) - (l ? query(l - 1) : 0);
    }
};

String Snippets

String Hashing

// Polynomial rolling hash: treats string as a base-31 number mod a large prime.
// This lets you compare any two substrings in O(1) after O(n) preprocessing.
// Collision probability: ~1/MOD per comparison (~10^-9). For extra safety,
// use double hashing (two different BASE/MOD pairs).
// Polynomial rolling hash (base 31, mod 1e9+9) over lowercase letters.
// After buildHash(s), any substring hash is O(1) via getHash.
const long long BASE = 31, MOD = 1e9 + 9;
vector<long long> pw, h;

void buildHash(string& s) {
    int n = s.size();
    pw.assign(n + 1, 0);  // pw[i] = BASE^i mod MOD
    h.assign(n + 1, 0);   // h[i]  = hash of prefix s[0..i-1]
    
    pw[0] = 1;
    for (int i = 1; i <= n; i++)
        pw[i] = pw[i - 1] * BASE % MOD;
    
    // 'a' maps to 1 (not 0) so leading 'a's still affect the hash.
    for (int i = 0; i < n; i++)
        h[i + 1] = (h[i] * BASE + (s[i] - 'a' + 1)) % MOD;
}

// Hash of s[l..r] (0-indexed, inclusive): subtract the shifted prefix before l.
long long getHash(int l, int r) {
    long long whole = h[r + 1];
    long long prefix = h[l] * pw[r - l + 1] % MOD;
    return (whole - prefix + MOD) % MOD;
}
// Common use: check if s[l1..r1] == s[l2..r2] in O(1)
// if (getHash(l1, r1) == getHash(l2, r2)) { /* likely equal */ }

KMP Pattern Matching

// KMP failure table: lps[i] = length of the longest proper prefix of
// pattern[0..i] that is also a suffix of it. O(m).
vector<int> computeLPS(string& pattern) {
    int m = pattern.size();
    vector<int> lps(m, 0);
    
    int len = 0;  // length currently matched against the pattern's own prefix
    for (int i = 1; i < m; ) {
        if (pattern[i] == pattern[len]) {
            lps[i++] = ++len;
        } else if (len > 0) {
            len = lps[len - 1];  // fall back; do NOT advance i
        } else {
            i++;                 // no prefix matches: lps[i] stays 0
        }
    }
    return lps;
}

// All start positions (0-indexed) of 'pattern' in 'text': O(n + m).
vector<int> kmpSearch(string& text, string& pattern) {
    vector<int> lps = computeLPS(pattern);
    vector<int> matches;
    
    int n = text.size(), m = pattern.size();
    int ti = 0, pj = 0;  // cursors into text and pattern
    
    while (ti < n) {
        if (text[ti] == pattern[pj]) {
            ti++;
            pj++;
            if (pj == m) {
                matches.push_back(ti - m);  // full match just ended
                pj = lps[pj - 1];           // keep going: allows overlapping matches
            }
        } else if (pj > 0) {
            pj = lps[pj - 1];  // reuse the partial match; ti never moves back
        } else {
            ti++;
        }
    }
    return matches;
}

Bit Manipulation Snippets

// Check if the i-th bit of x is set
bool isSet(int x, int i) { return (x >> i) & 1; }

// Return x with bit i turned on
int setBit(int x, int i) { return x | (1 << i); }

// Return x with bit i turned off
int clearBit(int x, int i) { return x & ~(1 << i); }

// Return x with bit i flipped
int toggleBit(int x, int i) { return x ^ (1 << i); }

// Number of set bits (popcount); GCC/Clang builtins compile to one instruction
int countBits(int x) { return __builtin_popcount(x); }
int countBitsLL(long long x) { return __builtin_popcountll(x); }

// Isolate the lowest set bit (two's-complement trick)
int lowestBit(int x) { return x & (-x); }

// True iff x is a power of two (positive, exactly one bit set)
bool isPowerOf2(int x) { return x && !(x & (x - 1)); }

// Iterate over all NON-EMPTY subsets of mask, in decreasing numeric order.
// 'sub - 1' flips the lowest set bit and everything below it; AND-ing with
// mask keeps only allowed bits, yielding the next smaller subset. The empty
// subset (0) is not visited. Summed over all masks of n bits this is O(3^n).
for (int sub = mask; sub > 0; sub = (sub - 1) & mask) {
    // Process subset 'sub'
}

Keep this toolkit handy. Before every contest, skim through it. The more familiar these patterns become, the faster you’ll code during contests. Copy. Paste. Win.

Interview Deep-Dive

Strong Answer:
  • Both support O(log n) point updates and O(log n) range queries. The key differences are in constant factor, code complexity, and extensibility.
  • Fenwick Tree advantages: 2-3x faster constant factor (uses simple bit manipulation instead of recursive tree traversal), half the memory (n+1 array vs 4n array), and much shorter code (15 lines vs 40+ lines). For pure range-sum with point updates, BIT is strictly superior.
  • Segment Tree advantages: supports any associative operation (min, max, GCD, not just sum), supports range updates with lazy propagation, supports more complex queries (find kth element, count elements in range satisfying condition). BIT cannot do lazy propagation natively.
  • Decision framework: if the problem is range sum with point updates, use BIT. If the problem involves range min/max, range updates, or any operation beyond addition, use segment tree. If the problem requires persistent data structure (historical queries), use persistent segment tree.
  • In a contest, I default to BIT for sum queries because it is faster to write and has fewer bugs. I switch to segment tree only when the problem specifically requires range updates or non-additive operations.
Follow-up: Can you explain how the BIT uses bit manipulation to traverse the tree? The BIT stores partial sums where the index’s lowest set bit determines the range covered. For update at index i: add delta, then advance to the next index by adding the lowest set bit (i += i & (-i)). For prefix query up to index i: accumulate tree[i], then move to the parent by removing the lowest set bit (i -= i & (-i)). The expression i & (-i) isolates the lowest set bit because -i in two’s complement is the bitwise NOT of i, plus 1. This makes each operation visit at most O(log n) nodes, and the constant factor is a single bit operation per step.
Strong Answer:
  • KMP solves exact string matching in O(n + m) where n is the text length and m is the pattern length. The naive approach is O(nm) because after each mismatch, it restarts matching from the beginning of the pattern. KMP avoids this by precomputing how much of the pattern can be reused after a mismatch.
  • The failure function (also called LPS array for “longest proper prefix which is also suffix”) for each position i in the pattern stores the length of the longest proper prefix of pattern[0..i] that is also a suffix. This tells you: after a mismatch at position j in the pattern, you can skip ahead to position lps[j-1] instead of starting over at 0.
  • Building the LPS array is itself an elegant use of two pointers: a “current position” pointer and a “matched length” pointer. When characters match, both advance. When they do not match, the matched length pointer falls back to lps[matched_length - 1]. This is O(m) because each pointer advances at most m times total.
  • The matching phase works identically: maintain a pointer in the text and a pointer in the pattern. On match, advance both. On mismatch, fall back the pattern pointer using the LPS array. The text pointer never moves backward, so total work is O(n).
  • Total: O(n + m) time, O(m) space for the LPS array.
Follow-up: When would you choose string hashing over KMP, and what are the tradeoffs? String hashing is simpler to implement (fewer edge cases) and naturally extends to 2D pattern matching, multiple pattern matching, and substring comparison. The tradeoff: hashing has a collision probability (approximately 1/MOD per comparison), so it is probabilistic rather than deterministic. For contest problems, the collision risk with a single hash is about 10^-9, which is usually acceptable. For guaranteed correctness, use double hashing (two different base/mod pairs) which reduces collision probability to about 10^-18. I use hashing when I need substring comparison (is s[l1..r1] == s[l2..r2]?) and KMP when I need exact occurrence positions of a single pattern.
Strong Answer:
  • Both use a 1D DP array where dp[w] = maximum value achievable with capacity w. The difference is in the inner loop direction, and understanding why requires understanding what values dp[w] depends on at each step.
  • For 0/1 Knapsack (each item used at most once): iterate w from W down to weight[i]. When computing dp[w], we read dp[w - weight[i]], which has NOT yet been updated in this iteration of the outer loop. So dp[w - weight[i]] reflects a state where item i has not been used — exactly what we want for 0/1 knapsack.
  • For Unbounded Knapsack (unlimited copies): iterate w from weight[i] up to W. When computing dp[w], we read dp[w - weight[i]], which HAS already been updated in this iteration. So dp[w - weight[i]] might already include item i — which is fine because we can use it again.
  • The loop direction controls whether the current item’s contribution is counted once or multiple times. Reversing the loop is a single-line change that switches between two fundamentally different problems. This is one of the most elegant tricks in DP.
  • Memory optimization: both use O(W) space instead of the naive 2D O(nW) table. The 2D table makes the dependency structure explicit (dp[i][w] depends on dp[i-1][w] and dp[i-1][w-weight[i]] for 0/1). The 1D optimization collapses the item dimension by processing items in order and using the loop direction to implicitly encode whether the current item has been used.
Follow-up: Can you extend this to solve the “count number of ways” variant instead of “maximum value”? Yes. Replace max with + in the transition. For counting ways to make exact change: dp[0] = 1 (one way to make amount 0: use nothing), and for each coin, dp[w] += dp[w - coin]. The same loop direction logic applies: reverse loop for “each coin used at most once” (combination sum without repetition), forward loop for “unlimited coins” (combination sum with repetition). The initialization changes from dp[w] = 0 to dp[0] = 1, and the transition changes from max to sum.