diff --git a/docs/capabilities/server/redis.mdx b/docs/capabilities/server/redis.mdx
index bcca403..a1c6ef4 100644
--- a/docs/capabilities/server/redis.mdx
+++ b/docs/capabilities/server/redis.mdx
@@ -1,5 +1,5 @@
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
# Redis
@@ -26,6 +26,12 @@ Each installation of an app is uniquely name-spaced, which means Redis data is s
All limits are applied at a per-installation granularity.
+If your app exceeds 500 MB of storage, writes to Redis may fail, which can affect features that depend on cached or stored data. To stay under the limit:
+
+- Evict data by removing keys you no longer need
+- Use TTLs to set expirations for temporary or stale data
+- Monitor your app's data usage and be selective about what you store
+
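+As a rough sketch, the cleanup patterns above might look like the following (the key names and the 10-minute TTL are illustrative placeholders, not part of any real app):
+
+```ts
+import { redis } from "@devvit/redis";
+
+async function cleanupExample() {
+  // Give temporary data a TTL so it expires on its own (10 minutes here)
+  await redis.set("cache:leaderboard", JSON.stringify({ top: [] }));
+  await redis.expire("cache:leaderboard", 600);
+
+  // Delete keys you no longer need instead of letting them accumulate
+  await redis.del("stale:session");
+}
+```
+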
## Examples
### Menu actions
@@ -54,38 +60,38 @@ All limits are applied at a per-installation granularity.
]}>
- ```ts title="server/index.ts"
- import { redis } from '@devvit/redis';
- import type { MenuItemRequest, UiResponse } from '@devvit/web/shared';
+```ts title="server/index.ts"
+import { redis } from "@devvit/redis";
+import type { MenuItemRequest, UiResponse } from "@devvit/web/shared";
- app.post('/internal/menu/redis-test', async (c) => {
- const _request = await c.req.json();
- const key = 'hello';
- await redis.set(key, 'world');
- const value = await redis.get(key);
- console.log(`${key}: ${value}`);
- return c.json({ status: 'ok' });
- });
- ```
+app.post("/internal/menu/redis-test", async (c) => {
+ const _request = await c.req.json();
+ const key = "hello";
+ await redis.set(key, "world");
+ const value = await redis.get(key);
+ console.log(`${key}: ${value}`);
+ return c.json({ status: "ok" });
+});
+```
- ```ts title="server/index.ts"
- import { redis } from '@devvit/redis';
- import type { MenuItemRequest, UiResponse } from '@devvit/web/shared';
+```ts title="server/index.ts"
+import { redis } from "@devvit/redis";
+import type { MenuItemRequest, UiResponse } from "@devvit/web/shared";
- router.post(
- "/internal/menu/redis-test",
- async (_req, res) => {
- const key = 'hello';
- await redis.set(key, 'world');
- const value = await redis.get(key);
- console.log(`${key}: ${value}`);
- res.json({ status: 'ok' });
- },
- );
- ```
+router.post(
+ "/internal/menu/redis-test",
+ async (_req, res) => {
+ const key = "hello";
+ await redis.set(key, "world");
+ const value = await redis.get(key);
+ console.log(`${key}: ${value}`);
+ res.json({ status: "ok" });
+ }
+);
+```
@@ -106,7 +112,6 @@ All limits are applied at a per-installation granularity.
-
### Games
You can take a look at this [Game Template](https://github.com/reddit/devvit-template-phaser/) to see a basic implementation of Redis in a game built with Phaser.JS
@@ -140,10 +145,11 @@ For all examples below, we assume that you already have obtained a Redis Client.
redis: true,
});
- //Then, in any function that has a reference to Devvit.Context:
- const redis = context.redis;
- ```
-
+//Then, in any function that has a reference to Devvit.Context:
+const redis = context.redis;
+```
+
@@ -159,28 +165,28 @@ For all examples below, we assume that you already have obtained a Redis Client.
| [rename](https://redis.io/commands/rename) | Renames a key. | None |
-
- Code Example
-
+
+ Code Example
+
```tsx
async function simpleReadWriteExample() {
- // Set a key
- await redis.set('color', 'red');
+  // Set a key
+  await redis.set("color", "red");
- // Check if a key exists
- console.log('Key exists: ' + (await redis.exists('color')));
+  // Check if a key exists
+  console.log("Key exists: " + (await redis.exists("color")));
- // Get a key
- console.log('Color: ' + (await redis.get('color')));
+  // Get a key
+  console.log("Color: " + (await redis.get("color")));
- // Get the type of a key
- console.log('Type: ' + (await redis.type('color')));
+  // Get the type of a key
+  console.log("Type: " + (await redis.type("color")));
- // Delete a key
- await redis.del('color');
+  // Delete a key
+  await redis.del("color");
}
-```
+```
```bash
Color: red
@@ -191,9 +197,9 @@ Type: string
### Batch read/write
-| **Command** | **Action** | **Limits** |
-| -------------------------------------- | ----------------------------------------------- | ---------- |
-| [mGet](https://redis.io/commands/mget) | Returns the values of all specified keys. | None |
+| **Command** | **Action** | **Limits** |
+| -------------------------------------- | ----------------------------------------------- | ------------------------------------------------------------------------- |
+| [mGet](https://redis.io/commands/mget) | Returns the values of all specified keys. | None |
| [mSet](https://redis.io/commands/mset) | Sets the given keys to their respective values. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
@@ -205,13 +211,13 @@ Type: string
async function batchReadWriteExample() {
// Set multiple keys at once
await redis.mSet({
- name: 'Devvit',
- occupation: 'Developer',
- yearsOfExperience: '9000',
+ name: "Devvit",
+ occupation: "Developer",
+ yearsOfExperience: "9000",
});
// Get multiple keys
- console.log('Result: ' + (await redis.mGet(['name', 'occupation'])));
+ console.log("Result: " + (await redis.mGet(["name", "occupation"])));
}
```
@@ -223,11 +229,11 @@ Result: Devvit,Developer
### Strings
-| **Command** | **Action** | **Limits** |
-| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------- |
-| [getRange](https://redis.io/commands/getrange) | Returns the substring of the string value stored at key, determined by the offsets start and end (both are inclusive). | None |
+| **Command** | **Action** | **Limits** |
+| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------- |
+| [getRange](https://redis.io/commands/getrange) | Returns the substring of the string value stored at key, determined by the offsets start and end (both are inclusive). | None |
| [setRange](https://redis.io/commands/setrange) | Overwrites part of the string stored at key, starting at the specified offset, for the entire length of value. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [strLen](https://redis.io/commands/strlen) | Returns the length of the string value stored at key. | None |
+| [strLen](https://redis.io/commands/strlen) | Returns the length of the string value stored at key. | None |
@@ -237,18 +243,20 @@ Result: Devvit,Developer
```tsx
async function stringsExample() {
// First, set 'word' to 'tacocat'
- await redis.set('word', 'tacocat');
+ await redis.set("word", "tacocat");
// Use getRange() to get the letters in 'word' between index 0 to 3, inclusive
- console.log('Range from index 0 to 3: ' + (await redis.getRange('word', 0, 3)));
+ console.log(
+ "Range from index 0 to 3: " + (await redis.getRange("word", 0, 3))
+ );
// Use setRange() to insert 'blue' at index 0
- await redis.setRange('word', 0, 'blue');
+ await redis.setRange("word", 0, "blue");
- console.log('Word after using setRange(): ' + (await redis.get('word')));
+ console.log("Word after using setRange(): " + (await redis.get("word")));
// Use strLen() to verify the word length
- console.log('Word length: ' + (await redis.strLen('word')));
+ console.log("Word length: " + (await redis.strLen("word")));
}
```
@@ -264,18 +272,18 @@ Word length: 7
Redis hashes can store up to ~4.2 billion key-value pairs. We recommend using a hash to manage collections of key-value pairs whenever possible, and iterating over it with a combination of `hScan`, `hKeys`, and `hGetAll`.
-| **Command** | **Action** | **Limits** |
-| --------------------------------------------- | --------------------------------------------------------------------------------- | ---------- |
-| [hGet](https://redis.io/commands/hget) | Returns the value associated with field in the hash stored at key. | None |
-| [hMGet](https://redis.io/commands/hmget) | Returns the value of all specified field in the hash stored at multiple keys. | May be disabled for your app (allowlisted feature) |
+| **Command** | **Action** | **Limits** |
+| --------------------------------------------- | --------------------------------------------------------------------------------- | ------------------------------------------------------------------------- |
+| [hGet](https://redis.io/commands/hget) | Returns the value associated with field in the hash stored at key. | None |
+| [hMGet](https://redis.io/commands/hmget)      | Returns the values of all specified fields in the hash stored at multiple keys.    | May be disabled for your app (allowlisted feature)                          |
| [hSet](https://redis.io/commands/hset/) | Sets the specified fields to their respective values in the hash stored at key. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
| [hSetNX](https://redis.io/commands/hsetnx/)   | Sets field in the hash stored at key to value, only if field does not yet exist.  | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [hDel](https://redis.io/commands/hdel/) | Removes the specified fields from the hash stored at key. | None |
-| [hGetAll](https://redis.io/commands/hgetall/) | Returns a map of fields and their values stored in the hash. | None |
-| [hKeys](https://redis.io/commands/hkeys/) | Returns all field names in the hash stored at key. | None |
-| [hScan](https://redis.io/commands/hscan/) | Iterates fields of Hash types and their associated values. | No server-side cap; uses requested count |
+| [hDel](https://redis.io/commands/hdel/) | Removes the specified fields from the hash stored at key. | None |
+| [hGetAll](https://redis.io/commands/hgetall/) | Returns a map of fields and their values stored in the hash. | None |
+| [hKeys](https://redis.io/commands/hkeys/) | Returns all field names in the hash stored at key. | None |
+| [hScan](https://redis.io/commands/hscan/) | Iterates fields of Hash types and their associated values. | No server-side cap; uses requested count |
| [hIncrBy](https://redis.io/commands/hincrby/) | Increments the number stored at field in the hash stored at key by the given value. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [hLen](https://redis.io/commands/hlen/) | Returns the number of fields contained in the hash stored at key. | None |
+| [hLen](https://redis.io/commands/hlen/) | Returns the number of fields contained in the hash stored at key. | None |
@@ -323,17 +331,24 @@ Number of fields deleted: 3
// Example using hGetAll()
async function hashExample2() {
// Set 'groceryList' to fields containing products with quantities
- await redis.hSet('groceryList', {
- eggs: '12',
- apples: '3',
- milk: '1',
+ await redis.hSet("groceryList", {
+ eggs: "12",
+ apples: "3",
+ milk: "1",
});
// Get the groceryList record
- const record = await redis.hGetAll('groceryList');
+ const record = await redis.hGetAll("groceryList");
if (record != undefined) {
- console.log('Eggs: ' + record.eggs + ', Apples: ' + record.apples + ', Milk: ' + record.milk);
+ console.log(
+ "Eggs: " +
+ record.eggs +
+ ", Apples: " +
+ record.apples +
+ ", Milk: " +
+ record.milk
+ );
}
}
```
@@ -349,13 +364,13 @@ Eggs: 12, Apples: 3, Milk: 1
```tsx
// Example using hKeys()
async function hashExample3() {
- await redis.hSet('prices', {
- chair: '48',
- desk: '95',
- whiteboard: '23',
+ await redis.hSet("prices", {
+ chair: "48",
+ desk: "95",
+ whiteboard: "23",
});
- console.log('Keys: ' + (await redis.hKeys('prices')));
+ console.log("Keys: " + (await redis.hKeys("prices")));
}
```
@@ -370,14 +385,14 @@ Keys: chair,desk,whiteboard
```tsx
// Example using hScan()
async function hashExample4() {
- await redis.hSet('userInfo', {
- name: 'Bob',
- startDate: '01-05-20',
- totalAwards: '12',
+ await redis.hSet("userInfo", {
+ name: "Bob",
+ startDate: "01-05-20",
+ totalAwards: "12",
});
  // Scan and iterate over all the fields within 'userInfo'
- const hScanResponse = await redis.hScan('userInfo', 0);
+ const hScanResponse = await redis.hScan("userInfo", 0);
hScanResponse.fieldValues.forEach((x) => {
console.log("Field: '" + x.field + "', Value: '" + x.value + "'");
@@ -399,10 +414,10 @@ Field: 'startDate', Value: '01-05-20'
// Example using hIncrBy()
async function hashExample5() {
// Set user123's karma to 100
- await redis.hSet('user123', { karma: '100' });
+ await redis.hSet("user123", { karma: "100" });
// Increase user123's karma by 5
- console.log('Updated karma: ' + (await redis.hIncrBy('user123', 'karma', 5)));
+ console.log("Updated karma: " + (await redis.hIncrBy("user123", "karma", 5)));
}
```
@@ -417,14 +432,14 @@ Updated karma: 105
```tsx
// Example using hLen()
async function hashExample6() {
- await redis.hSet('supplies', {
- paperclips: '25',
- pencils: '10',
- erasers: '5',
- pens: '7',
+ await redis.hSet("supplies", {
+ paperclips: "25",
+ pencils: "10",
+ erasers: "5",
+ pens: "7",
});
- console.log('Number of fields: ' + (await redis.hLen('supplies')));
+ console.log("Number of fields: " + (await redis.hLen("supplies")));
}
```
@@ -436,8 +451,8 @@ Number of fields: 4
### Numbers
-| **Command** | **Action** | **Limits** |
-| ------------------------------------------ | ------------------------------------------------- | ---------- |
+| **Command** | **Action** | **Limits** |
+| ------------------------------------------ | ------------------------------------------------- | ------------------------------------------------------------------------- |
| [incrBy](https://redis.io/commands/incrby) | Increments the number stored at key by increment. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
@@ -447,9 +462,9 @@ Number of fields: 4
```tsx
async function numbersExample() {
- await redis.set('totalPoints', '53');
+ await redis.set("totalPoints", "53");
- console.log('Updated points: ' + (await redis.incrBy('totalPoints', 100)));
+ console.log("Updated points: " + (await redis.incrBy("totalPoints", 100)));
}
```
@@ -463,8 +478,8 @@ Updated points: 153
| **Command** | **Action** | **Limits** |
| --------------------------------------------------- | ----------------------------------------------------------------- | ---------- |
-| [expire](https://redis.io/commands/expire/) | Sets a timeout on key. | None |
-| [expireTime](https://redis.io/commands/expiretime/) | Returns the remaining seconds at which the given key will expire. | None |
+| [expire](https://redis.io/commands/expire/) | Sets a timeout on key. | None |
+| [expireTime](https://redis.io/commands/expiretime/) | Returns the remaining seconds at which the given key will expire. | None |
@@ -474,16 +489,16 @@ Updated points: 153
```tsx
async function keyExpirationExample() {
// Set a key 'product' with value 'milk'
- await redis.set('product', 'milk');
+ await redis.set("product", "milk");
// Get the current expireTime for the product
- console.log('Expire time: ' + (await redis.expireTime('product')));
+ console.log("Expire time: " + (await redis.expireTime("product")));
// Set the product to expire in 60 seconds
- await redis.expire('product', 60);
+ await redis.expire("product", 60);
// Get the updated expireTime for the product
- console.log('Updated expire time: ' + (await redis.expireTime('product')));
+ console.log("Updated expire time: " + (await redis.expireTime("product")));
}
```
@@ -508,13 +523,13 @@ You can sequence all of the above steps in a single transaction using `multi` an
If an error occurs inside a transaction before `exec` is called, Redis discards the transaction automatically. See the Redis docs: [Errors inside a transaction](https://redis.io/docs/latest/develop/interact/transactions/#errors-inside-a-transaction) for more info.
-| **Command** | **Action** | **Limits** |
-| --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
+| **Command** | **Action** | **Limits** |
+| --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------- |
| [multi](https://redis.io/commands/multi/) | Marks the start of a transaction block. | Max concurrent transactions per installation: 20 (default) |
-| [exec](https://redis.io/commands/exec/) | Executes all previously queued commands in a transaction and restores the connection state to normal. | Transaction execution timeout: 5 seconds |
-| [discard](https://redis.io/commands/discard/) | Flushes all previously queued commands in a transaction and restores the connection state to normal. | None |
-| [watch](https://redis.io/commands/watch/) | Marks the given keys to be watched for conditional execution of a transaction. `watch` returns a [TxClientLike](https://developers.reddit.com/docs/api/public-api/#-txclientlike) which should be used to call Redis commands in a transaction. | None |
-| [unwatch](https://redis.io/commands/unwatch/) | Flushes all the previously watched keys for a transaction. | None |
+| [exec](https://redis.io/commands/exec/) | Executes all previously queued commands in a transaction and restores the connection state to normal. | Transaction execution timeout: 5 seconds |
+| [discard](https://redis.io/commands/discard/) | Flushes all previously queued commands in a transaction and restores the connection state to normal. | None |
+| [watch](https://redis.io/commands/watch/) | Marks the given keys to be watched for conditional execution of a transaction. `watch` returns a [TxClientLike](https://developers.reddit.com/docs/api/public-api/#-txclientlike) which should be used to call Redis commands in a transaction. | None |
+| [unwatch](https://redis.io/commands/unwatch/) | Flushes all the previously watched keys for a transaction. | None |
@@ -526,18 +541,18 @@ If an error occurs inside a transaction before `exec` is called, Redis discards
```tsx
// Example using exec()
async function transactionsExample1() {
- await redis.mSet({ quantity: '5', karma: '32' });
+ await redis.mSet({ quantity: "5", karma: "32" });
- const txn = await redis.watch('quantity');
+ const txn = await redis.watch("quantity");
await txn.multi(); // Begin a transaction
- await txn.incrBy('karma', 10);
- await txn.set('name', 'Devvit');
+ await txn.incrBy("karma", 10);
+ await txn.set("name", "Devvit");
await txn.exec(); // Execute the commands in the transaction
console.log(
- 'Keys after completing transaction: ' +
- (await redis.mGet(['quantity', 'karma', 'name']))
+ "Keys after completing transaction: " +
+ (await redis.mGet(["quantity", "karma", "name"]))
);
}
```
@@ -553,15 +568,15 @@ Keys after completing transaction: 5,42,Devvit
```tsx
// Example using discard()
async function transactionsExample2() {
- await redis.set('price', '25');
+ await redis.set("price", "25");
- const txn = await redis.watch('price');
+ const txn = await redis.watch("price");
await txn.multi(); // Begin a transaction
- await txn.incrBy('price', 5);
+ await txn.incrBy("price", 5);
await txn.discard(); // Discard the commands in the transaction
- console.log('Price value: ' + (await redis.get('price'))); // 'price' should still be '25'
+ console.log("Price value: " + (await redis.get("price"))); // 'price' should still be '25'
}
```
@@ -576,21 +591,21 @@ Price value: 25
```tsx
// Example using unwatch()
async function transactionsExample3() {
- await redis.set('gold', '50');
+ await redis.set("gold", "50");
- const txn = await redis.watch('gold');
+ const txn = await redis.watch("gold");
await txn.multi(); // Begin a transaction
- await txn.incrBy('gold', 30);
+ await txn.incrBy("gold", 30);
await txn.unwatch(); // Unwatch "gold"
// Now that "gold" has been unwatched, we can increment its value
// outside the transaction without canceling the transaction
- await redis.incrBy('gold', -20);
+ await redis.incrBy("gold", -20);
await txn.exec(); // Execute the commands in the transaction
- console.log('Gold value: ' + (await redis.get('gold'))); // The value of 'gold' should be 50 + 30 - 20 = 60
+ console.log("Gold value: " + (await redis.get("gold"))); // The value of 'gold' should be 50 + 30 - 20 = 60
}
```
@@ -602,19 +617,19 @@ Gold value: 60
### Sorted set
-| **Command** | **Action** | **Limits** |
-| --------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
-| [zAdd](https://redis.io/commands/zadd/) | Adds all the specified members with the specified scores to the sorted set stored at key. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [zCard](https://redis.io/commands/zcard) | Returns the sorted set cardinality (number of elements) of the sorted set stored at key. | None |
+| **Command** | **Action** | **Limits** |
+| --------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [zAdd](https://redis.io/commands/zadd/) | Adds all the specified members with the specified scores to the sorted set stored at key. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
+| [zCard](https://redis.io/commands/zcard) | Returns the sorted set cardinality (number of elements) of the sorted set stored at key. | None |
| [zRange](https://redis.io/commands/zrange/)                      | Returns the specified range of elements in the sorted set stored at key. When using `by: 'lex'`, the start and stop inputs will be prepended with `[` by default, unless they already begin with `[`, `(` or are one of the special values `+` or `-`. | BYSCORE/BYLEX: LIMIT count capped to 1000 per call (server default). RANK: no server cap. Client default for by: 'score'/'lex' is count=1000 when no limit is provided. |
-| [zRem](https://redis.io/commands/zrem/) | Removes the specified members from the sorted set stored at key. | None |
-| [zScore](https://redis.io/commands/zscore/) | Returns the score of member in the sorted set at key. | None |
-| [zRank](https://redis.io/commands/zrank/) | Returns the rank of member in the sorted set stored at key. | None |
-| [zIncrBy](https://redis.io/commands/zincrby/) | Increments the score of member in the sorted set stored at key by value. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [zScan](https://redis.io/commands/zscan/) | Iterates elements of sorted set types and their associated scores. Note that there is no guaranteed ordering of elements in the result. | No server-side cap; uses requested count |
-| [zRemRangeByLex](https://redis.io/commands/zremrangebylex/) | When all elements in a sorted set are inserted with the same score, this command removes the elements at key between the lexicographical range specified by min and max. | None |
-| [zRemRangeByRank](https://redis.io/commands/zremrangebyrank/) | Removes all elements in the sorted set stored at key with rank between start and stop. | None |
-| [zRemRangeByScore](https://redis.io/commands/zremrangebyscore/) | Removes all elements in the sorted set stored at key with a score between min and max (inclusive). | None |
+| [zRem](https://redis.io/commands/zrem/) | Removes the specified members from the sorted set stored at key. | None |
+| [zScore](https://redis.io/commands/zscore/) | Returns the score of member in the sorted set at key. | None |
+| [zRank](https://redis.io/commands/zrank/) | Returns the rank of member in the sorted set stored at key. | None |
+| [zIncrBy](https://redis.io/commands/zincrby/) | Increments the score of member in the sorted set stored at key by value. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
+| [zScan](https://redis.io/commands/zscan/) | Iterates elements of sorted set types and their associated scores. Note that there is no guaranteed ordering of elements in the result. | No server-side cap; uses requested count |
+| [zRemRangeByLex](https://redis.io/commands/zremrangebylex/) | When all elements in a sorted set are inserted with the same score, this command removes the elements at key between the lexicographical range specified by min and max. | None |
+| [zRemRangeByRank](https://redis.io/commands/zremrangebyrank/) | Removes all elements in the sorted set stored at key with rank between start and stop. | None |
+| [zRemRangeByScore](https://redis.io/commands/zremrangebyscore/) | Removes all elements in the sorted set stored at key with a score between min and max (inclusive). | None |
@@ -627,29 +642,31 @@ Gold value: 60
// Example using zRange() with by 'score'
async function sortedSetExample1() {
await redis.zAdd(
- 'leaderboard',
- { member: 'louis', score: 37 },
- { member: 'fernando', score: 10 },
- { member: 'caesar', score: 20 },
- { member: 'alexander', score: 25 }
+ "leaderboard",
+ { member: "louis", score: 37 },
+ { member: "fernando", score: 10 },
+ { member: "caesar", score: 20 },
+ { member: "alexander", score: 25 }
);
// Cardinality should be '4' as there are 4 elements in the leaderboard set
- console.log('Cardinality: ' + (await redis.zCard('leaderboard')));
+ console.log("Cardinality: " + (await redis.zCard("leaderboard")));
// View elements with scores between 0 and 30 inclusive, sorted by score
- let scores = await redis.zRange('leaderboard', 0, 30, { by: 'score' });
- console.log('Scores: ' + JSON.stringify(scores));
+ let scores = await redis.zRange("leaderboard", 0, 30, { by: "score" });
+ console.log("Scores: " + JSON.stringify(scores));
// Remove 'fernando' from the leaderboard
- await redis.zRem('leaderboard', ['fernando']);
+ await redis.zRem("leaderboard", ["fernando"]);
// View the elements sorted by score again. This time 'fernando' should not appear in the output
- scores = await redis.zRange('leaderboard', 0, 30, { by: 'score' });
- console.log('Updated scores: ' + JSON.stringify(scores));
+ scores = await redis.zRange("leaderboard", 0, 30, { by: "score" });
+ console.log("Updated scores: " + JSON.stringify(scores));
// View caesar's score
- console.log("Caesar's score: " + (await redis.zScore('leaderboard', 'caesar')));
+ console.log(
+ "Caesar's score: " + (await redis.zScore("leaderboard", "caesar"))
+ );
}
```
@@ -668,17 +685,19 @@ Caesar's score: 20
// Example using zRange() with by 'lex'
async function sortedSetExample2() {
await redis.zAdd(
- 'checkpoints',
- { member: 'delta', score: 0 },
- { member: 'omega', score: 0 },
- { member: 'alpha', score: 0 },
- { member: 'charlie', score: 0 }
+ "checkpoints",
+ { member: "delta", score: 0 },
+ { member: "omega", score: 0 },
+ { member: "alpha", score: 0 },
+ { member: "charlie", score: 0 }
);
// View elements between the words 'alpha' and 'fox' inclusive, sorted lexicographically
// Note that 'by: "lex"' only works if all elements have the same score
- const members = await redis.zRange('checkpoints', 'alpha', 'fox', { by: 'lex' });
- console.log('Members: ' + JSON.stringify(members));
+ const members = await redis.zRange("checkpoints", "alpha", "fox", {
+ by: "lex",
+ });
+ console.log("Members: " + JSON.stringify(members));
}
```
@@ -694,17 +713,17 @@ Members: [{"score":0,"member":"alpha"},{"score":0,"member":"charlie"},{"score":0
// Example using zRange() with by 'rank'
async function sortedSetExample3() {
await redis.zAdd(
- 'grades',
- { member: 'sam', score: 80 },
- { member: 'norma', score: 95 },
- { member: 'alex', score: 77 },
- { member: 'don', score: 84 },
- { member: 'zeek', score: 92 }
+ "grades",
+ { member: "sam", score: 80 },
+ { member: "norma", score: 95 },
+ { member: "alex", score: 77 },
+ { member: "don", score: 84 },
+ { member: "zeek", score: 92 }
);
// View elements with a rank between 2 and 4 inclusive. Note that ranks start at index 0.
- const members = await redis.zRange('grades', 2, 4, { by: 'rank' });
- console.log('Members: ' + JSON.stringify(members));
+ const members = await redis.zRange("grades", 2, 4, { by: "rank" });
+ console.log("Members: " + JSON.stringify(members));
}
```
@@ -720,26 +739,27 @@ Members: [{"score":84,"member":"don"},{"score":92,"member":"zeek"},{"score":95,"
// Example using zRank() and zIncrBy()
async function sortedSetExample4() {
await redis.zAdd(
- 'animals',
- { member: 'zebra', score: 92 },
- { member: 'cat', score: 100 },
- { member: 'dog', score: 95 },
- { member: 'elephant', score: 97 }
+ "animals",
+ { member: "zebra", score: 92 },
+ { member: "cat", score: 100 },
+ { member: "dog", score: 95 },
+ { member: "elephant", score: 97 }
);
// View the rank of 'dog' in the animals set
// Rank should be '1' since 'dog' has the second lowest score. Note that ranks start at index 0.
- console.log("Dog's rank: " + (await redis.zRank('animals', 'dog')));
+ console.log("Dog's rank: " + (await redis.zRank("animals", "dog")));
// View the rank of 'zebra'
- console.log("Zebra's rank: " + (await redis.zRank('animals', 'zebra')));
+ console.log("Zebra's rank: " + (await redis.zRank("animals", "zebra")));
// Increase the score of 'dog' by 10
- await redis.zIncrBy('animals', 'dog', 10);
+ await redis.zIncrBy("animals", "dog", 10);
// View the rank of 'dog' again. This time it should be '3' because dog has the highest score.
console.log(
- "Dog's rank after incrementing score: " + (await redis.zRank('animals', 'dog'))
+ "Dog's rank after incrementing score: " +
+ (await redis.zRank("animals", "dog"))
);
}
```
@@ -758,21 +778,21 @@ Dog's rank after incrementing score: 3
// Example using zRemRangeByLex()
async function sortedSetExample5() {
await redis.zAdd(
- 'fruits',
- { member: 'kiwi', score: 0 },
- { member: 'mango', score: 0 },
- { member: 'banana', score: 0 },
- { member: 'orange', score: 0 },
- { member: 'apple', score: 0 }
+ "fruits",
+ { member: "kiwi", score: 0 },
+ { member: "mango", score: 0 },
+ { member: "banana", score: 0 },
+ { member: "orange", score: 0 },
+ { member: "apple", score: 0 }
);
// Remove fruits alphabetically ordered between 'kiwi' inclusive and 'orange' exclusive
// Note: The symbols '[' and '(' indicate inclusive or exclusive, respectively. These must be included in the call to zRemRangeByLex().
- await redis.zRemRangeByLex('fruits', '[kiwi', '(orange');
+ await redis.zRemRangeByLex("fruits", "[kiwi", "(orange");
// Only 'apple', 'banana', and 'orange' should remain in the set
- const zScanResponse = await redis.zScan('fruits', 0);
- console.log('zScanResponse: ' + JSON.stringify(zScanResponse));
+ const zScanResponse = await redis.zScan("fruits", 0);
+ console.log("zScanResponse: " + JSON.stringify(zScanResponse));
}
```
@@ -788,20 +808,20 @@ zScanResponse: {"cursor":0,"members":[{"score":0,"member":"apple"},{"score":0,"m
// Example using zRemRangeByRank()
async function sortedSetExample6() {
await redis.zAdd(
- 'fruits',
- { member: 'kiwi', score: 10 },
- { member: 'mango', score: 20 },
- { member: 'banana', score: 30 },
- { member: 'orange', score: 40 },
- { member: 'apple', score: 50 }
+ "fruits",
+ { member: "kiwi", score: 10 },
+ { member: "mango", score: 20 },
+ { member: "banana", score: 30 },
+ { member: "orange", score: 40 },
+ { member: "apple", score: 50 }
);
// Remove fruits ranked 1 through 3 inclusive
- await redis.zRemRangeByRank('fruits', 1, 3);
+ await redis.zRemRangeByRank("fruits", 1, 3);
// Only 'kiwi' and 'apple' should remain in the set
- const zScanResponse = await redis.zScan('fruits', 0);
- console.log('zScanResponse: ' + JSON.stringify(zScanResponse));
+ const zScanResponse = await redis.zScan("fruits", 0);
+ console.log("zScanResponse: " + JSON.stringify(zScanResponse));
}
```
@@ -817,20 +837,20 @@ zScanResponse: {"cursor":0,"members":[{"score":10,"member":"kiwi"},{"score":50,"
// Example using zRemRangeByScore() example
async function sortedSetExample7() {
await redis.zAdd(
- 'fruits',
- { member: 'kiwi', score: 10 },
- { member: 'mango', score: 20 },
- { member: 'banana', score: 30 },
- { member: 'orange', score: 40 },
- { member: 'apple', score: 50 }
+ "fruits",
+ { member: "kiwi", score: 10 },
+ { member: "mango", score: 20 },
+ { member: "banana", score: 30 },
+ { member: "orange", score: 40 },
+ { member: "apple", score: 50 }
);
// Remove fruits scored between 30 and 50 inclusive
- await redis.zRemRangeByScore('fruits', 30, 50);
+ await redis.zRemRangeByScore("fruits", 30, 50);
// Only 'kiwi' and 'mango' should remain in the set
- const zScanResponse = await redis.zScan('fruits', 0);
- console.log('zScanResponse: ' + JSON.stringify(zScanResponse));
+ const zScanResponse = await redis.zScan("fruits", 0);
+ console.log("zScanResponse: " + JSON.stringify(zScanResponse));
}
```
@@ -842,8 +862,8 @@ zScanResponse: {"cursor":0,"members":[{"score":10,"member":"kiwi"},{"score":20,"
### Bitfield
-| **Command** | **Action** | **Limits** |
-| ----------------------------------------------------------- | ------------------------------------------------- | ---------- |
+| **Command** | **Action** | **Limits** |
+| ----------------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------------------------------- |
| [bitfield](https://redis.io/docs/latest/commands/bitfield/) | Performs a sequence of operations on a bit string | Subject to storage quota gating (writes may be blocked if quota exceeded) |
@@ -853,40 +873,40 @@ zScanResponse: {"cursor":0,"members":[{"score":10,"member":"kiwi"},{"score":20,"
```tsx
async function bitfieldExample() {
- const setBits: number[] = await redis.bitfield('foo', 'set', 'i5', '#0', 11);
- console.log('Set result: ' + setBits); // [0]
+ const setBits: number[] = await redis.bitfield("foo", "set", "i5", "#0", 11);
+ console.log("Set result: " + setBits); // [0]
- const getBits: number[] = await redis.bitfield('foo', 'get', 'i5', '#0');
- console.log('Get result: ' + setBits); // [11]
+ const getBits: number[] = await redis.bitfield("foo", "get", "i5", "#0");
+ console.log("Get result: " + setBits); // [11]
const manyOperations: number[] = await redis.bitfield(
- 'bar',
- 'set',
- 'u2',
+ "bar",
+ "set",
+ "u2",
0,
3,
- 'get',
- 'u2',
+ "get",
+ "u2",
0,
- 'incrBy',
- 'u2',
+ "incrBy",
+ "u2",
0,
1,
- 'overflow',
- 'sat',
- 'get',
- 'u2',
+ "overflow",
+ "sat",
+ "get",
+ "u2",
0,
- 'set',
- 'u2',
+ "set",
+ "u2",
0,
3,
- 'incrBy',
- 'u2',
+ "incrBy",
+ "u2",
0,
1
);
- console.log('Results of many operations: ' + manyOperations); // [0, 3, 0, 0, 3, 3]
+ console.log("Results of many operations: " + manyOperations); // [0, 3, 0, 0, 3, 3]
}
```
@@ -905,7 +925,7 @@ To use it, update your import:
```ts
// import { redis } from '@devvit/redis';
-import { redisCompressed as redis } from '@devvit/redis';
+import { redisCompressed as redis } from "@devvit/redis";
```
:::warning
@@ -913,6 +933,7 @@ import { redisCompressed as redis } from '@devvit/redis';
:::
The `redisCompressed` client automatically:
+
- Compresses values on write (`set`, `hSet`, `mSet`, `hSetNX`) if it saves space.
- Decompresses values on read (`get`, `hGet`, `mGet`, `hMGet`, `hGetAll`).
@@ -964,10 +985,15 @@ Add these route handlers to your server.
```ts
-import { redis, scheduler, type TaskRequest, type TaskResponse } from '@devvit/web/server';
+import {
+ redis,
+ scheduler,
+ type TaskRequest,
+ type TaskResponse,
+} from "@devvit/web/server";
// Import the compressed client
-import { redisCompressed } from '@devvit/redis';
-import type { MenuItemRequest, UiResponse } from '@devvit/web/shared';
+import { redisCompressed } from "@devvit/redis";
+import type { MenuItemRequest, UiResponse } from "@devvit/web/shared";
type MigrateExampleFormRequest = {
startCursor?: string;
@@ -980,28 +1006,28 @@ type MigrateExampleJobData = {
processed?: number;
};
-const MY_DATA_HASH_KEY = 'my:app:large:dataset';
+const MY_DATA_HASH_KEY = "my:app:large:dataset";
// 1. Menu Endpoint: Returns the form definition
-app.post('/internal/menu/ops/migrate-example', async (c) => {
+app.post("/internal/menu/ops/migrate-example", async (c) => {
const _request = await c.req.json();
return c.json({
showForm: {
- name: 'migrateExampleForm', // Must match key in devvit.json "forms"
+ name: "migrateExampleForm", // Must match key in devvit.json "forms"
form: {
- title: 'Migrate Hash to Compression',
- acceptLabel: 'Start Migration',
+ title: "Migrate Hash to Compression",
+ acceptLabel: "Start Migration",
fields: [
{
- name: 'startCursor',
- label: 'Start Cursor (0 for beginning)',
- type: 'string',
- defaultValue: '0',
+ name: "startCursor",
+ label: "Start Cursor (0 for beginning)",
+ type: "string",
+ defaultValue: "0",
},
{
- name: 'chunkSize',
- label: 'Items per batch',
- type: 'number',
+ name: "chunkSize",
+ label: "Items per batch",
+ type: "number",
defaultValue: 20000,
},
],
@@ -1011,18 +1037,20 @@ app.post('/internal/menu/ops/migrate-example', async (c) => {
});
// 2. Form Handler: Receives input and schedules the first job
-app.post('/internal/form/ops/migrate-example', async (c) => {
- const body = await c.req.json().catch(
- () => ({} as MigrateExampleFormRequest)
- );
- const cursor = body.startCursor || '0';
+app.post("/internal/form/ops/migrate-example", async (c) => {
+ const body = await c.req
+ .json()
+ .catch(() => ({} as MigrateExampleFormRequest));
+ const cursor = body.startCursor || "0";
const size = Number(body.chunkSize) || 20000;
- console.log(`[Migration] Manual start requested. Cursor: ${cursor}, Chunk: ${size}`);
+ console.log(
+ `[Migration] Manual start requested. Cursor: ${cursor}, Chunk: ${size}`
+ );
// Kick off the first job in the chain
await scheduler.runJob({
- name: 'migrate-example-data',
+ name: "migrate-example-data",
runAt: new Date(), // Run immediately
data: {
cursor,
@@ -1033,27 +1061,29 @@ app.post('/internal/form/ops/migrate-example', async (c) => {
return c.json({
showToast: {
- text: 'Migration started in background',
- appearance: 'success',
+ text: "Migration started in background",
+ appearance: "success",
},
});
});
// 3. Scheduler Endpoint: The recursive worker
-app.post('/internal/scheduler/migrate-example-data', async (c) => {
+app.post("/internal/scheduler/migrate-example-data", async (c) => {
const startTime = Date.now();
try {
- const body = await c.req.json>().catch(
- () => ({} as TaskRequest)
- );
+ const body = await c.req
+      .json<TaskRequest<MigrateExampleJobData>>()
+ .catch(() => ({} as TaskRequest));
const data = body.data;
let cursor = Number(data?.cursor) || 0;
const chunkSize = Number(data?.chunkSize) || 20000;
const processedTotal = Number(data?.processed) || 0;
- console.log(`[Migration] Job started. Cursor: ${cursor}, Target Chunk: ${chunkSize}`);
+ console.log(
+ `[Migration] Job started. Cursor: ${cursor}, Target Chunk: ${chunkSize}`
+ );
let keepRunning = true;
let processedInJob = 0;
@@ -1100,7 +1130,7 @@ app.post('/internal/scheduler/migrate-example-data', async (c) => {
// Safety: Check execution time.
// If we are close to 30s (Devvit limit), stop early and requeue.
if (Date.now() - startTime > 20000) {
- console.log('[Migration] Time limit approaching, stopping early.');
+ console.log("[Migration] Time limit approaching, stopping early.");
keepRunning = false;
}
}
@@ -1111,9 +1141,11 @@ app.post('/internal/scheduler/migrate-example-data', async (c) => {
// If the cursor is not 0, we still have more data to scan.
// We schedule *this same job* to run again immediately.
if (cursor !== 0) {
- console.log(`[Migration] Requeueing. Next cursor: ${cursor}. Processed so far: ${newTotal}`);
+ console.log(
+ `[Migration] Requeueing. Next cursor: ${cursor}. Processed so far: ${newTotal}`
+ );
await scheduler.runJob({
- name: 'migrate-example-data',
+ name: "migrate-example-data",
runAt: new Date(),
data: {
cursor,
@@ -1122,14 +1154,21 @@ app.post('/internal/scheduler/migrate-example-data', async (c) => {
},
});
- return c.json({ status: 'requeued', processed: newTotal, cursor });
+ return c.json({
+ status: "requeued",
+ processed: newTotal,
+ cursor,
+ });
}
console.log(`[Migration] COMPLETE. Total items processed: ${newTotal}`);
- return c.json({ status: 'success', processed: newTotal });
+ return c.json({ status: "success", processed: newTotal });
} catch (error) {
- console.error('[Migration] Critical Job Error', error);
- return c.json({ status: 'error', message: error.message }, 500);
+ console.error("[Migration] Critical Job Error", error);
+ return c.json(
+ { status: "error", message: error.message },
+ 500
+ );
}
});
```
@@ -1138,10 +1177,15 @@ app.post('/internal/scheduler/migrate-example-data', async (c) => {
```ts
-import { redis, scheduler, type TaskRequest, type TaskResponse } from '@devvit/web/server';
+import {
+ redis,
+ scheduler,
+ type TaskRequest,
+ type TaskResponse,
+} from "@devvit/web/server";
// Import the compressed client
-import { redisCompressed } from '@devvit/redis';
-import type { MenuItemRequest, UiResponse } from '@devvit/web/shared';
+import { redisCompressed } from "@devvit/redis";
+import type { MenuItemRequest, UiResponse } from "@devvit/web/shared";
type MigrateExampleFormRequest = {
startCursor?: string;
@@ -1154,51 +1198,54 @@ type MigrateExampleJobData = {
processed?: number;
};
-const MY_DATA_HASH_KEY = 'my:app:large:dataset';
+const MY_DATA_HASH_KEY = "my:app:large:dataset";
// 1. Menu Endpoint: Returns the form definition
app.post(
- '/internal/menu/ops/migrate-example',
+ "/internal/menu/ops/migrate-example",
async (_req, res) => {
res.json({
showForm: {
- name: 'migrateExampleForm', // Must match key in devvit.json "forms"
+ name: "migrateExampleForm", // Must match key in devvit.json "forms"
form: {
- title: 'Migrate Hash to Compression',
- acceptLabel: 'Start Migration',
+ title: "Migrate Hash to Compression",
+ acceptLabel: "Start Migration",
fields: [
{
- name: 'startCursor',
- label: 'Start Cursor (0 for beginning)',
- type: 'string',
- defaultValue: '0',
+ name: "startCursor",
+ label: "Start Cursor (0 for beginning)",
+ type: "string",
+ defaultValue: "0",
},
{
- name: 'chunkSize',
- label: 'Items per batch',
- type: 'number',
+ name: "chunkSize",
+ label: "Items per batch",
+ type: "number",
defaultValue: 20000,
},
],
},
},
});
- },
+ }
);
// 2. Form Handler: Receives input and schedules the first job
app.post(
- '/internal/form/ops/migrate-example',
+ "/internal/form/ops/migrate-example",
async (req, res) => {
- const { startCursor, chunkSize } = req.body ?? ({} as MigrateExampleFormRequest);
- const cursor = startCursor || '0';
+ const { startCursor, chunkSize } =
+ req.body ?? ({} as MigrateExampleFormRequest);
+ const cursor = startCursor || "0";
const size = Number(chunkSize) || 20000;
- console.log(`[Migration] Manual start requested. Cursor: ${cursor}, Chunk: ${size}`);
+ console.log(
+ `[Migration] Manual start requested. Cursor: ${cursor}, Chunk: ${size}`
+ );
// Kick off the first job in the chain
await scheduler.runJob({
- name: 'migrate-example-data',
+ name: "migrate-example-data",
runAt: new Date(), // Run immediately
data: {
cursor,
@@ -1209,16 +1256,16 @@ app.post(
res.json({
showToast: {
- text: 'Migration started in background',
- appearance: 'success',
+ text: "Migration started in background",
+ appearance: "success",
},
});
- },
+ }
);
// 3. Scheduler Endpoint: The recursive worker
app.post>(
- '/internal/scheduler/migrate-example-data',
+ "/internal/scheduler/migrate-example-data",
async (req, res) => {
const startTime = Date.now();
@@ -1229,7 +1276,9 @@ app.post>(
const chunkSize = Number(data?.chunkSize) || 20000;
const processedTotal = Number(data?.processed) || 0;
- console.log(`[Migration] Job started. Cursor: ${cursor}, Target Chunk: ${chunkSize}`);
+ console.log(
+ `[Migration] Job started. Cursor: ${cursor}, Target Chunk: ${chunkSize}`
+ );
let keepRunning = true;
let processedInJob = 0;
@@ -1276,7 +1325,7 @@ app.post>(
// Safety: Check execution time.
// If we are close to 30s (Devvit limit), stop early and requeue.
if (Date.now() - startTime > 20000) {
- console.log('[Migration] Time limit approaching, stopping early.');
+ console.log("[Migration] Time limit approaching, stopping early.");
keepRunning = false;
}
}
@@ -1287,9 +1336,11 @@ app.post>(
// If the cursor is not 0, we still have more data to scan.
// We schedule *this same job* to run again immediately.
if (cursor !== 0) {
- console.log(`[Migration] Requeueing. Next cursor: ${cursor}. Processed so far: ${newTotal}`);
+ console.log(
+ `[Migration] Requeueing. Next cursor: ${cursor}. Processed so far: ${newTotal}`
+ );
await scheduler.runJob({
- name: 'migrate-example-data',
+ name: "migrate-example-data",
runAt: new Date(),
data: {
cursor,
@@ -1298,16 +1349,16 @@ app.post>(
},
});
- res.json({ status: 'requeued', processed: newTotal, cursor });
+ res.json({ status: "requeued", processed: newTotal, cursor });
} else {
console.log(`[Migration] COMPLETE. Total items processed: ${newTotal}`);
- res.json({ status: 'success', processed: newTotal });
+ res.json({ status: "success", processed: newTotal });
}
} catch (error) {
- console.error('[Migration] Critical Job Error', error);
- res.status(500).json({ status: 'error', message: error.message });
+ console.error("[Migration] Critical Job Error", error);
+ res.status(500).json({ status: "error", message: error.message });
}
- },
+ }
);
```
diff --git a/versioned_docs/version-0.12/capabilities/server/redis.mdx b/versioned_docs/version-0.12/capabilities/server/redis.mdx
index bcca403..a1c6ef4 100644
--- a/versioned_docs/version-0.12/capabilities/server/redis.mdx
+++ b/versioned_docs/version-0.12/capabilities/server/redis.mdx
@@ -1,5 +1,5 @@
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
# Redis
@@ -26,6 +26,12 @@ Each installation of an app is uniquely name-spaced, which means Redis data is s
All limits are applied at a per-installation granularity.
+If your app exceeds 500 MB of storage, writes to Redis may fail, which can affect features that depend on cached or stored data. To stay under the limit:
+
+- Evict data by removing keys you no longer need
+- Use TTLs to set expirations for temporary or stale data
+- Monitor your app's data usage and be selective about what you store
+
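+As a rough sketch, the cleanup patterns above might look like the following (the key names and the 10-minute TTL are illustrative placeholders, not part of any real app):
+
+```ts
+import { redis } from "@devvit/redis";
+
+async function cleanupExample() {
+  // Give temporary data a TTL so it expires on its own (10 minutes here)
+  await redis.set("cache:leaderboard", JSON.stringify({ top: [] }));
+  await redis.expire("cache:leaderboard", 600);
+
+  // Delete keys you no longer need instead of letting them accumulate
+  await redis.del("stale:session");
+}
+```
+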
## Examples
### Menu actions
@@ -54,38 +60,38 @@ All limits are applied at a per-installation granularity.
]}>
- ```ts title="server/index.ts"
- import { redis } from '@devvit/redis';
- import type { MenuItemRequest, UiResponse } from '@devvit/web/shared';
+```ts title="server/index.ts"
+import { redis } from "@devvit/redis";
+import type { MenuItemRequest, UiResponse } from "@devvit/web/shared";
- app.post('/internal/menu/redis-test', async (c) => {
- const _request = await c.req.json();
- const key = 'hello';
- await redis.set(key, 'world');
- const value = await redis.get(key);
- console.log(`${key}: ${value}`);
- return c.json({ status: 'ok' });
- });
- ```
+app.post("/internal/menu/redis-test", async (c) => {
+ const _request = await c.req.json();
+ const key = "hello";
+ await redis.set(key, "world");
+ const value = await redis.get(key);
+ console.log(`${key}: ${value}`);
+ return c.json({ status: "ok" });
+});
+```
- ```ts title="server/index.ts"
- import { redis } from '@devvit/redis';
- import type { MenuItemRequest, UiResponse } from '@devvit/web/shared';
+```ts title="server/index.ts"
+import { redis } from "@devvit/redis";
+import type { MenuItemRequest, UiResponse } from "@devvit/web/shared";
- router.post(
- "/internal/menu/redis-test",
- async (_req, res) => {
- const key = 'hello';
- await redis.set(key, 'world');
- const value = await redis.get(key);
- console.log(`${key}: ${value}`);
- res.json({ status: 'ok' });
- },
- );
- ```
+router.post(
+ "/internal/menu/redis-test",
+ async (_req, res) => {
+ const key = "hello";
+ await redis.set(key, "world");
+ const value = await redis.get(key);
+ console.log(`${key}: ${value}`);
+ res.json({ status: "ok" });
+ }
+);
+```
@@ -106,7 +112,6 @@ All limits are applied at a per-installation granularity.
-
### Games
You can take a look at this [Game Template](https://github.com/reddit/devvit-template-phaser/) to see a basic implementation of Redis in a game built with Phaser.JS
@@ -140,10 +145,11 @@ For all examples below, we assume that you already have obtained a Redis Client.
redis: true,
});
- //Then, in any function that has a reference to Devvit.Context:
- const redis = context.redis;
- ```
-
+//Then, in any function that has a reference to Devvit.Context:
+const redis = context.redis;
+```
+
@@ -159,28 +165,28 @@ For all examples below, we assume that you already have obtained a Redis Client.
| [rename](https://redis.io/commands/rename) | Renames a key. | None |
-
- Code Example
-
+
+ Code Example
+
```tsx
async function simpleReadWriteExample() {
- // Set a key
- await redis.set('color', 'red');
+  // Set a key
+  await redis.set("color", "red");
- // Check if a key exists
- console.log('Key exists: ' + (await redis.exists('color')));
+  // Check if a key exists
+  console.log("Key exists: " + (await redis.exists("color")));
- // Get a key
- console.log('Color: ' + (await redis.get('color')));
+  // Get a key
+  console.log("Color: " + (await redis.get("color")));
- // Get the type of a key
- console.log('Type: ' + (await redis.type('color')));
+  // Get the type of a key
+  console.log("Type: " + (await redis.type("color")));
- // Delete a key
- await redis.del('color');
+  // Delete a key
+  await redis.del("color");
}
-```
+```
```bash
Color: red
@@ -191,9 +197,9 @@ Type: string
### Batch read/write
-| **Command** | **Action** | **Limits** |
-| -------------------------------------- | ----------------------------------------------- | ---------- |
-| [mGet](https://redis.io/commands/mget) | Returns the values of all specified keys. | None |
+| **Command** | **Action** | **Limits** |
+| -------------------------------------- | ----------------------------------------------- | ------------------------------------------------------------------------- |
+| [mGet](https://redis.io/commands/mget) | Returns the values of all specified keys. | None |
| [mSet](https://redis.io/commands/mset) | Sets the given keys to their respective values. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
@@ -205,13 +211,13 @@ Type: string
async function batchReadWriteExample() {
// Set multiple keys at once
await redis.mSet({
- name: 'Devvit',
- occupation: 'Developer',
- yearsOfExperience: '9000',
+ name: "Devvit",
+ occupation: "Developer",
+ yearsOfExperience: "9000",
});
// Get multiple keys
- console.log('Result: ' + (await redis.mGet(['name', 'occupation'])));
+ console.log("Result: " + (await redis.mGet(["name", "occupation"])));
}
```
@@ -223,11 +229,11 @@ Result: Devvit,Developer
### Strings
-| **Command** | **Action** | **Limits** |
-| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------- |
-| [getRange](https://redis.io/commands/getrange) | Returns the substring of the string value stored at key, determined by the offsets start and end (both are inclusive). | None |
+| **Command** | **Action** | **Limits** |
+| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------- |
+| [getRange](https://redis.io/commands/getrange) | Returns the substring of the string value stored at key, determined by the offsets start and end (both are inclusive). | None |
| [setRange](https://redis.io/commands/setrange) | Overwrites part of the string stored at key, starting at the specified offset, for the entire length of value. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [strLen](https://redis.io/commands/strlen) | Returns the length of the string value stored at key. | None |
+| [strLen](https://redis.io/commands/strlen) | Returns the length of the string value stored at key. | None |
@@ -237,18 +243,20 @@ Result: Devvit,Developer
```tsx
async function stringsExample() {
// First, set 'word' to 'tacocat'
- await redis.set('word', 'tacocat');
+ await redis.set("word", "tacocat");
// Use getRange() to get the letters in 'word' between index 0 to 3, inclusive
- console.log('Range from index 0 to 3: ' + (await redis.getRange('word', 0, 3)));
+ console.log(
+ "Range from index 0 to 3: " + (await redis.getRange("word", 0, 3))
+ );
// Use setRange() to insert 'blue' at index 0
- await redis.setRange('word', 0, 'blue');
+ await redis.setRange("word", 0, "blue");
- console.log('Word after using setRange(): ' + (await redis.get('word')));
+ console.log("Word after using setRange(): " + (await redis.get("word")));
// Use strLen() to verify the word length
- console.log('Word length: ' + (await redis.strLen('word')));
+ console.log("Word length: " + (await redis.strLen("word")));
}
```
@@ -264,18 +272,18 @@ Word length: 7
Redis hashes can store up to ~4.2 billion key-value pairs. We recommend using a hash to manage collections of key-value pairs whenever possible, and iterating over it with a combination of `hScan`, `hKeys`, and `hGetAll`.
-| **Command** | **Action** | **Limits** |
-| --------------------------------------------- | --------------------------------------------------------------------------------- | ---------- |
-| [hGet](https://redis.io/commands/hget) | Returns the value associated with field in the hash stored at key. | None |
-| [hMGet](https://redis.io/commands/hmget) | Returns the value of all specified field in the hash stored at multiple keys. | May be disabled for your app (allowlisted feature) |
+| **Command** | **Action** | **Limits** |
+| --------------------------------------------- | --------------------------------------------------------------------------------- | ------------------------------------------------------------------------- |
+| [hGet](https://redis.io/commands/hget) | Returns the value associated with field in the hash stored at key. | None |
+| [hMGet](https://redis.io/commands/hmget)      | Returns the values of all specified fields in the hash stored at multiple keys.    | May be disabled for your app (allowlisted feature)                          |
| [hSet](https://redis.io/commands/hset/) | Sets the specified fields to their respective values in the hash stored at key. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
| [hSetNX](https://redis.io/commands/hsetnx/)   | Sets field in the hash stored at key to value, only if field does not yet exist.  | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [hDel](https://redis.io/commands/hdel/) | Removes the specified fields from the hash stored at key. | None |
-| [hGetAll](https://redis.io/commands/hgetall/) | Returns a map of fields and their values stored in the hash. | None |
-| [hKeys](https://redis.io/commands/hkeys/) | Returns all field names in the hash stored at key. | None |
-| [hScan](https://redis.io/commands/hscan/) | Iterates fields of Hash types and their associated values. | No server-side cap; uses requested count |
+| [hDel](https://redis.io/commands/hdel/) | Removes the specified fields from the hash stored at key. | None |
+| [hGetAll](https://redis.io/commands/hgetall/) | Returns a map of fields and their values stored in the hash. | None |
+| [hKeys](https://redis.io/commands/hkeys/) | Returns all field names in the hash stored at key. | None |
+| [hScan](https://redis.io/commands/hscan/) | Iterates fields of Hash types and their associated values. | No server-side cap; uses requested count |
| [hIncrBy](https://redis.io/commands/hincrby/) | Increments the number stored at field in the hash stored at key by the given value. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [hLen](https://redis.io/commands/hlen/) | Returns the number of fields contained in the hash stored at key. | None |
+| [hLen](https://redis.io/commands/hlen/) | Returns the number of fields contained in the hash stored at key. | None |
@@ -323,17 +331,24 @@ Number of fields deleted: 3
// Example using hGetAll()
async function hashExample2() {
// Set 'groceryList' to fields containing products with quantities
- await redis.hSet('groceryList', {
- eggs: '12',
- apples: '3',
- milk: '1',
+ await redis.hSet("groceryList", {
+ eggs: "12",
+ apples: "3",
+ milk: "1",
});
// Get the groceryList record
- const record = await redis.hGetAll('groceryList');
+ const record = await redis.hGetAll("groceryList");
if (record != undefined) {
- console.log('Eggs: ' + record.eggs + ', Apples: ' + record.apples + ', Milk: ' + record.milk);
+ console.log(
+ "Eggs: " +
+ record.eggs +
+ ", Apples: " +
+ record.apples +
+ ", Milk: " +
+ record.milk
+ );
}
}
```
@@ -349,13 +364,13 @@ Eggs: 12, Apples: 3, Milk: 1
```tsx
// Example using hKeys()
async function hashExample3() {
- await redis.hSet('prices', {
- chair: '48',
- desk: '95',
- whiteboard: '23',
+ await redis.hSet("prices", {
+ chair: "48",
+ desk: "95",
+ whiteboard: "23",
});
- console.log('Keys: ' + (await redis.hKeys('prices')));
+ console.log("Keys: " + (await redis.hKeys("prices")));
}
```
@@ -370,14 +385,14 @@ Keys: chair,desk,whiteboard
```tsx
// Example using hScan()
async function hashExample4() {
- await redis.hSet('userInfo', {
- name: 'Bob',
- startDate: '01-05-20',
- totalAwards: '12',
+ await redis.hSet("userInfo", {
+ name: "Bob",
+ startDate: "01-05-20",
+ totalAwards: "12",
});
  // Scan and iterate over all the fields within 'userInfo'
- const hScanResponse = await redis.hScan('userInfo', 0);
+ const hScanResponse = await redis.hScan("userInfo", 0);
hScanResponse.fieldValues.forEach((x) => {
console.log("Field: '" + x.field + "', Value: '" + x.value + "'");
@@ -399,10 +414,10 @@ Field: 'startDate', Value: '01-05-20'
// Example using hIncrBy()
async function hashExample5() {
// Set user123's karma to 100
- await redis.hSet('user123', { karma: '100' });
+ await redis.hSet("user123", { karma: "100" });
// Increase user123's karma by 5
- console.log('Updated karma: ' + (await redis.hIncrBy('user123', 'karma', 5)));
+ console.log("Updated karma: " + (await redis.hIncrBy("user123", "karma", 5)));
}
```
@@ -417,14 +432,14 @@ Updated karma: 105
```tsx
// Example using hLen()
async function hashExample6() {
- await redis.hSet('supplies', {
- paperclips: '25',
- pencils: '10',
- erasers: '5',
- pens: '7',
+ await redis.hSet("supplies", {
+ paperclips: "25",
+ pencils: "10",
+ erasers: "5",
+ pens: "7",
});
- console.log('Number of fields: ' + (await redis.hLen('supplies')));
+ console.log("Number of fields: " + (await redis.hLen("supplies")));
}
```
@@ -436,8 +451,8 @@ Number of fields: 4
### Numbers
-| **Command** | **Action** | **Limits** |
-| ------------------------------------------ | ------------------------------------------------- | ---------- |
+| **Command** | **Action** | **Limits** |
+| ------------------------------------------ | ------------------------------------------------- | ------------------------------------------------------------------------- |
| [incrBy](https://redis.io/commands/incrby) | Increments the number stored at key by increment. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
@@ -447,9 +462,9 @@ Number of fields: 4
```tsx
async function numbersExample() {
- await redis.set('totalPoints', '53');
+ await redis.set("totalPoints", "53");
- console.log('Updated points: ' + (await redis.incrBy('totalPoints', 100)));
+ console.log("Updated points: " + (await redis.incrBy("totalPoints", 100)));
}
```
@@ -463,8 +478,8 @@ Updated points: 153
| **Command** | **Action** | **Limits** |
| --------------------------------------------------- | ----------------------------------------------------------------- | ---------- |
-| [expire](https://redis.io/commands/expire/) | Sets a timeout on key. | None |
-| [expireTime](https://redis.io/commands/expiretime/) | Returns the remaining seconds at which the given key will expire. | None |
+| [expire](https://redis.io/commands/expire/) | Sets a timeout on key. | None |
+| [expireTime](https://redis.io/commands/expiretime/) | Returns the remaining number of seconds until the given key expires. | None       |
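TTLs pair naturally with counters when you only need data for a short window. The sketch below is illustrative (the key layout and 60-second window are assumptions, not API values); it combines `incrBy` with `expire` so the key removes itself.

```tsx
// Illustrative sketch: a self-expiring counter. The first increment in a window sets the TTL.
async function countRecentActions(userId: string) {
  const key = `recent-actions:${userId}`; // hypothetical key layout
  const count = await redis.incrBy(key, 1); // creates the key at 0 if it doesn't exist yet
  if (count === 1) {
    await redis.expire(key, 60); // expire the counter 60 seconds after the first action
  }
  return count;
}
```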
@@ -474,16 +489,16 @@ Updated points: 153
```tsx
async function keyExpirationExample() {
// Set a key 'product' with value 'milk'
- await redis.set('product', 'milk');
+ await redis.set("product", "milk");
// Get the current expireTime for the product
- console.log('Expire time: ' + (await redis.expireTime('product')));
+ console.log("Expire time: " + (await redis.expireTime("product")));
// Set the product to expire in 60 seconds
- await redis.expire('product', 60);
+ await redis.expire("product", 60);
// Get the updated expireTime for the product
- console.log('Updated expire time: ' + (await redis.expireTime('product')));
+ console.log("Updated expire time: " + (await redis.expireTime("product")));
}
```
@@ -508,13 +523,13 @@ You can sequence all of the above steps in a single transaction using `multi` an
If an error occurs inside a transaction before `exec` is called, Redis discards the transaction automatically. See the Redis docs: [Errors inside a transaction](https://redis.io/docs/latest/develop/interact/transactions/#errors-inside-a-transaction) for more info.
-| **Command** | **Action** | **Limits** |
-| --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
+| **Command** | **Action** | **Limits** |
+| --------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------- |
| [multi](https://redis.io/commands/multi/) | Marks the start of a transaction block. | Max concurrent transactions per installation: 20 (default) |
-| [exec](https://redis.io/commands/exec/) | Executes all previously queued commands in a transaction and restores the connection state to normal. | Transaction execution timeout: 5 seconds |
-| [discard](https://redis.io/commands/discard/) | Flushes all previously queued commands in a transaction and restores the connection state to normal. | None |
-| [watch](https://redis.io/commands/watch/) | Marks the given keys to be watched for conditional execution of a transaction. `watch` returns a [TxClientLike](https://developers.reddit.com/docs/api/public-api/#-txclientlike) which should be used to call Redis commands in a transaction. | None |
-| [unwatch](https://redis.io/commands/unwatch/) | Flushes all the previously watched keys for a transaction. | None |
+| [exec](https://redis.io/commands/exec/) | Executes all previously queued commands in a transaction and restores the connection state to normal. | Transaction execution timeout: 5 seconds |
+| [discard](https://redis.io/commands/discard/) | Flushes all previously queued commands in a transaction and restores the connection state to normal. | None |
+| [watch](https://redis.io/commands/watch/) | Marks the given keys to be watched for conditional execution of a transaction. `watch` returns a [TxClientLike](https://developers.reddit.com/docs/api/public-api/#-txclientlike) which should be used to call Redis commands in a transaction. | None |
+| [unwatch](https://redis.io/commands/unwatch/) | Flushes all the previously watched keys for a transaction. | None |
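To make the conditional nature of `watch` concrete, here is a hedged sketch (not from the docs): it decrements an inventory counter only if the watched key is unmodified between `watch` and `exec`. The key name and values are illustrative.

```tsx
// Illustrative sketch: the queued write only applies if 'inventory' is untouched while watched.
async function reserveOneItem() {
  await redis.set("inventory", "3");

  const txn = await redis.watch("inventory"); // returns a TxClientLike
  const current = Number(await redis.get("inventory"));

  if (current <= 0) {
    await txn.unwatch(); // nothing left to reserve; stop watching
    return false;
  }

  await txn.multi(); // begin queueing commands
  await txn.set("inventory", String(current - 1));
  await txn.exec(); // applied only if 'inventory' did not change after watch()

  return true;
}
```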
@@ -526,18 +541,18 @@ If an error occurs inside a transaction before `exec` is called, Redis discards
```tsx
// Example using exec()
async function transactionsExample1() {
- await redis.mSet({ quantity: '5', karma: '32' });
+ await redis.mSet({ quantity: "5", karma: "32" });
- const txn = await redis.watch('quantity');
+ const txn = await redis.watch("quantity");
await txn.multi(); // Begin a transaction
- await txn.incrBy('karma', 10);
- await txn.set('name', 'Devvit');
+ await txn.incrBy("karma", 10);
+ await txn.set("name", "Devvit");
await txn.exec(); // Execute the commands in the transaction
console.log(
- 'Keys after completing transaction: ' +
- (await redis.mGet(['quantity', 'karma', 'name']))
+ "Keys after completing transaction: " +
+ (await redis.mGet(["quantity", "karma", "name"]))
);
}
```
@@ -553,15 +568,15 @@ Keys after completing transaction: 5,42,Devvit
```tsx
// Example using discard()
async function transactionsExample2() {
- await redis.set('price', '25');
+ await redis.set("price", "25");
- const txn = await redis.watch('price');
+ const txn = await redis.watch("price");
await txn.multi(); // Begin a transaction
- await txn.incrBy('price', 5);
+ await txn.incrBy("price", 5);
await txn.discard(); // Discard the commands in the transaction
- console.log('Price value: ' + (await redis.get('price'))); // 'price' should still be '25'
+ console.log("Price value: " + (await redis.get("price"))); // 'price' should still be '25'
}
```
@@ -576,21 +591,21 @@ Price value: 25
```tsx
// Example using unwatch()
async function transactionsExample3() {
- await redis.set('gold', '50');
+ await redis.set("gold", "50");
- const txn = await redis.watch('gold');
+ const txn = await redis.watch("gold");
await txn.multi(); // Begin a transaction
- await txn.incrBy('gold', 30);
+ await txn.incrBy("gold", 30);
await txn.unwatch(); // Unwatch "gold"
// Now that "gold" has been unwatched, we can increment its value
// outside the transaction without canceling the transaction
- await redis.incrBy('gold', -20);
+ await redis.incrBy("gold", -20);
await txn.exec(); // Execute the commands in the transaction
- console.log('Gold value: ' + (await redis.get('gold'))); // The value of 'gold' should be 50 + 30 - 20 = 60
+ console.log("Gold value: " + (await redis.get("gold"))); // The value of 'gold' should be 50 + 30 - 20 = 60
}
```
@@ -602,19 +617,19 @@ Gold value: 60
### Sorted set
-| **Command** | **Action** | **Limits** |
-| --------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- |
-| [zAdd](https://redis.io/commands/zadd/) | Adds all the specified members with the specified scores to the sorted set stored at key. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [zCard](https://redis.io/commands/zcard) | Returns the sorted set cardinality (number of elements) of the sorted set stored at key. | None |
+| **Command** | **Action** | **Limits** |
+| --------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [zAdd](https://redis.io/commands/zadd/) | Adds all the specified members with the specified scores to the sorted set stored at key. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
+| [zCard](https://redis.io/commands/zcard) | Returns the sorted set cardinality (number of elements) of the sorted set stored at key. | None |
| [zRange](https://redis.io/commands/zrange/) | Returns the specified range of elements in the sorted set stored at key.
When using `by: 'lex'`, the start and stop inputs will be prepended with `[` by default, unless they already begin with `[`, `(` or are one of the special values `+` or `-`. | BYSCORE/BYLEX: LIMIT count capped to 1000 per call (server default). RANK: no server cap. Client default for by: 'score'/'lex' is count=1000 when no limit is provided. |
-| [zRem](https://redis.io/commands/zrem/) | Removes the specified members from the sorted set stored at key. | None |
-| [zScore](https://redis.io/commands/zscore/) | Returns the score of member in the sorted set at key. | None |
-| [zRank](https://redis.io/commands/zrank/) | Returns the rank of member in the sorted set stored at key. | None |
-| [zIncrBy](https://redis.io/commands/zincrby/) | Increments the score of member in the sorted set stored at key by value. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
-| [zScan](https://redis.io/commands/zscan/) | Iterates elements of sorted set types and their associated scores. Note that there is no guaranteed ordering of elements in the result. | No server-side cap; uses requested count |
-| [zRemRangeByLex](https://redis.io/commands/zremrangebylex/) | When all elements in a sorted set are inserted with the same score, this command removes the elements at key between the lexicographical range specified by min and max. | None |
-| [zRemRangeByRank](https://redis.io/commands/zremrangebyrank/) | Removes all elements in the sorted set stored at key with rank between start and stop. | None |
-| [zRemRangeByScore](https://redis.io/commands/zremrangebyscore/) | Removes all elements in the sorted set stored at key with a score between min and max (inclusive). | None |
+| [zRem](https://redis.io/commands/zrem/) | Removes the specified members from the sorted set stored at key. | None |
+| [zScore](https://redis.io/commands/zscore/) | Returns the score of member in the sorted set at key. | None |
+| [zRank](https://redis.io/commands/zrank/) | Returns the rank of member in the sorted set stored at key. | None |
+| [zIncrBy](https://redis.io/commands/zincrby/) | Increments the score of member in the sorted set stored at key by value. | Subject to storage quota gating (writes may be blocked if quota exceeded) |
+| [zScan](https://redis.io/commands/zscan/) | Iterates elements of sorted set types and their associated scores. Note that there is no guaranteed ordering of elements in the result. | No server-side cap; uses requested count |
+| [zRemRangeByLex](https://redis.io/commands/zremrangebylex/) | When all elements in a sorted set are inserted with the same score, this command removes the elements at key between the lexicographical range specified by min and max. | None |
+| [zRemRangeByRank](https://redis.io/commands/zremrangebyrank/) | Removes all elements in the sorted set stored at key with rank between start and stop. | None |
+| [zRemRangeByScore](https://redis.io/commands/zremrangebyscore/) | Removes all elements in the sorted set stored at key with a score between min and max (inclusive). | None |
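Because by-score and by-lex reads are capped at 1000 results per call, large sorted sets are easier to read in pages. The sketch below is illustrative, not from the docs; `PAGE_SIZE` is an assumed constant, and rank-based ranges are used because they have no server-side cap.

```tsx
// Illustrative sketch: read a leaderboard in fixed-size pages by rank.
const PAGE_SIZE = 500; // assumed value, not an API limit

async function readLeaderboardInPages(key: string) {
  const total = await redis.zCard(key);
  for (let start = 0; start < total; start += PAGE_SIZE) {
    const stop = Math.min(start + PAGE_SIZE, total) - 1; // rank ranges are inclusive
    const page = await redis.zRange(key, start, stop, { by: "rank" });
    page.forEach((entry) => console.log(`${entry.member}: ${entry.score}`));
  }
}
```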
@@ -627,29 +642,31 @@ Gold value: 60
// Example using zRange() with by 'score'
async function sortedSetExample1() {
await redis.zAdd(
- 'leaderboard',
- { member: 'louis', score: 37 },
- { member: 'fernando', score: 10 },
- { member: 'caesar', score: 20 },
- { member: 'alexander', score: 25 }
+ "leaderboard",
+ { member: "louis", score: 37 },
+ { member: "fernando", score: 10 },
+ { member: "caesar", score: 20 },
+ { member: "alexander", score: 25 }
);
// Cardinality should be '4' as there are 4 elements in the leaderboard set
- console.log('Cardinality: ' + (await redis.zCard('leaderboard')));
+ console.log("Cardinality: " + (await redis.zCard("leaderboard")));
// View elements with scores between 0 and 30 inclusive, sorted by score
- let scores = await redis.zRange('leaderboard', 0, 30, { by: 'score' });
- console.log('Scores: ' + JSON.stringify(scores));
+ let scores = await redis.zRange("leaderboard", 0, 30, { by: "score" });
+ console.log("Scores: " + JSON.stringify(scores));
// Remove 'fernando' from the leaderboard
- await redis.zRem('leaderboard', ['fernando']);
+ await redis.zRem("leaderboard", ["fernando"]);
// View the elements sorted by score again. This time 'fernando' should not appear in the output
- scores = await redis.zRange('leaderboard', 0, 30, { by: 'score' });
- console.log('Updated scores: ' + JSON.stringify(scores));
+ scores = await redis.zRange("leaderboard", 0, 30, { by: "score" });
+ console.log("Updated scores: " + JSON.stringify(scores));
// View caesar's score
- console.log("Caesar's score: " + (await redis.zScore('leaderboard', 'caesar')));
+ console.log(
+ "Caesar's score: " + (await redis.zScore("leaderboard", "caesar"))
+ );
}
```
@@ -668,17 +685,19 @@ Caesar's score: 20
// Example using zRange() with by 'lex'
async function sortedSetExample2() {
await redis.zAdd(
- 'checkpoints',
- { member: 'delta', score: 0 },
- { member: 'omega', score: 0 },
- { member: 'alpha', score: 0 },
- { member: 'charlie', score: 0 }
+ "checkpoints",
+ { member: "delta", score: 0 },
+ { member: "omega", score: 0 },
+ { member: "alpha", score: 0 },
+ { member: "charlie", score: 0 }
);
// View elements between the words 'alpha' and 'fox' inclusive, sorted lexicographically
// Note that 'by: "lex"' only works if all elements have the same score
- const members = await redis.zRange('checkpoints', 'alpha', 'fox', { by: 'lex' });
- console.log('Members: ' + JSON.stringify(members));
+ const members = await redis.zRange("checkpoints", "alpha", "fox", {
+ by: "lex",
+ });
+ console.log("Members: " + JSON.stringify(members));
}
```
@@ -694,17 +713,17 @@ Members: [{"score":0,"member":"alpha"},{"score":0,"member":"charlie"},{"score":0
// Example using zRange() with by 'rank'
async function sortedSetExample3() {
await redis.zAdd(
- 'grades',
- { member: 'sam', score: 80 },
- { member: 'norma', score: 95 },
- { member: 'alex', score: 77 },
- { member: 'don', score: 84 },
- { member: 'zeek', score: 92 }
+ "grades",
+ { member: "sam", score: 80 },
+ { member: "norma", score: 95 },
+ { member: "alex", score: 77 },
+ { member: "don", score: 84 },
+ { member: "zeek", score: 92 }
);
// View elements with a rank between 2 and 4 inclusive. Note that ranks start at index 0.
- const members = await redis.zRange('grades', 2, 4, { by: 'rank' });
- console.log('Members: ' + JSON.stringify(members));
+ const members = await redis.zRange("grades", 2, 4, { by: "rank" });
+ console.log("Members: " + JSON.stringify(members));
}
```
@@ -720,26 +739,27 @@ Members: [{"score":84,"member":"don"},{"score":92,"member":"zeek"},{"score":95,"
// Example using zRank() and zIncrBy()
async function sortedSetExample4() {
await redis.zAdd(
- 'animals',
- { member: 'zebra', score: 92 },
- { member: 'cat', score: 100 },
- { member: 'dog', score: 95 },
- { member: 'elephant', score: 97 }
+ "animals",
+ { member: "zebra", score: 92 },
+ { member: "cat", score: 100 },
+ { member: "dog", score: 95 },
+ { member: "elephant", score: 97 }
);
// View the rank of 'dog' in the animals set
// Rank should be '1' since 'dog' has the second lowest score. Note that ranks start at index 0.
- console.log("Dog's rank: " + (await redis.zRank('animals', 'dog')));
+ console.log("Dog's rank: " + (await redis.zRank("animals", "dog")));
// View the rank of 'zebra'
- console.log("Zebra's rank: " + (await redis.zRank('animals', 'zebra')));
+ console.log("Zebra's rank: " + (await redis.zRank("animals", "zebra")));
// Increase the score of 'dog' by 10
- await redis.zIncrBy('animals', 'dog', 10);
+ await redis.zIncrBy("animals", "dog", 10);
// View the rank of 'dog' again. This time it should be '3' because dog has the highest score.
console.log(
- "Dog's rank after incrementing score: " + (await redis.zRank('animals', 'dog'))
+ "Dog's rank after incrementing score: " +
+ (await redis.zRank("animals", "dog"))
);
}
```
@@ -758,21 +778,21 @@ Dog's rank after incrementing score: 3
// Example using zRemRangeByLex()
async function sortedSetExample5() {
await redis.zAdd(
- 'fruits',
- { member: 'kiwi', score: 0 },
- { member: 'mango', score: 0 },
- { member: 'banana', score: 0 },
- { member: 'orange', score: 0 },
- { member: 'apple', score: 0 }
+ "fruits",
+ { member: "kiwi", score: 0 },
+ { member: "mango", score: 0 },
+ { member: "banana", score: 0 },
+ { member: "orange", score: 0 },
+ { member: "apple", score: 0 }
);
// Remove fruits alphabetically ordered between 'kiwi' inclusive and 'orange' exclusive
// Note: The symbols '[' and '(' indicate inclusive or exclusive, respectively. These must be included in the call to zRemRangeByLex().
- await redis.zRemRangeByLex('fruits', '[kiwi', '(orange');
+ await redis.zRemRangeByLex("fruits", "[kiwi", "(orange");
// Only 'apple', 'banana', and 'orange' should remain in the set
- const zScanResponse = await redis.zScan('fruits', 0);
- console.log('zScanResponse: ' + JSON.stringify(zScanResponse));
+ const zScanResponse = await redis.zScan("fruits", 0);
+ console.log("zScanResponse: " + JSON.stringify(zScanResponse));
}
```
@@ -788,20 +808,20 @@ zScanResponse: {"cursor":0,"members":[{"score":0,"member":"apple"},{"score":0,"m
// Example using zRemRangeByRank()
async function sortedSetExample6() {
await redis.zAdd(
- 'fruits',
- { member: 'kiwi', score: 10 },
- { member: 'mango', score: 20 },
- { member: 'banana', score: 30 },
- { member: 'orange', score: 40 },
- { member: 'apple', score: 50 }
+ "fruits",
+ { member: "kiwi", score: 10 },
+ { member: "mango", score: 20 },
+ { member: "banana", score: 30 },
+ { member: "orange", score: 40 },
+ { member: "apple", score: 50 }
);
// Remove fruits ranked 1 through 3 inclusive
- await redis.zRemRangeByRank('fruits', 1, 3);
+ await redis.zRemRangeByRank("fruits", 1, 3);
// Only 'kiwi' and 'apple' should remain in the set
- const zScanResponse = await redis.zScan('fruits', 0);
- console.log('zScanResponse: ' + JSON.stringify(zScanResponse));
+ const zScanResponse = await redis.zScan("fruits", 0);
+ console.log("zScanResponse: " + JSON.stringify(zScanResponse));
}
```
@@ -817,20 +837,20 @@ zScanResponse: {"cursor":0,"members":[{"score":10,"member":"kiwi"},{"score":50,"
// Example using zRemRangeByScore() example
async function sortedSetExample7() {
await redis.zAdd(
- 'fruits',
- { member: 'kiwi', score: 10 },
- { member: 'mango', score: 20 },
- { member: 'banana', score: 30 },
- { member: 'orange', score: 40 },
- { member: 'apple', score: 50 }
+ "fruits",
+ { member: "kiwi", score: 10 },
+ { member: "mango", score: 20 },
+ { member: "banana", score: 30 },
+ { member: "orange", score: 40 },
+ { member: "apple", score: 50 }
);
// Remove fruits scored between 30 and 50 inclusive
- await redis.zRemRangeByScore('fruits', 30, 50);
+ await redis.zRemRangeByScore("fruits", 30, 50);
// Only 'kiwi' and 'mango' should remain in the set
- const zScanResponse = await redis.zScan('fruits', 0);
- console.log('zScanResponse: ' + JSON.stringify(zScanResponse));
+ const zScanResponse = await redis.zScan("fruits", 0);
+ console.log("zScanResponse: " + JSON.stringify(zScanResponse));
}
```
@@ -842,8 +862,8 @@ zScanResponse: {"cursor":0,"members":[{"score":10,"member":"kiwi"},{"score":20,"
### Bitfield
-| **Command** | **Action** | **Limits** |
-| ----------------------------------------------------------- | ------------------------------------------------- | ---------- |
+| **Command** | **Action** | **Limits** |
+| ----------------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------------------------------- |
| [bitfield](https://redis.io/docs/latest/commands/bitfield/) | Performs a sequence of operations on a bit string | Subject to storage quota gating (writes may be blocked if quota exceeded) |
@@ -853,40 +873,40 @@ zScanResponse: {"cursor":0,"members":[{"score":10,"member":"kiwi"},{"score":20,"
```tsx
async function bitfieldExample() {
- const setBits: number[] = await redis.bitfield('foo', 'set', 'i5', '#0', 11);
- console.log('Set result: ' + setBits); // [0]
+ const setBits: number[] = await redis.bitfield("foo", "set", "i5", "#0", 11);
+ console.log("Set result: " + setBits); // [0]
- const getBits: number[] = await redis.bitfield('foo', 'get', 'i5', '#0');
- console.log('Get result: ' + setBits); // [11]
+ const getBits: number[] = await redis.bitfield("foo", "get", "i5", "#0");
+ console.log("Get result: " + setBits); // [11]
const manyOperations: number[] = await redis.bitfield(
- 'bar',
- 'set',
- 'u2',
+ "bar",
+ "set",
+ "u2",
0,
3,
- 'get',
- 'u2',
+ "get",
+ "u2",
0,
- 'incrBy',
- 'u2',
+ "incrBy",
+ "u2",
0,
1,
- 'overflow',
- 'sat',
- 'get',
- 'u2',
+ "overflow",
+ "sat",
+ "get",
+ "u2",
0,
- 'set',
- 'u2',
+ "set",
+ "u2",
0,
3,
- 'incrBy',
- 'u2',
+ "incrBy",
+ "u2",
0,
1
);
- console.log('Results of many operations: ' + manyOperations); // [0, 3, 0, 0, 3, 3]
+ console.log("Results of many operations: " + manyOperations); // [0, 3, 0, 0, 3, 3]
}
```
@@ -905,7 +925,7 @@ To use it, update your import:
```ts
// import { redis } from '@devvit/redis';
-import { redisCompressed as redis } from '@devvit/redis';
+import { redisCompressed as redis } from "@devvit/redis";
```
:::warning
@@ -913,6 +933,7 @@ import { redisCompressed as redis } from '@devvit/redis';
:::
The `redisCompressed` client automatically:
+
- Compresses values on write (`set`, `hSet`, `mSet`, `hSetNX`) if it saves space.
- Decompresses values on read (`get`, `hGet`, `mGet`, `hMGet`, `hGetAll`).
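Reads and writes look exactly the same as with the plain client; only the stored bytes change. The snippet below is an illustrative sketch (the key and payload are assumptions), showing a value round-tripping through `set` and `get` unchanged.

```ts
import { redisCompressed as redis } from "@devvit/redis";

// Illustrative sketch: the compressed client is a drop-in replacement for reads and writes.
async function compressedRoundTrip() {
  const payload = JSON.stringify({ blob: "x".repeat(10_000) }); // hypothetical large value
  await redis.set("cache:payload", payload); // stored compressed when that saves space
  const restored = await redis.get("cache:payload"); // returned decompressed
  console.log("Round trip intact: " + (restored === payload));
}
```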
@@ -964,10 +985,15 @@ Add these route handlers to your server.
```ts
-import { redis, scheduler, type TaskRequest, type TaskResponse } from '@devvit/web/server';
+import {
+ redis,
+ scheduler,
+ type TaskRequest,
+ type TaskResponse,
+} from "@devvit/web/server";
// Import the compressed client
-import { redisCompressed } from '@devvit/redis';
-import type { MenuItemRequest, UiResponse } from '@devvit/web/shared';
+import { redisCompressed } from "@devvit/redis";
+import type { MenuItemRequest, UiResponse } from "@devvit/web/shared";
type MigrateExampleFormRequest = {
startCursor?: string;
@@ -980,28 +1006,28 @@ type MigrateExampleJobData = {
processed?: number;
};
-const MY_DATA_HASH_KEY = 'my:app:large:dataset';
+const MY_DATA_HASH_KEY = "my:app:large:dataset";
// 1. Menu Endpoint: Returns the form definition
-app.post('/internal/menu/ops/migrate-example', async (c) => {
+app.post("/internal/menu/ops/migrate-example", async (c) => {
const _request = await c.req.json();
return c.json({
showForm: {
- name: 'migrateExampleForm', // Must match key in devvit.json "forms"
+ name: "migrateExampleForm", // Must match key in devvit.json "forms"
form: {
- title: 'Migrate Hash to Compression',
- acceptLabel: 'Start Migration',
+ title: "Migrate Hash to Compression",
+ acceptLabel: "Start Migration",
fields: [
{
- name: 'startCursor',
- label: 'Start Cursor (0 for beginning)',
- type: 'string',
- defaultValue: '0',
+ name: "startCursor",
+ label: "Start Cursor (0 for beginning)",
+ type: "string",
+ defaultValue: "0",
},
{
- name: 'chunkSize',
- label: 'Items per batch',
- type: 'number',
+ name: "chunkSize",
+ label: "Items per batch",
+ type: "number",
defaultValue: 20000,
},
],
@@ -1011,18 +1037,20 @@ app.post('/internal/menu/ops/migrate-example', async (c) => {
});
// 2. Form Handler: Receives input and schedules the first job
-app.post('/internal/form/ops/migrate-example', async (c) => {
- const body = await c.req.json().catch(
- () => ({} as MigrateExampleFormRequest)
- );
- const cursor = body.startCursor || '0';
+app.post("/internal/form/ops/migrate-example", async (c) => {
+ const body = await c.req
+ .json()
+ .catch(() => ({} as MigrateExampleFormRequest));
+ const cursor = body.startCursor || "0";
const size = Number(body.chunkSize) || 20000;
- console.log(`[Migration] Manual start requested. Cursor: ${cursor}, Chunk: ${size}`);
+ console.log(
+ `[Migration] Manual start requested. Cursor: ${cursor}, Chunk: ${size}`
+ );
// Kick off the first job in the chain
await scheduler.runJob({
- name: 'migrate-example-data',
+ name: "migrate-example-data",
runAt: new Date(), // Run immediately
data: {
cursor,
@@ -1033,27 +1061,29 @@ app.post('/internal/form/ops/migrate-example', async (c) => {
return c.json({
showToast: {
- text: 'Migration started in background',
- appearance: 'success',
+ text: "Migration started in background",
+ appearance: "success",
},
});
});
// 3. Scheduler Endpoint: The recursive worker
-app.post('/internal/scheduler/migrate-example-data', async (c) => {
+app.post("/internal/scheduler/migrate-example-data", async (c) => {
const startTime = Date.now();
try {
- const body = await c.req.json>().catch(
- () => ({} as TaskRequest)
- );
+ const body = await c.req
+      .json<TaskRequest<MigrateExampleJobData>>()
+ .catch(() => ({} as TaskRequest));
const data = body.data;
let cursor = Number(data?.cursor) || 0;
const chunkSize = Number(data?.chunkSize) || 20000;
const processedTotal = Number(data?.processed) || 0;
- console.log(`[Migration] Job started. Cursor: ${cursor}, Target Chunk: ${chunkSize}`);
+ console.log(
+ `[Migration] Job started. Cursor: ${cursor}, Target Chunk: ${chunkSize}`
+ );
let keepRunning = true;
let processedInJob = 0;
@@ -1100,7 +1130,7 @@ app.post('/internal/scheduler/migrate-example-data', async (c) => {
// Safety: Check execution time.
// If we are close to 30s (Devvit limit), stop early and requeue.
if (Date.now() - startTime > 20000) {
- console.log('[Migration] Time limit approaching, stopping early.');
+ console.log("[Migration] Time limit approaching, stopping early.");
keepRunning = false;
}
}
@@ -1111,9 +1141,11 @@ app.post('/internal/scheduler/migrate-example-data', async (c) => {
// If the cursor is not 0, we still have more data to scan.
// We schedule *this same job* to run again immediately.
if (cursor !== 0) {
- console.log(`[Migration] Requeueing. Next cursor: ${cursor}. Processed so far: ${newTotal}`);
+ console.log(
+ `[Migration] Requeueing. Next cursor: ${cursor}. Processed so far: ${newTotal}`
+ );
await scheduler.runJob({
- name: 'migrate-example-data',
+ name: "migrate-example-data",
runAt: new Date(),
data: {
cursor,
@@ -1122,14 +1154,21 @@ app.post('/internal/scheduler/migrate-example-data', async (c) => {
},
});
- return c.json({ status: 'requeued', processed: newTotal, cursor });
+ return c.json({
+ status: "requeued",
+ processed: newTotal,
+ cursor,
+ });
}
console.log(`[Migration] COMPLETE. Total items processed: ${newTotal}`);
- return c.json({ status: 'success', processed: newTotal });
+ return c.json({ status: "success", processed: newTotal });
} catch (error) {
- console.error('[Migration] Critical Job Error', error);
- return c.json({ status: 'error', message: error.message }, 500);
+ console.error("[Migration] Critical Job Error", error);
+ return c.json(
+ { status: "error", message: error.message },
+ 500
+ );
}
});
```
@@ -1138,10 +1177,15 @@ app.post('/internal/scheduler/migrate-example-data', async (c) => {
```ts
-import { redis, scheduler, type TaskRequest, type TaskResponse } from '@devvit/web/server';
+import {
+ redis,
+ scheduler,
+ type TaskRequest,
+ type TaskResponse,
+} from "@devvit/web/server";
// Import the compressed client
-import { redisCompressed } from '@devvit/redis';
-import type { MenuItemRequest, UiResponse } from '@devvit/web/shared';
+import { redisCompressed } from "@devvit/redis";
+import type { MenuItemRequest, UiResponse } from "@devvit/web/shared";
type MigrateExampleFormRequest = {
startCursor?: string;
@@ -1154,51 +1198,54 @@ type MigrateExampleJobData = {
processed?: number;
};
-const MY_DATA_HASH_KEY = 'my:app:large:dataset';
+const MY_DATA_HASH_KEY = "my:app:large:dataset";
// 1. Menu Endpoint: Returns the form definition
app.post(
- '/internal/menu/ops/migrate-example',
+ "/internal/menu/ops/migrate-example",
async (_req, res) => {
res.json({
showForm: {
- name: 'migrateExampleForm', // Must match key in devvit.json "forms"
+ name: "migrateExampleForm", // Must match key in devvit.json "forms"
form: {
- title: 'Migrate Hash to Compression',
- acceptLabel: 'Start Migration',
+ title: "Migrate Hash to Compression",
+ acceptLabel: "Start Migration",
fields: [
{
- name: 'startCursor',
- label: 'Start Cursor (0 for beginning)',
- type: 'string',
- defaultValue: '0',
+ name: "startCursor",
+ label: "Start Cursor (0 for beginning)",
+ type: "string",
+ defaultValue: "0",
},
{
- name: 'chunkSize',
- label: 'Items per batch',
- type: 'number',
+ name: "chunkSize",
+ label: "Items per batch",
+ type: "number",
defaultValue: 20000,
},
],
},
},
});
- },
+ }
);
// 2. Form Handler: Receives input and schedules the first job
app.post(
- '/internal/form/ops/migrate-example',
+ "/internal/form/ops/migrate-example",
async (req, res) => {
- const { startCursor, chunkSize } = req.body ?? ({} as MigrateExampleFormRequest);
- const cursor = startCursor || '0';
+ const { startCursor, chunkSize } =
+ req.body ?? ({} as MigrateExampleFormRequest);
+ const cursor = startCursor || "0";
const size = Number(chunkSize) || 20000;
- console.log(`[Migration] Manual start requested. Cursor: ${cursor}, Chunk: ${size}`);
+ console.log(
+ `[Migration] Manual start requested. Cursor: ${cursor}, Chunk: ${size}`
+ );
// Kick off the first job in the chain
await scheduler.runJob({
- name: 'migrate-example-data',
+ name: "migrate-example-data",
runAt: new Date(), // Run immediately
data: {
cursor,
@@ -1209,16 +1256,16 @@ app.post(
res.json({
showToast: {
- text: 'Migration started in background',
- appearance: 'success',
+ text: "Migration started in background",
+ appearance: "success",
},
});
- },
+ }
);
// 3. Scheduler Endpoint: The recursive worker
app.post<TaskRequest<MigrateExampleJobData>>(
- '/internal/scheduler/migrate-example-data',
+ "/internal/scheduler/migrate-example-data",
async (req, res) => {
const startTime = Date.now();
@@ -1229,7 +1276,9 @@ app.post>(
const chunkSize = Number(data?.chunkSize) || 20000;
const processedTotal = Number(data?.processed) || 0;
- console.log(`[Migration] Job started. Cursor: ${cursor}, Target Chunk: ${chunkSize}`);
+ console.log(
+ `[Migration] Job started. Cursor: ${cursor}, Target Chunk: ${chunkSize}`
+ );
let keepRunning = true;
let processedInJob = 0;
@@ -1276,7 +1325,7 @@ app.post>(
// Safety: Check execution time.
// If we are close to 30s (Devvit limit), stop early and requeue.
if (Date.now() - startTime > 20000) {
- console.log('[Migration] Time limit approaching, stopping early.');
+ console.log("[Migration] Time limit approaching, stopping early.");
keepRunning = false;
}
}
@@ -1287,9 +1336,11 @@ app.post>(
// If the cursor is not 0, we still have more data to scan.
// We schedule *this same job* to run again immediately.
if (cursor !== 0) {
- console.log(`[Migration] Requeueing. Next cursor: ${cursor}. Processed so far: ${newTotal}`);
+ console.log(
+ `[Migration] Requeueing. Next cursor: ${cursor}. Processed so far: ${newTotal}`
+ );
await scheduler.runJob({
- name: 'migrate-example-data',
+ name: "migrate-example-data",
runAt: new Date(),
data: {
cursor,
@@ -1298,16 +1349,16 @@ app.post>(
},
});
- res.json({ status: 'requeued', processed: newTotal, cursor });
+ res.json({ status: "requeued", processed: newTotal, cursor });
} else {
console.log(`[Migration] COMPLETE. Total items processed: ${newTotal}`);
- res.json({ status: 'success', processed: newTotal });
+ res.json({ status: "success", processed: newTotal });
}
} catch (error) {
- console.error('[Migration] Critical Job Error', error);
- res.status(500).json({ status: 'error', message: error.message });
+ console.error("[Migration] Critical Job Error", error);
+ res.status(500).json({ status: "error", message: error.message });
}
- },
+ }
);
```