Stream Feeds includes moderation support for managing user interactions, moderating content, and keeping your platform safe. It's accessible through the client.moderation property.
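A minimal JavaScript sketch of where the API lives (the ban call is covered in detail below; other fields on it are optional per the parameter notes in this page):

// Every moderation method on this page is invoked through client.moderation.
const moderation = client.moderation;
await moderation.ban({ target_user_id: "user-123" });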
Ban a user from the platform with various options including timeout, shadow bans, and IP bans.
let banRequest = BanRequest(
    targetUserId: "user-123",
    reason: "Violation of community guidelines",
    timeout: 3600, // 1 hour in seconds
    shadow: false,
    ipBan: false
)
let response = try await client.moderation.ban(banRequest: banRequest)
print("User banned for: \(response.duration)")
val banRequest = BanRequest(
    targetUserId = "user-123",
    reason = "Violation of community guidelines",
    timeout = 3600, // 1 hour in seconds
    shadow = false,
    ipBan = false
)
val response: Result<BanResponse> = client.moderation.ban(banRequest = banRequest)
println("User banned for: ${response.getOrNull()?.duration}")
await client.moderation.ban({
  target_user_id: "user-123",
  reason: "Violation of community guidelines",
  timeout: 3600, // 1 hour in seconds
  shadow: false,
  ip_ban: false,
});
final banRequest = BanRequest(
  targetUserId: 'user-123',
  reason: 'Violation of community guidelines',
  timeout: 3600, // 1 hour in seconds
  shadow: false,
  ipBan: false,
);
final response = await client.moderation.ban(banRequest: banRequest);
print('User banned for: ${response.getOrThrow().duration}');
await client.moderation.ban({
  target_user_id: "user-123",
  banned_by_id: "<banned by user id>",
  reason: "Violation of community guidelines",
  timeout: 3600, // 1 hour in seconds
  shadow: false,
  ip_ban: false,
});
$banRequest = new GeneratedModels\BanRequest(
    targetUserID: "user123",
    reason: "Violation of community guidelines",
    timeout: 60, // 1 hour in minutes
    shadow: false,
    ipBan: false,
    bannedByID: "eric"
);
$response = $moderationClient->ban($banRequest);
Parameters:
targetUserId: The ID of the user to ban
reason: Optional reason for the ban
timeout: Optional timeout in seconds (null for permanent ban)
shadow: Whether to perform a shadow ban (the user isn't told they're banned)
ipBan: Whether to also ban the user's IP address (see the sketch below)
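For example, a permanent shadow ban — a minimal sketch based on the JavaScript call above, assuming that omitting timeout makes the ban permanent, per the parameter notes:

await client.moderation.ban({
  target_user_id: "user-123",
  reason: "Repeated spam after warnings",
  // timeout omitted: the ban is permanent
  shadow: true, // the user's content is hidden without notifying them
  ip_ban: false,
});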
$muteRequest = new GeneratedModels\MuteRequest(
    targetIds: ["user123", "user456"],
    timeout: 1440, // 24 hours in minutes
    userID: "moderator"
);
$response = $moderationClient->mute($muteRequest);
Parameters:
targetIds: Array of user IDs to mute
timeout: Optional timeout in seconds (null for permanent mute), as shown in the sketch below
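A hedged JavaScript sketch of muting, assuming the client mirrors the PHP MuteRequest above with the snake_case field names used elsewhere on this page:

await client.moderation.mute({
  target_ids: ["user-123", "user-456"], // mute multiple users in one call
  timeout: 1440, // see the timeout note above; omit for a permanent mute
  user_id: "<moderator user id>",
});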
$blockRequest = new GeneratedModels\BlockUsersRequest(
    blockedUserID: "user123",
    userID: "moderator"
);
$response = $moderationClient->blockUsers($blockRequest);
client.unblockUsers({
  blocked_user_id: "user-123",
  user_id: "<moderator user id>",
});
// Coming soon
$request = new GeneratedModels\UnblockUsersRequest(
    blockedUserID: "user123",
    userID: "moderator"
);
$response = $moderationClient->unblockUsers($request);
final blockedUsers = await client.moderation.getBlockedUsers();
for (final user in blockedUsers.getOrThrow().blocks) {
  print('Blocked user: ${user.blockedUserId}');
}
client.getBlockedUsers({ user_id: "<moderator user id>" });
// Note: Moderation typically requires admin permissions
// This example demonstrates the API structure
Console.WriteLine($"Activity {activityId} would be moderated here");
// In a real scenario, you would call moderation endpoints
moderation_response = self.client.feeds.activity_feedback(
    activity_id,
    report=True,
    reason="inappropriate_content",
    user_id=self.test_user_id_2,  # Different user reporting
)
Parameters:
entityId: The ID of the content to flag
entityType: The type of content (e.g., "stream:feeds:v3:activity", "stream:feeds:v3:comment")
reason: Optional reason for flagging
entityCreatorId: Optional ID of the content creator (see the sketch below)
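A hedged JavaScript sketch of flagging an activity; the flag method and its snake_case fields are assumptions modeled on the parameter list above and the other calls on this page:

await client.moderation.flag({
  entity_id: "<activity id>",
  entity_type: "stream:feeds:v3:activity",
  reason: "inappropriate_content", // optional
  entity_creator_id: "<content creator id>", // optional
  user_id: "<reporting user id>",
});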
ActivityFeedbackRequest moderationRequest = ActivityFeedbackRequest.builder()
    .report(true)
    .reason("inappropriate_content")
    .userID(testUserId2) // Different user reporting
    .build();
ActivityFeedbackResponse moderationResponse =
    feeds.activityFeedback(activityId, moderationRequest).execute().getData();
$actionRequest = new GeneratedModels\SubmitActionRequest(
    // Action details for moderation
    userID: "moderator"
);
$response = $moderationClient->submitAction($actionRequest);
let queryRequest = QueryReviewQueueRequest(
    // Query parameters for filtering and pagination
)
let reviewQueue = try await client.moderation.queryReviewQueue(queryReviewQueueRequest: queryRequest)
val queryRequest = QueryReviewQueueRequest(
    // Query parameters for filtering and pagination
)
val reviewQueue: Result<QueryReviewQueueResponse> = client.moderation.queryReviewQueue(queryRequest)
client.moderation.queryReviewQueue({
  // Query parameters for filtering and pagination
});
const queryRequest = QueryReviewQueueRequest(
  // Query parameters for filtering and pagination
);
final reviewQueue = await client.moderation
    .queryReviewQueue(queryReviewQueueRequest: queryRequest);
client.moderation.queryReviewQueue({
  // Query parameters for filtering and pagination
  user_id: user.id,
});
$configRequest = new GeneratedModels\UpsertConfigRequest(
    key: "feeds",
    blockListConfig: new GeneratedModels\BlockListConfig(
        enabled: true,
        rules: [
            new GeneratedModels\BlockListRule(
                name: "blocklist",
                action: "remove",
            )
        ]
    ),
    userID: "moderator"
);
$response = $moderationClient->upsertConfig($configRequest);
All moderation methods can throw errors. Handle them appropriately:
do {
    let response = try await client.moderation.ban(banRequest: banRequest)
    print("User banned successfully")
} catch {
    print("Failed to ban user: \(error)")
}
// Functions that can result in errors return Kotlin `Result` objects
val result: Result<BanResponse> = client.moderation.ban(banRequest)
try {
  const response = await client.moderation.ban({
    target_user_id: "user-123",
    reason: "Violation of community guidelines",
    timeout: 3600, // 1 hour in seconds
    shadow: false,
    ip_ban: false,
  });
  console.log(`Banned successfully`);
} catch (e) {
  console.error(`Failed to ban user, error: ${e}`);
}
final response = await client.moderation.ban(banRequest: banRequest);
switch (response) {
  case Success(data: final banResponse):
    print('User banned for: ${banResponse.duration}');
  case Failure(error: final error):
    print('Error banning user: $error');
}
try {
  const response = await client.moderation.ban({
    target_user_id: "user-123",
    banned_by_id: "<banned by user id>",
    reason: "Violation of community guidelines",
    timeout: 3600, // 1 hour in seconds
    shadow: false,
    ip_ban: false,
  });
  console.log(`Banned successfully`);
} catch (e) {
  console.error(`Failed to ban user, error: ${e}`);
}
try {
    $banRequest = new GeneratedModels\BanRequest(
        targetUserID: "user-123",
        reason: "Violation of community guidelines",
        timeout: 60, // 1 hour in minutes
        shadow: false,
        ipBan: false,
        bannedByID: "moderator"
    );
    $response = $moderationClient->ban($banRequest);
    echo "User banned successfully\n";
} catch (Exception $e) {
    echo "Failed to ban user: " . $e->getResponseBody() . "\n";
}