Moderation
Stream Feeds has support for moderation, allowing you to manage user interactions, content moderation, and platform safety. It’s accessible through the client.moderation property.

Overview

Swift:
let client = FeedsClient(apiKey: apiKey, user: user, token: token)
let moderation = client.moderation

Kotlin:
val client = FeedsClient(context = context, apiKey = apiKey, user = user, tokenProvider = tokenProvider)
val moderation = client.moderation

JavaScript (client-side):
const client = new FeedsClient("<API key>");
client.moderation;

Dart:
final client = StreamFeedsClient(apiKey: apiKey, user: user, tokenProvider: token);
final moderation = client.moderation;

Node.js (server-side):
const apiKey = "";
const secret = "";
const client = new StreamClient(apiKey, secret);
client.moderation;

Java:
ActivityFeedbackRequest moderationRequest =
ActivityFeedbackRequest.builder()
.report(true)
.reason("inappropriate_content")
.userID(testUserId2) // Different user reporting
.build();
ActivityFeedbackResponse moderationResponse =
    feeds.activityFeedback(activityId, moderationRequest).execute().getData();

PHP:
use GetStream\ClientBuilder;

$moderationClient = (new ClientBuilder())
    ->apiKey($apiKey)
    ->apiSecret($apiSecret)
    ->buildModerationClient();

.NET:
// Note: Moderation typically requires admin permissions
// This example demonstrates the API structure
Console.WriteLine($"Activity {activityId} would be moderated here");
// In a real scenario, you would call moderation endpoints

Python:
moderation_response = self.client.feeds.activity_feedback(
    activity_id,
    report=True,
    reason="inappropriate_content",
    user_id=self.test_user_id_2,  # a different user reports the content
)

User Moderation
Ban Users
Ban a user from the platform with various options, including timeouts, shadow bans, and IP bans.

Swift:
let banRequest = BanRequest(
targetUserId: "user-123",
reason: "Violation of community guidelines",
timeout: 3600, // 1 hour in seconds
shadow: false,
ipBan: false
)
let response = try await client.moderation.ban(banRequest: banRequest)
print("User banned for: \(response.duration)")val banRequest = BanRequest(
targetUserId = "user-123",
reason = "Violation of community guidelines",
timeout = 3600, // 1 hour in seconds
shadow = false,
ipBan = false
)
val response: Result<BanResponse> = client.moderation.ban(banRequest = banRequest)
println("User banned for: ${response.getOrNull()?.duration}")await client.moderation.ban({
target_user_id: "user-123",
reason: "Violation of community guidelines",
timeout: 3600, // 1 hour in seconds
shadow: false,
ip_ban: false,
});

Dart:
const banRequest = BanRequest(
targetUserId: 'user-123',
reason: 'Violation of community guidelines',
timeout: 3600, // 1 hour in seconds
shadow: false,
ipBan: false,
);
final response = await client.moderation.ban(banRequest: banRequest);
print('User banned for: ${response.getOrThrow().duration}');

Node.js (server-side):
await client.moderation.ban({
target_user_id: "user-123",
banned_by_id: "<banned by user id>",
reason: "Violation of community guidelines",
timeout: 3600, // 1 hour in seconds
shadow: false,
ip_ban: false,
});

PHP:
$banRequest = new GeneratedModels\BanRequest(
targetUserID: "user123",
reason: "Violation of community guidelines",
timeout: 60, // 1 hour in minutes
shadow: false,
ipBan: false,
bannedByID: "eric"
);
$response = $moderationClient->ban($banRequest);

Parameters:
- targetUserId: The ID of the user to ban
- reason: Optional reason for the ban
- timeout: Optional timeout in seconds (null for a permanent ban)
- shadow: Whether to perform a shadow ban (the user doesn’t know they’re banned)
- ipBan: Whether to ban the user’s IP address
- bannedBy: Optional user who performed the ban
- deleteMessages: Whether to delete the user’s messages
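For example, a permanent shadow ban is issued by omitting the timeout and setting shadow to true. A server-side sketch based on the call shown above (the reason text and moderator ID are illustrative):

// Permanent shadow ban: no timeout, and the target user is not notified
await client.moderation.ban({
  target_user_id: "user-123",
  banned_by_id: "<moderator user id>", // illustrative moderator ID
  reason: "Repeated spam", // illustrative reason
  shadow: true,
});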
Mute Users
Mute one or more users to prevent them from interacting with your content.

Swift:
let muteRequest = MuteRequest(
targetIds: ["user-123", "user-456"],
timeout: 86400 // 24 hours in seconds
)
let response = try await client.moderation.mute(muteRequest: muteRequest)

Kotlin:
val muteRequest = MuteRequest(
targetIds = listOf("user-123", "user-456"),
timeout = 86400 // 24 hours in seconds
)
val response: Result<MuteResponse> = client.moderation.mute(muteRequest = muteRequest)

JavaScript (client-side):
client.moderation.mute({
target_ids: ["user-123", "user-456"],
timeout: 86400, // 24 hours in seconds
});

Dart:
const muteRequest = MuteRequest(
targetIds: ['user-123', 'user-456'],
timeout: 86400, // 24 hours in seconds
);
final response = await client.moderation.mute(muteRequest: muteRequest);

Node.js (server-side):
client.moderation.mute({
target_ids: ["user-123", "user-456"],
user_id: "<moderator id>",
timeout: 86400, // 24 hours in seconds
});

PHP:
$muteRequest = new GeneratedModels\MuteRequest(
targetIds: ["user123", "user456"],
// 24 hours in minutes
timeout: 1440,
userID: "moderator"
);
$response = $moderationClient->mute($muteRequest);

Parameters:
- targetIds: Array of user IDs to mute
- timeout: Optional timeout in seconds (null for a permanent mute)
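As with bans, omitting the timeout makes the mute permanent. A server-side sketch based on the call above (the IDs are illustrative):

// Permanent mute: no timeout supplied
await client.moderation.mute({
  target_ids: ["user-123"],
  user_id: "<moderator id>",
});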
Block Users
Block a user to prevent them from following you or seeing your content.

Swift:
let blockRequest = BlockUsersRequest(blockedUserId: "user-123")
let response = try await client.moderation.blockUsers(blockUsersRequest: blockRequest)

Kotlin:
val blockRequest = BlockUsersRequest(blockedUserId = "user-123")
val response: Result<BlockUsersResponse> = client.moderation.blockUser(blockRequest)

JavaScript (client-side):
await client.blockUsers({
blocked_user_id: "user-123",
});

Dart:
const blockRequest = BlockUsersRequest(blockedUserId: 'user-123');
final response = await client.moderation.blockUsers(blockUsersRequest: blockRequest);

Node.js (server-side):
await client.blockUsers({
blocked_user_id: "user-123",
user_id: "<moderator id>",
});

PHP:
$blockRequest = new GeneratedModels\BlockUsersRequest(
blockedUserID: "user123",
userID: "moderator"
);
$response = $moderationClient->blockUsers($blockRequest);

Unblock Users
Unblock a previously blocked user.

Swift:
let unblockRequest = UnblockUsersRequest(blockedUserId: "user-123")
let response = try await client.moderation.unblockUsers(unblockUsersRequest: unblockRequest)

Kotlin:
val unblockRequest = UnblockUsersRequest(blockedUserId = "user-123")
val response: Result<UnblockUsersResponse> = client.moderation.unblockUser(unblockRequest)

JavaScript (client-side):
client.unblockUsers({
blocked_user_id: "user-123",
});

Dart:
const unblockRequest = UnblockUsersRequest(blockedUserId: 'user-123');
final response = await client.moderation.unblockUsers(unblockUsersRequest: unblockRequest);

Node.js (server-side):
client.unblockUsers({
blocked_user_id: "user-123",
user_id: "<moderator user id>",
});

PHP:
$request = new GeneratedModels\UnblockUsersRequest(
blockedUserID: "user123",
userID: "moderator"
);
$response = $moderationClient->unblockUsers($request);

Get Blocked Users
Retrieve a list of users you have blocked.

Swift:
let blockedUsers = try await client.moderation.getBlockedUsers()
for user in blockedUsers.users {
print("Blocked user: \(user.id)")
}

Kotlin:
val blockedUsers: Result<GetBlockedUsersResponse> = client.moderation.getBlockedUsers()
blockedUsers.getOrNull()?.blocks?.forEach { block ->
println("Blocked user: ${block.user.id}")
}

JavaScript (client-side):
client.getBlockedUsers();

Dart:
final blockedUsers = await client.moderation.getBlockedUsers();
for (final user in blockedUsers.getOrThrow().blocks) {
print('Blocked user: ${user.blockedUserId}');
}

Node.js (server-side):
client.getBlockedUsers({ user_id: "<moderator user id>" });

PHP:
$response = $client->getBlockedUsers('moderator');
Content Moderation
Flag Content
Flag inappropriate content for moderation review.

Swift:
let flagRequest = FlagRequest(
entityId: "activity-123",
entityType: "activity",
reason: "Inappropriate content",
entityCreatorId: "user-456"
)
let response = try await client.moderation.flag(flagRequest: flagRequest)

Kotlin:
val flagRequest = FlagRequest(
entityId = "activity-123",
entityType = "activity",
reason = "Inappropriate content",
entityCreatorId = "user-456"
)
val response: Result<FlagResponse> = client.moderation.flag(flagRequest = flagRequest)

JavaScript (client-side):
await client.moderation.flag({
entity_type: "activity",
entity_id: "activity_123",
reason: "Inappropriate content",
});

Dart:
const flagRequest = FlagRequest(
entityId: 'activity-123',
entityType: 'activity',
reason: 'Inappropriate content',
entityCreatorId: 'user-456',
);
final response = await client.moderation.flag(flagRequest: flagRequest);

Node.js (server-side):
await client.moderation.flag({
entity_type: "activity",
entity_id: "activity_123",
reason: "Inappropriate content",
user_id: "<moderator user id>",
});

PHP:
$flagRequest = new GeneratedModels\FlagRequest(
entityID: "activity-123",
entityType: "activity",
reason: "Inappropriate content",
entityCreatorID: "user-456",
userID: "moderator"
);
$response = $moderationClient->flag($flagRequest);

Parameters:
- entityId: The ID of the content to flag
- entityType: The type of content (e.g., “activity”, “comment”)
- reason: Optional reason for flagging
- entityCreatorId: Optional ID of the content creator
- custom: Optional custom data for the flag
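As a sketch, flagging a comment rather than an activity only changes the entity fields. The entity_creator_id name is a snake_case rendering of the entityCreatorId parameter above and, like the custom payload shape, is an assumption rather than something confirmed by this page:

await client.moderation.flag({
  entity_type: "comment", // flag a comment instead of an activity
  entity_id: "comment-456", // illustrative ID
  entity_creator_id: "user-789", // assumed field name, illustrative ID
  reason: "Harassment",
  user_id: "<reporting user id>",
  custom: { source: "web" }, // assumed shape for the optional custom data
});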
Submit Moderation Actions
Submit moderation actions for flagged content.

Swift:
let actionRequest = SubmitActionRequest(
// Action details for moderation
)
let response = try await client.moderation.submitAction(submitActionRequest: actionRequest)

Kotlin:
val actionRequest = SubmitActionRequest(
// Action details for moderation
)
val response: Result<SubmitActionResponse> = client.moderation.submitAction(submitActionRequest = actionRequest)

JavaScript (client-side):
client.moderation.submitAction({
// Action details for moderation
});

Dart:
const actionRequest = SubmitActionRequest(
// Action details for moderation
);
final response = await client.moderation.submitAction(submitActionRequest: actionRequest);

Node.js (server-side):
client.moderation.submitAction({
// Action details for moderation
user_id: "<user id>",
});

PHP:
$actionRequest = new GeneratedModels\SubmitActionRequest(
// Action details for moderation
userID: "moderator"
);
$response = $moderationClient->submitAction($actionRequest);
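The snippets above leave the action body open. As a rough sketch only (the action name and field names below are assumptions, not confirmed by this page), a server-side call that resolves a review queue item might look like:

await client.moderation.submitAction({
  action_type: "mark_reviewed", // assumed action name
  item_id: "<review queue item id>", // assumed field: the item being acted on
  user_id: "<moderator id>",
});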
Review Queue
Query Review Queue
Retrieve items in the moderation review queue.

Swift:
let queryRequest = QueryReviewQueueRequest(
// Query parameters for filtering and pagination
)
let reviewQueue = try await client.moderation.queryReviewQueue(queryReviewQueueRequest: queryRequest)

Kotlin:
val queryRequest = QueryReviewQueueRequest(
// Query parameters for filtering and pagination
)
val reviewQueue: Result<QueryReviewQueueResponse> = client.moderation.queryReviewQueue(queryRequest)

JavaScript (client-side):
client.moderation.queryReviewQueue({
// Query parameters for filtering and pagination
});

Dart:
const queryRequest = QueryReviewQueueRequest(
// Query parameters for filtering and pagination
);
final reviewQueue = await client.moderation
    .queryReviewQueue(queryReviewQueueRequest: queryRequest);

Node.js (server-side):
client.moderation.queryReviewQueue({
// Query parameters for filtering and pagination
user_id: user.id,
});

PHP:
$queryRequest = new GeneratedModels\QueryReviewQueueRequest(
limit: 10,
lockItems: false,
userID: "moderator"
);
$response = $moderationClient->queryReviewQueue($queryRequest);
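Mirroring the PHP request above, a server-side query with a page size and without locking items could look roughly like this (lock_items is a snake_case rendering of the lockItems option and is an assumed field name):

const reviewQueue = await client.moderation.queryReviewQueue({
  limit: 10, // page size, as in the PHP example
  lock_items: false, // assumed name: don't lock items for review while browsing
  user_id: "<moderator id>",
});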
Configuration Management
Upsert Moderation Configuration
Create or update moderation configuration settings.

Swift:
let configRequest = UpsertConfigRequest(
// Configuration details
)
let response = try await client.moderation.upsertConfig(upsertConfigRequest: configRequest)

Kotlin:
val configRequest = UpsertConfigRequest(
// Configuration details
)
val response: Result<UpsertConfigResponse> = client.moderation.upsertConfig(configRequest)

JavaScript (client-side):
client.moderation.upsertConfig({
key: 'feeds',
block_list_config: {
enabled: true,
rules: [
// Names of existing block lists
{ name: blocklistName, action: 'remove' },
{ name: flagBlocklistName, action: 'flag' },
{ name: shadowBlocklistName, action: 'shadow' },
{ name: bounceBlocklistName, action: 'bounce' },
],
},
ai_image_config: {
enabled: true,
rules: [{ label: 'Non-Explicit Nudity', action: 'remove', min_confidence: 0 }],
ocr_rules: [{ label: 'Non-Explicit Nudity', action: 'remove' }],
},
});

Dart:
const upsertRequest = UpsertConfigRequest(
// Configuration details for moderation
);
final response = await client.moderation.upsertConfig(upsertRequest);

Node.js (server-side):
client.moderation.upsertConfig({
key: 'feeds',
block_list_config: {
enabled: true,
rules: [
// Names of existing block lists
{ name: blocklistName, action: 'remove' },
{ name: flagBlocklistName, action: 'flag' },
{ name: shadowBlocklistName, action: 'shadow' },
{ name: bounceBlocklistName, action: 'bounce' },
],
},
ai_image_config: {
enabled: true,
rules: [{ label: 'Non-Explicit Nudity', action: 'remove', min_confidence: 0 }],
ocr_rules: [{ label: 'Non-Explicit Nudity', action: 'remove' }],
},
});

PHP:
$configRequest = new GeneratedModels\UpsertConfigRequest(
key: "feeds",
blockListConfig: new GeneratedModels\BlockListConfig(
enabled: true,
rules: [
new GeneratedModels\BlockListRule(
name: "blocklist",
action: "remove",
)
]
),
userID: "moderator"
);
$response = $moderationClient->upsertConfig($configRequest);

Get Moderation Configuration
Retrieve a specific moderation configuration.

Swift:
let config = try await client.moderation.getConfig(key: "automod_settings", team: "team-123")

Kotlin:
val config: Result<GetConfigResponse> = client.moderation.getConfig(key = "automod_settings", team = "team-123")

JavaScript (client-side):
client.moderation.getConfig({
key: "feeds",
});

Dart:
final config = await client.moderation.getConfig(key: 'feeds');

Node.js (server-side):
client.moderation.getConfig({
key: "feeds",
});

PHP:
$response = $moderationClient->getConfig("feeds", "");
Parameters:
- key: The configuration key to retrieve
- team: Optional team identifier
Delete Moderation Configuration
Remove a moderation configuration.

Swift:
let response = try await client.moderation.deleteConfig(key: "automod_settings", team: "team-123")

Kotlin:
val response: Result<DeleteModerationConfigResponse> = client.moderation.deleteConfig(key = "automod_settings", team = "team-123")

JavaScript (client-side):
client.moderation.deleteConfig({
key: "feeds",
});

Dart:
final response = await client.moderation.deleteConfig(key: 'feeds');

Node.js (server-side):
client.moderation.deleteConfig({
key: "feeds",
});

PHP:
$response = $moderationClient->deleteConfig("feeds", "");
Query Moderation Configurations
Search and filter moderation configurations.

Swift:
let queryRequest = QueryModerationConfigsRequest(
// Query parameters for filtering and pagination
)
let configs = try await client.moderation.queryModerationConfigs(queryModerationConfigsRequest: queryRequest)

Kotlin:
val queryRequest = QueryModerationConfigsRequest(
// Query parameters for filtering and pagination
)
val configs: Result<QueryModerationConfigsResponse> = client.moderation.queryModerationConfigs(queryModerationConfigsRequest = queryRequest)

JavaScript (client-side):
client.moderation.queryModerationConfigs({
filter: {
// Filter params
},
});

Dart:
const queryRequest = ModerationConfigsQuery(
// Query parameters for filtering and pagination
);
final configs = await client.moderation
    .queryModerationConfigs(queryModerationConfigsRequest: queryRequest);

Node.js (server-side):
client.moderation.queryModerationConfigs({
filter: {
// Filter params
},
});

PHP:
$queryRequest = new GeneratedModels\QueryModerationConfigsRequest(
limit: 10,
filter: (object)[
"key" => (object)['$in' => ["feeds", "automod"]]
]
);
$response = $moderationClient->queryModerationConfigs($queryRequest);

Moderation Config Queryable Built-In Fields
| name | type | description | supported operations | example |
|---|---|---|---|---|
| key | string or list of strings | The configuration key identifier | $eq, $ne, $gt, $gte, $lt, $lte, $in, $nin, $exists, $autocomplete | { key: { $autocomplete: 'spam' } } |
| team | string or list of strings | The team identifier for multi-tenant applications | $eq, $ne, $gt, $gte, $lt, $lte, $in, $nin, $exists | { team: { $eq: 'team_123' } } |
| created_at | string, must be formatted as an RFC3339 timestamp | The time the configuration was created | $eq, $gt, $gte, $lt, $lte | { created_at: { $gte: '2023-12-04T09:30:20.45Z' } } |
| updated_at | string, must be formatted as an RFC3339 timestamp | The time the configuration was last updated | $eq, $gt, $gte, $lt, $lte | { updated_at: { $gte: '2023-12-04T09:30:20.45Z' } } |
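For example, the key field supports $autocomplete, so a server-side query for configurations whose key partially matches "feeds" could look roughly like this (a sketch based on the operators in the table above and the PHP request earlier):

const configs = await client.moderation.queryModerationConfigs({
  filter: {
    key: { $autocomplete: "feeds" }, // partial match on the configuration key
  },
  limit: 10,
});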
Error Handling
All moderation methods can throw errors. Handle them appropriately:

Swift:
do {
let response = try await client.moderation.ban(banRequest: banRequest)
print("User banned successfully")
} catch {
print("Failed to ban user: \(error)")
}

Kotlin:
// Functions that can result in errors return Kotlin `Result` objects
val result: Result<BanResponse> = client.moderation.ban(banRequest)

JavaScript (client-side):
try {
const response = await client.moderation.ban({
target_user_id: "user-123",
reason: "Violation of community guidelines",
timeout: 3600, // 1 hour in seconds
shadow: false,
ip_ban: false,
});
console.log(`Banned successfully`);
} catch (e) {
console.error(`Failed to ban user, error: ${e}`);
}

Dart:
final response = await client.moderation.ban(banRequest: banRequest);
switch (response) {
case Success(data: final banResponse):
print('User banned for: ${banResponse.duration}');
case Failure(error: final error):
print('Error banning user: $error');
}

Node.js (server-side):
try {
const response = await client.moderation.ban({
target_user_id: "user-123",
banned_by_id: "<banned by user id>",
reason: "Violation of community guidelines",
timeout: 3600, // 1 hour in seconds
shadow: false,
ip_ban: false,
});
console.log(`Banned successfully`);
} catch (e) {
console.error(`Failed to ban user, error: ${e}`);
}

PHP:
try {
$banRequest = new GeneratedModels\BanRequest(
targetUserID: "user-123",
reason: "Violation of community guidelines",
timeout: 60, // 1 hour in minutes
shadow: false,
ipBan: false,
bannedByID: "moderator"
);
$response = $moderationClient->ban($banRequest);
echo "User banned successfully\n";
} catch (Exception $e) {
echo "Failed to ban user: " . $e->getResponseBody() . "\n";
}