Documentation
Integration Patterns
Learn proven integration patterns for embedding PolicyCortex governance into your existing development workflows, security tools, and enterprise systems.
Integration Approaches
🔄 Event-Driven
React to events with webhooks and streaming
• Real-time policy enforcement
• Immediate violation response
• Workflow automation triggers
• System state synchronization
📊 Batch Processing
Scheduled data synchronization and reporting
• Daily compliance reports
• Bulk policy deployments
• Historical data analysis
• Resource inventory sync
🔍 Pull-Based
On-demand data retrieval and queries
• Dashboard data loading
• Interactive compliance checks
• User-initiated scans
• Custom report generation
🌐 Hybrid
Combine multiple patterns for optimal results
• Critical events handled in real time
• Routine reports on a schedule
• Interactive queries on demand
• Fallback mechanisms between patterns
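For the batch and pull-based approaches, integrations typically call the PolicyCortex REST API directly. The sketch below is illustrative only: the base URL, endpoint path, and response shape are assumptions rather than the documented API. It shows a pull-based, on-demand query that could equally run on a schedule for batch reporting.
On-Demand Compliance Query (javascript, illustrative)
// Pull the current compliance posture on demand (or on a schedule for batch reports).
// The base URL and endpoint path are hypothetical; substitute your tenant's actual API.
// Requires Node 18+ for the global fetch API.
async function fetchComplianceSummary(framework, token) {
  const res = await fetch(
    `https://api.policycortex.example.com/v1/compliance/summary?framework=${framework}`,
    { headers: { Authorization: `Bearer ${token}` } }
  );
  if (!res.ok) throw new Error(`Compliance query failed: ${res.status}`);
  return res.json();
}

// Example: load dashboard data or feed a daily report
(async () => {
  const summary = await fetchComplianceSummary('soc2', process.env.POLICYCORTEX_TOKEN);
  console.log(`SOC 2 compliance score: ${summary.score}`);
})();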
Event-Driven Integration Patterns
Webhook Integration
Express.js Webhook Handler (javascript)
const express = require('express');
const crypto = require('crypto');
const app = express();

// Middleware to verify the webhook signature against the raw request body
function verifySignature(req, res, next) {
  const signature = req.headers['x-policycortex-signature'];
  const secret = process.env.WEBHOOK_SECRET;

  const expectedSignature = crypto
    .createHmac('sha256', secret)
    .update(req.rawBody)
    .digest('hex');

  if (signature !== `sha256=${expectedSignature}`) {
    return res.status(401).json({ error: 'Invalid signature' });
  }
  next();
}

// Handle policy violation events.
// The json() verify hook captures the raw body so the HMAC is computed over
// the exact bytes that were signed, not a re-serialized object.
app.post(
  '/webhooks/violations',
  express.json({ verify: (req, res, buf) => { req.rawBody = buf; } }),
  verifySignature,
  async (req, res) => {
    const { type, data } = req.body;

    switch (type) {
      case 'policy.violation.created':
        await handleNewViolation(data.violation);
        break;
      case 'policy.violation.resolved':
        await handleResolvedViolation(data.violation);
        break;
      case 'compliance.status.changed':
        await updateComplianceDashboard(data.compliance);
        break;
    }

    res.status(200).json({ received: true });
  }
);

// createJiraTicket, sendSlackAlert and getResourceOwner are integration-specific
// helpers you provide.
async function handleNewViolation(violation) {
  // Create JIRA ticket for high/critical violations
  if (['HIGH', 'CRITICAL'].includes(violation.severity)) {
    await createJiraTicket({
      summary: `Policy Violation: ${violation.policy_name}`,
      description: `Resource: ${violation.resource_id}\nMessage: ${violation.message}`,
      priority: violation.severity === 'CRITICAL' ? 'Highest' : 'High',
      assignee: getResourceOwner(violation.resource_id)
    });
  }

  // Send Slack notification
  await sendSlackAlert({
    channel: '#security-alerts',
    text: `🚨 New ${violation.severity} violation detected`,
    blocks: [
      {
        type: 'section',
        text: {
          type: 'mrkdwn',
          text: `*Policy:* ${violation.policy_name}\n*Resource:* ${violation.resource_id}\n*Message:* ${violation.message}`
        }
      }
    ]
  });
}
EventBridge Integration
AWS EventBridge Pattern (json)
{
  "Rules": [
    {
      "Name": "PolicyViolationRule",
      "EventPattern": {
        "source": ["policycortex"],
        "detail-type": ["Policy Violation"],
        "detail": {
          "severity": ["HIGH", "CRITICAL"],
          "resource_type": ["AWS::S3::Bucket", "AWS::EC2::SecurityGroup"]
        }
      },
      "Targets": [
        {
          "Id": "SecurityLambda",
          "Arn": "arn:aws:lambda:us-east-1:123456789012:function:HandleSecurityViolation"
        },
        {
          "Id": "NotificationSNS",
          "Arn": "arn:aws:sns:us-east-1:123456789012:security-alerts"
        }
      ]
    },
    {
      "Name": "ComplianceStatusRule",
      "EventPattern": {
        "source": ["policycortex"],
        "detail-type": ["Compliance Status Change"],
        "detail": {
          "framework": ["SOC2", "HIPAA"],
          "status": ["NON_COMPLIANT"]
        }
      },
      "Targets": [
        {
          "Id": "ComplianceTeamSNS",
          "Arn": "arn:aws:sns:us-east-1:123456789012:compliance-alerts"
        }
      ]
    }
  ]
}
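For the Lambda target referenced by PolicyViolationRule, a minimal handler sketch might look like the following. The field names on event.detail (severity, resource_id, policy_name) are assumptions carried over from the webhook payloads above; adapt them to the payload your PolicyCortex event source actually emits.
Security Violation Lambda Handler (javascript, illustrative)
// Minimal AWS Lambda handler for events matched by PolicyViolationRule.
// Field names on event.detail are assumptions, not a documented contract.
exports.handler = async (event) => {
  const { severity, resource_id: resourceId, policy_name: policyName } = event.detail || {};

  console.log(`Policy violation: ${policyName} (${severity}) on ${resourceId}`);

  if (severity === 'CRITICAL') {
    // e.g. quarantine the resource, open an incident, or page the on-call engineer
    // await containmentService.quarantine(resourceId);   // hypothetical helper
  }

  return { statusCode: 200 };
};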
CI/CD Integration Patterns
Gate Pattern - Policy as Quality Gate
GitHub Actions Policy Gate (yaml)
name: Infrastructure Policy Gate

on:
  pull_request:
    paths:
      - 'infrastructure/**'
      - 'terraform/**'

jobs:
  policy-gate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      # Install PolicyCortex CLI
      - name: Setup PolicyCortex
        run: |
          curl -fsSL https://cli.policycortex.com/install.sh | sh
          echo "$HOME/.policycortex/bin" >> $GITHUB_PATH

      # Run pre-deployment policy scan
      - name: Policy Validation Gate
        id: policy-scan
        run: |
          # Scan infrastructure changes
          policycortex scan infrastructure \
            --path ./infrastructure \
            --frameworks soc2,hipaa \
            --output json > scan-results.json

          # Count gate-breaking violations
          CRITICAL_COUNT=$(jq '[.violations[] | select(.severity=="CRITICAL")] | length' scan-results.json)
          HIGH_COUNT=$(jq '[.violations[] | select(.severity=="HIGH")] | length' scan-results.json)

          # Fail if any critical violations or too many high violations
          if [ "$CRITICAL_COUNT" -gt 0 ] || [ "$HIGH_COUNT" -gt 3 ]; then
            echo "Policy gate failed: $CRITICAL_COUNT critical, $HIGH_COUNT high violations"
            echo "gate_status=failed" >> $GITHUB_OUTPUT
            exit 1
          else
            echo "Policy gate passed"
            echo "gate_status=passed" >> $GITHUB_OUTPUT
          fi

      # Comment on PR with results
      - name: Policy Results Comment
        uses: actions/github-script@v6
        if: always()
        env:
          GATE_STATUS: ${{ steps.policy-scan.outputs.gate_status }}
        with:
          script: |
            const fs = require('fs');
            const results = JSON.parse(fs.readFileSync('scan-results.json', 'utf8'));
            const gateStatus = process.env.GATE_STATUS;

            const violations = results.violations || [];
            const critical = violations.filter(v => v.severity === 'CRITICAL').length;
            const high = violations.filter(v => v.severity === 'HIGH').length;
            const medium = violations.filter(v => v.severity === 'MEDIUM').length;

            const statusEmoji = gateStatus === 'passed' ? '✅' : '❌';
            const comment = `
            ## ${statusEmoji} Policy Validation Results

            **Gate Status:** ${gateStatus === 'passed' ? 'PASSED' : 'FAILED'}

            | Severity | Count |
            |----------|-------|
            | Critical | ${critical} |
            | High | ${high} |
            | Medium | ${medium} |

            ${gateStatus === 'failed' ? '❌ **Deployment blocked** - Please resolve critical violations before merging.' : '✅ **Ready to deploy** - All policy checks passed.'}
            `;

            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: comment
            });
Deployment Safety Pattern
Progressive Policy Enforcement (yaml)
# Multi-stage policy enforcement
stages:
  - stage: "development"
    policy_enforcement: "warn_only"
    frameworks: ["basic_security"]

  - stage: "staging"
    policy_enforcement: "block_high"
    frameworks: ["soc2", "security_baseline"]

  - stage: "production"
    policy_enforcement: "block_medium"
    frameworks: ["soc2", "hipaa", "pci_dss"]

# Deployment pipeline with policy checkpoints
deployment:
  dev_deployment:
    before_deploy:
      - policy_scan:
          severity_threshold: "none"
          action: "warn"

  staging_deployment:
    before_deploy:
      - policy_scan:
          severity_threshold: "high"
          action: "block"
      - compliance_check:
          frameworks: ["soc2"]
          min_score: 75

  prod_deployment:
    before_deploy:
      - policy_scan:
          severity_threshold: "medium"
          action: "block"
      - compliance_check:
          frameworks: ["soc2", "hipaa"]
          min_score: 90
      - manual_approval:
          required: true
          approvers: ["security_team", "compliance_team"]
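A pipeline step can translate this configuration into a concrete go/no-go decision. The sketch below is illustrative only: it assumes scan results shaped like the GitHub Actions example above and maps each stage's severity_threshold to the violation severities that should block a deploy.
Policy Checkpoint Evaluation (javascript, illustrative)
// Map a stage's severity_threshold to the severities that block a deploy.
const BLOCKING = {
  none: [],
  high: ['HIGH', 'CRITICAL'],
  medium: ['MEDIUM', 'HIGH', 'CRITICAL']
};

// Decide whether a deployment may proceed for a given stage.
function evaluateCheckpoint(stageConfig, scanResults) {
  const blocking = BLOCKING[stageConfig.severity_threshold] || [];
  const violations = (scanResults.violations || [])
    .filter(v => blocking.includes(v.severity));

  if (violations.length > 0 && stageConfig.action === 'block') {
    return { allowed: false, reason: `${violations.length} blocking violation(s)` };
  }
  return { allowed: true, warnings: (scanResults.violations || []).length };
}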
Monitoring & Observability Integration
📊 Metrics Integration
Prometheus Metrics (yaml)
# Custom metrics export
metrics:
  - name: "policy_violations_total"
    type: "counter"
    labels: ["severity", "framework", "resource_type"]

  - name: "compliance_score"
    type: "gauge"
    labels: ["framework", "account_id"]

  - name: "remediation_time_seconds"
    type: "histogram"
    labels: ["policy_name", "severity"]
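If you re-expose these metrics from your own Node.js service rather than scraping PolicyCortex directly, equivalent instruments can be registered with the prom-client library. This is a sketch under that assumption; the metric names and labels mirror the export configuration above.
Registering Equivalent Metrics with prom-client (javascript, illustrative)
const client = require('prom-client');

// Counter incremented for every violation event received
const violationsTotal = new client.Counter({
  name: 'policy_violations_total',
  help: 'Total policy violations observed',
  labelNames: ['severity', 'framework', 'resource_type']
});

// Gauge set from the latest compliance score per framework/account
const complianceScore = new client.Gauge({
  name: 'compliance_score',
  help: 'Current compliance score',
  labelNames: ['framework', 'account_id']
});

// Example usage when a webhook event arrives (values are illustrative)
violationsTotal.inc({ severity: 'HIGH', framework: 'soc2', resource_type: 'AWS::S3::Bucket' });
complianceScore.set({ framework: 'soc2', account_id: '123456789012' }, 87.5);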
🔔 Alert Integration
PagerDuty Integration (json)
{
  "routing_key": "your-integration-key",
  "event_action": "trigger",
  "payload": {
    "summary": "Critical Policy Violation",
    "severity": "critical",
    "source": "PolicyCortex",
    "custom_details": {
      "policy": "{{violation.policy_name}}",
      "resource": "{{violation.resource_id}}",
      "account": "{{violation.account_id}}"
    }
  }
}
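If you trigger PagerDuty yourself (for example from the webhook handler above) rather than through a built-in integration, the same payload can be posted to the PagerDuty Events API v2. A minimal sketch, assuming a violation object with the fields used throughout this page and Node 18+ for global fetch:
Sending the Event from a Webhook Handler (javascript, illustrative)
// Trigger a PagerDuty incident via the Events API v2.
async function pageOnCriticalViolation(violation) {
  const response = await fetch('https://events.pagerduty.com/v2/enqueue', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      routing_key: process.env.PAGERDUTY_ROUTING_KEY,
      event_action: 'trigger',
      payload: {
        summary: `Critical Policy Violation: ${violation.policy_name}`,
        severity: 'critical',
        source: 'PolicyCortex',
        custom_details: {
          policy: violation.policy_name,
          resource: violation.resource_id,
          account: violation.account_id
        }
      }
    })
  });

  if (!response.ok) {
    throw new Error(`PagerDuty enqueue failed: ${response.status}`);
  }
}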
Data Pipeline Integration Patterns
Streaming Data Integration
Apache Kafka Integration (java)
@Component
public class PolicyViolationConsumer {

    // Collaborators (meterRegistry, kafkaTemplate, incidentService,
    // containmentService) are assumed to be injected via the constructor.

    @KafkaListener(topics = "policy-violations", groupId = "security-team")
    public void handleViolation(@Payload PolicyViolation violation,
                                @Headers MessageHeaders headers) {
        try {
            // Process violation based on severity
            switch (violation.getSeverity()) {
                case CRITICAL:
                    handleCriticalViolation(violation);
                    break;
                case HIGH:
                    handleHighViolation(violation);
                    break;
                default:
                    logViolation(violation);
            }

            // Update violation processing metrics
            meterRegistry.counter("violations.processed",
                "severity", violation.getSeverity().name(),
                "policy", violation.getPolicyName()
            ).increment();
        } catch (Exception e) {
            // Send to dead letter queue for manual processing
            kafkaTemplate.send("policy-violations-dlq", violation);
            log.error("Failed to process violation: {}", violation.getId(), e);
        }
    }

    private void handleCriticalViolation(PolicyViolation violation) {
        // Create high-priority incident
        incidentService.createIncident(
            IncidentRequest.builder()
                .title("Critical Policy Violation: " + violation.getPolicyName())
                .priority(Priority.P1)
                .description(buildViolationDescription(violation))
                .assignee(getResourceOwner(violation.getResourceId()))
                .tags(Arrays.asList("security", "policy-violation"))
                .build()
        );

        // Trigger automated containment if configured
        if (violation.getPolicy().isAutoContainmentEnabled()) {
            containmentService.executeContainment(violation);
        }
    }
}
Integration Best Practices
✅ Design Principles
• Design for failure - implement retry mechanisms
• Use idempotent operations where possible
• Implement circuit breakers for external systems
• Store integration state for recovery
• Monitor integration health continuously
• Use async processing for non-critical paths
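A minimal sketch of the first two principles from the list above, in plain JavaScript with no particular client library; the ticketing URL and Idempotency-Key header are assumptions, not part of the PolicyCortex API.
Retry with Backoff and Idempotency (javascript, illustrative)
// Retry an integration call with exponential backoff. Pairing it with an
// idempotency key means replays after a transient failure do not create
// duplicate side effects downstream.
async function callWithRetry(fn, { maxAttempts = 5, baseDelayMs = 500 } = {}) {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return await fn();
    } catch (err) {
      if (attempt === maxAttempts) throw err;
      const delay = baseDelayMs * 2 ** (attempt - 1);
      await new Promise(resolve => setTimeout(resolve, delay));
    }
  }
}

// Usage: forward a violation to an internal ticketing service.
// The URL and 'Idempotency-Key' header are illustrative placeholders.
async function forwardViolation(violation) {
  return callWithRetry(() =>
    fetch('https://tickets.internal.example.com/api/issues', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Idempotency-Key': violation.id   // same violation, same ticket on retry
      },
      body: JSON.stringify(violation)
    }).then(res => {
      if (!res.ok) throw new Error(`Request failed: ${res.status}`);
      return res.json();
    })
  );
}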
⚡ Performance Tips
• Batch API calls when possible
• Cache frequently accessed data
• Use pagination for large datasets
• Implement rate limiting and backoff
• Filter data at the source
• Use compression for large payloads
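The sketch below combines two of the tips above: paginating through a large resource inventory and processing it in page-sized batches. The endpoint, query parameters, and page shape are assumptions about a typical REST API, not the documented PolicyCortex contract.
Pagination with Batched Processing (javascript, illustrative)
// Walk a paginated collection and yield one page-sized batch at a time.
async function* paginate(baseUrl, token, pageSize = 100) {
  let page = 1;
  while (true) {
    const res = await fetch(`${baseUrl}?page=${page}&per_page=${pageSize}`, {
      headers: { Authorization: `Bearer ${token}` }
    });
    if (!res.ok) throw new Error(`Request failed: ${res.status}`);
    const { items = [] } = await res.json();
    if (items.length === 0) return;
    yield items;                       // one batch per page
    page += 1;
  }
}

// Usage: sync the resource inventory in batches instead of item-by-item calls.
async function syncInventory(token) {
  // Hypothetical endpoint; substitute the real resources API for your tenant.
  const url = 'https://api.policycortex.example.com/v1/resources';
  for await (const batch of paginate(url, token)) {
    await upsertResources(batch);      // your bulk write helper: one call per page
  }
}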