
OpenAI / ChatGPT Configuration

This guide shows you how to integrate the Probo MCP Server with OpenAI’s GPT models and ChatGPT, enabling AI assistants to interact with your compliance data.

OpenAI doesn’t natively support the Model Context Protocol (MCP) yet, but you can integrate Probo’s MCP API with OpenAI in several ways:

  1. Function Calling - Convert MCP tools to OpenAI function calls
  2. Custom GPT Actions - Create a custom GPT with Probo integration
  3. API Integration - Build applications that combine OpenAI + Probo MCP

Method 1: Function Calling with OpenAI API


Convert Probo MCP tools to OpenAI function calls in your application.

Python:

import json

import openai
import requests

class ProboMCPClient:
    def __init__(self, base_url, api_token):
        self.base_url = base_url
        self.headers = {
            "Authorization": f"Bearer {api_token}",
            "Content-Type": "application/json"
        }

    def call_tool(self, tool_name, arguments):
        response = requests.post(
            f"{self.base_url}/tools/{tool_name}",
            headers=self.headers,
            json=arguments
        )
        response.raise_for_status()
        return response.json()

# Initialize clients
probo = ProboMCPClient(
    base_url="https://your-probo-instance.com/api/mcp",
    api_token="your_api_token_here"
)
openai_client = openai.OpenAI(api_key="your_openai_api_key")

# Define Probo tools as OpenAI functions
tools = [
    {
        "type": "function",
        "function": {
            "name": "listRisks",
            "description": "List all risks for the organization",
            "parameters": {
                "type": "object",
                "properties": {
                    "organization_id": {
                        "type": "string",
                        "description": "Organization ID"
                    },
                    "filter": {
                        "type": "object",
                        "properties": {
                            "query": {
                                "type": "string",
                                "description": "Search query"
                            }
                        }
                    }
                },
                "required": ["organization_id"]
            }
        }
    },
    {
        "type": "function",
        "function": {
            "name": "addVendor",
            "description": "Add a new vendor to the organization",
            "parameters": {
                "type": "object",
                "properties": {
                    "organization_id": {
                        "type": "string",
                        "description": "Organization ID"
                    },
                    "name": {
                        "type": "string",
                        "description": "Vendor name"
                    },
                    "description": {
                        "type": "string",
                        "description": "Vendor description"
                    }
                },
                "required": ["organization_id", "name"]
            }
        }
    }
]

# Make a request with function calling
messages = [
    {"role": "user", "content": "List all high-priority risks"}
]
response = openai_client.chat.completions.create(
    model="gpt-4-turbo-preview",
    messages=messages,
    tools=tools,
    tool_choice="auto"
)

# Handle tool calls
assistant_message = response.choices[0].message
if assistant_message.tool_calls:
    # The assistant message containing the tool calls must be added back
    # to the conversation before the tool results
    messages.append(assistant_message)

    for tool_call in assistant_message.tool_calls:
        function_name = tool_call.function.name
        function_args = json.loads(tool_call.function.arguments)

        # Call Probo MCP
        result = probo.call_tool(function_name, function_args)

        # Add the tool result to the conversation
        messages.append({
            "role": "tool",
            "tool_call_id": tool_call.id,
            "content": json.dumps(result)
        })

    # Get final response
    final_response = openai_client.chat.completions.create(
        model="gpt-4-turbo-preview",
        messages=messages
    )
    print(final_response.choices[0].message.content)
else:
    print(assistant_message.content)
Node.js:

import OpenAI from 'openai';
import fetch from 'node-fetch';

class ProboMCPClient {
  constructor(baseUrl, apiToken) {
    this.baseUrl = baseUrl;
    this.apiToken = apiToken;
  }

  async callTool(toolName, args) {
    const response = await fetch(`${this.baseUrl}/tools/${toolName}`, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${this.apiToken}`,
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(args)
    });
    return await response.json();
  }
}

// Initialize clients
const probo = new ProboMCPClient(
  'https://your-probo-instance.com/api/mcp',
  process.env.PROBO_API_TOKEN
);
const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});

// Define Probo tools
const tools = [
  {
    type: 'function',
    function: {
      name: 'listOrganizations',
      description: 'List all organizations the user has access to',
      parameters: {
        type: 'object',
        properties: {},
        required: []
      }
    }
  },
  {
    type: 'function',
    function: {
      name: 'listVendors',
      description: 'List all vendors for the organization',
      parameters: {
        type: 'object',
        properties: {
          organization_id: {
            type: 'string',
            description: 'Organization ID'
          }
        },
        required: ['organization_id']
      }
    }
  }
];

// Make request with function calling
async function chat(userMessage) {
  const messages = [{ role: 'user', content: userMessage }];

  const response = await openai.chat.completions.create({
    model: 'gpt-4-turbo-preview',
    messages: messages,
    tools: tools,
    tool_choice: 'auto'
  });

  const assistantMessage = response.choices[0].message;

  // Handle tool calls
  if (assistantMessage.tool_calls) {
    // Keep the assistant message (with its tool calls) in the conversation
    messages.push(assistantMessage);

    for (const toolCall of assistantMessage.tool_calls) {
      const functionName = toolCall.function.name;
      const functionArgs = JSON.parse(toolCall.function.arguments);

      // Call Probo MCP
      const result = await probo.callTool(functionName, functionArgs);

      messages.push({
        role: 'tool',
        tool_call_id: toolCall.id,
        content: JSON.stringify(result)
      });
    }

    // Get final response
    const finalResponse = await openai.chat.completions.create({
      model: 'gpt-4-turbo-preview',
      messages: messages
    });
    return finalResponse.choices[0].message.content;
  }

  return assistantMessage.content;
}

// Usage
const answer = await chat('Show me all vendors in my organization');
console.log(answer);

Method 2: Custom GPT Actions

Create a custom GPT in ChatGPT with Probo integration.

Create an OpenAPI spec for your Probo MCP endpoints:

openapi: 3.0.0
info:
  title: Probo Compliance API
  version: 1.0.0
  description: API for managing compliance data in Probo
servers:
  - url: https://your-probo-instance.com/api/mcp
security:
  - BearerAuth: []
paths:
  /tools/listOrganizations:
    post:
      summary: List organizations
      operationId: listOrganizations
      responses:
        '200':
          description: Successful response
          content:
            application/json:
              schema:
                type: object
                properties:
                  organizations:
                    type: array
                    items:
                      type: object
  /tools/listVendors:
    post:
      summary: List vendors
      operationId: listVendors
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                organization_id:
                  type: string
              required:
                - organization_id
      responses:
        '200':
          description: Successful response
  /tools/addVendor:
    post:
      summary: Add a vendor
      operationId: addVendor
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              properties:
                organization_id:
                  type: string
                name:
                  type: string
                description:
                  type: string
              required:
                - organization_id
                - name
      responses:
        '200':
          description: Vendor created successfully
components:
  securitySchemes:
    BearerAuth:
      type: http
      scheme: bearer
  1. Go to ChatGPT
  2. Click on your profile → “My GPTs”
  3. Click “Create a GPT”
  4. Configure the GPT:

Name: Probo Compliance Assistant

Description: Manage your compliance data, risks, vendors, and audits through natural conversation.

Instructions:

You are a compliance management assistant with access to Probo compliance data.
You can help users:
- List and manage organizations, vendors, and people
- Create and update risk assessments
- Track compliance measures and frameworks
- Manage assets and data classifications
- Handle nonconformities and obligations
- Track audits and continual improvements
Always confirm before making changes to compliance data.
Provide clear summaries of compliance status.
Suggest actions to improve compliance posture.

Actions: Import your OpenAPI specification

Authentication: API Key (Bearer token)

In the Custom GPT actions settings:

  1. Authentication Type: API Key
  2. API Key: your_probo_api_token
  3. Auth Type: Bearer
  4. Custom Header Name: Authorization
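
Before wiring the token into the custom GPT, it can help to confirm it is accepted by your Probo MCP endpoint directly. A minimal sketch in Python, assuming the same /tools/<name> endpoint pattern used above and the listOrganizations tool, which takes no arguments; the URL and token values are placeholders:

import requests

PROBO_URL = "https://your-probo-instance.com/api/mcp"   # your instance
PROBO_TOKEN = "your_probo_api_token"                    # the token the GPT will use

# Call a read-only tool to verify the token is accepted
response = requests.post(
    f"{PROBO_URL}/tools/listOrganizations",
    headers={
        "Authorization": f"Bearer {PROBO_TOKEN}",
        "Content-Type": "application/json",
    },
    json={},
)
print(response.status_code)
print(response.json())

A 200 response with your organizations confirms the token and base URL are correct before you repeat them in the GPT action settings.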

Try these prompts:

"What organizations do I have access to?"
"List all vendors for organization org_xxx"
"Add a new vendor called CloudBackup Inc"
"Show me all high-priority risks"

If you have access to ChatGPT plugins (legacy system):

Create .well-known/ai-plugin.json:

{
  "schema_version": "v1",
  "name_for_human": "Probo Compliance",
  "name_for_model": "probo_compliance",
  "description_for_human": "Manage compliance data, risks, and audits",
  "description_for_model": "Access and manage compliance data including organizations, vendors, risks, measures, frameworks, assets, data classifications, nonconformities, obligations, continual improvements, and audits.",
  "auth": {
    "type": "user_http",
    "authorization_type": "bearer"
  },
  "api": {
    "type": "openapi",
    "url": "https://your-probo-instance.com/.well-known/openapi.yaml"
  },
  "logo_url": "https://your-probo-instance.com/logo.png",
  "contact_email": "support@your-domain.com",
  "legal_info_url": "https://your-probo-instance.com/legal"
}
  1. Host the manifest at https://your-probo-instance.com/.well-known/ai-plugin.json
  2. Host the OpenAPI spec at https://your-probo-instance.com/.well-known/openapi.yaml
  3. Enable CORS for chat.openai.com (a minimal sketch follows this list)
  4. Install the plugin in ChatGPT settings
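
A minimal sketch of serving the manifest and spec with CORS enabled for chat.openai.com, assuming a Flask app and that both files live in a local .well-known/ directory; the port and paths are illustrative, not part of the Probo setup:

from flask import Flask, send_from_directory

app = Flask(__name__)

ALLOWED_ORIGIN = "https://chat.openai.com"  # origin ChatGPT calls from

@app.after_request
def add_cors_headers(response):
    # Allow ChatGPT to fetch the manifest and the OpenAPI spec
    response.headers["Access-Control-Allow-Origin"] = ALLOWED_ORIGIN
    response.headers["Access-Control-Allow-Headers"] = "Authorization, Content-Type"
    return response

@app.route("/.well-known/<path:filename>")
def well_known(filename):
    # Serves .well-known/ai-plugin.json and .well-known/openapi.yaml
    return send_from_directory(".well-known", filename)

if __name__ == "__main__":
    app.run(port=3333)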

Method 3: API Integration

Build a web application that combines OpenAI and Probo:

# app.py - Flask application
import json
import os

from flask import Flask, request, jsonify
import openai
import requests

app = Flask(__name__)

class ComplianceAssistant:
    def __init__(self, openai_key, probo_url, probo_token):
        self.openai_client = openai.OpenAI(api_key=openai_key)
        self.probo_url = probo_url
        self.probo_token = probo_token

    def get_probo_tools(self):
        # Define all Probo MCP tools as OpenAI functions
        return [
            {
                "type": "function",
                "function": {
                    "name": "listRisks",
                    "description": "List all risks for the organization",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "organization_id": {"type": "string"}
                        },
                        "required": ["organization_id"]
                    }
                }
            }
            # Add more tools...
        ]

    def call_probo_tool(self, tool_name, args):
        response = requests.post(
            f"{self.probo_url}/tools/{tool_name}",
            headers={
                "Authorization": f"Bearer {self.probo_token}",
                "Content-Type": "application/json"
            },
            json=args
        )
        return response.json()

    def chat(self, message, conversation_history=None):
        messages = (conversation_history or []) + [
            {"role": "user", "content": message}
        ]
        response = self.openai_client.chat.completions.create(
            model="gpt-4-turbo-preview",
            messages=messages,
            tools=self.get_probo_tools(),
            tool_choice="auto"
        )

        # Handle tool calls
        assistant_message = response.choices[0].message
        if assistant_message.tool_calls:
            messages.append(assistant_message)
            for tool_call in assistant_message.tool_calls:
                result = self.call_probo_tool(
                    tool_call.function.name,
                    json.loads(tool_call.function.arguments)
                )
                messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": json.dumps(result)
                })
            final_response = self.openai_client.chat.completions.create(
                model="gpt-4-turbo-preview",
                messages=messages
            )
            return final_response.choices[0].message.content

        return assistant_message.content

assistant = ComplianceAssistant(
    openai_key=os.getenv("OPENAI_API_KEY"),
    probo_url=os.getenv("PROBO_URL"),
    probo_token=os.getenv("PROBO_TOKEN")
)

@app.route('/chat', methods=['POST'])
def chat():
    data = request.json
    response = assistant.chat(
        data['message'],
        data.get('history', [])
    )
    return jsonify({"response": response})

if __name__ == '__main__':
    app.run(debug=True)
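
A quick way to exercise the endpoint once the app is running locally; this sketch assumes the Flask default port (5000) and the /chat payload shape defined above:

import requests

# Send a message to the local compliance assistant
resp = requests.post(
    "http://127.0.0.1:5000/chat",
    json={"message": "Show me all vendors in my organization", "history": []},
)
print(resp.json()["response"])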

Keep the following security considerations in mind when integrating with OpenAI:

  1. Never expose API tokens in client-side code
  2. Use server-side proxies for API calls
  3. Validate all inputs before calling Probo MCP (see the sketch after this list)
  4. Implement rate limiting to prevent abuse
  5. Log all API calls for audit trails
  6. Use environment variables for secrets
  7. Implement proper error handling
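
A minimal sketch of items 3 and 4 using only the Python standard library; the ID pattern, limits, and helper names are illustrative assumptions, not part of the Probo API:

import re
import time
from collections import defaultdict, deque

ORG_ID_PATTERN = re.compile(r"^[A-Za-z0-9_-]+$")  # assumed ID format, adjust to your IDs
MAX_CALLS_PER_MINUTE = 30                          # illustrative limit

_recent_calls = defaultdict(deque)  # client id -> timestamps of recent calls

def validate_tool_args(args):
    """Reject obviously malformed arguments before they reach Probo MCP."""
    org_id = args.get("organization_id", "")
    if not ORG_ID_PATTERN.match(org_id):
        raise ValueError("invalid organization_id")
    return args

def check_rate_limit(client_id):
    """Naive sliding-window limiter: at most MAX_CALLS_PER_MINUTE per client."""
    now = time.time()
    calls = _recent_calls[client_id]
    while calls and now - calls[0] > 60:
        calls.popleft()
    if len(calls) >= MAX_CALLS_PER_MINUTE:
        raise RuntimeError("rate limit exceeded")
    calls.append(now)

In the Flask application above, these checks would run inside the /chat route before assistant.chat is called.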
Beyond the security checklist, follow these best practices:

  1. Cache Responses: Cache Probo MCP responses to reduce API calls (a caching sketch follows this list)
  2. Batch Operations: Group multiple operations when possible
  3. Validate Data: Always validate data before updating compliance records
  4. Error Handling: Provide clear error messages to users
  5. User Confirmation: Require confirmation for destructive operations
  6. Audit Logging: Log all changes made through the AI assistant
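
A minimal caching sketch for read-only tools, assuming a short TTL is acceptable for data that changes infrequently; the wrapper name, tool list, and TTL are illustrative, and probo is the ProboMCPClient from Method 1:

import json
import time

CACHE_TTL_SECONDS = 60        # illustrative; tune to how fresh data must be
_cache = {}                   # (tool_name, serialized args) -> (timestamp, result)

READ_ONLY_TOOLS = {"listOrganizations", "listVendors", "listRisks"}

def cached_call_tool(probo, tool_name, args):
    """Call Probo MCP, reusing recent results for read-only tools."""
    if tool_name not in READ_ONLY_TOOLS:
        return probo.call_tool(tool_name, args)   # never cache writes

    key = (tool_name, json.dumps(args, sort_keys=True))
    now = time.time()
    if key in _cache and now - _cache[key][0] < CACHE_TTL_SECONDS:
        return _cache[key][1]

    result = probo.call_tool(tool_name, args)
    _cache[key] = (now, result)
    return result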

Need help with OpenAI integration? Check the Probo documentation or reach out through the project's GitHub repository.