Client
PrompticClient (sync) and AsyncPrompticClient (async) provide full access to the Promptic REST API from Python. Both have identical method signatures.
Constructor
from promptic_sdk import PrompticClient, AsyncPrompticClient
# Sync
client = PrompticClient(
    api_key=None,       # str | None
    access_token=None,  # str | None — session token from device login
    workspace_id=None,  # str | None — required with access_token
    endpoint=None,      # str | None — default: https://promptic.eu
    timeout=30.0,       # float — request timeout in seconds
)
# Async
client = AsyncPrompticClient(...)  # Same parameters

Both support the context manager protocol:

with PrompticClient() as client:
    ...

async with AsyncPrompticClient() as client:
    ...
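For example, the two authentication modes might be wired up like this; the environment variable name and the token placeholders are illustrative, not part of the SDK:

import os

from promptic_sdk import PrompticClient

# API-key auth; the env var name here is just an example
client = PrompticClient(api_key=os.environ.get("PROMPTIC_API_KEY"))

# Session-token auth from device login; workspace_id is required alongside it
client = PrompticClient(
    access_token="<session-token>",
    workspace_id="<workspace-id>",
)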
Workspace

client.get_workspace() -> Workspace

Returns the current workspace details.
Components
client.list_components() -> ComponentList
client.create_component(name, *, description=None) -> ComponentCreated
client.get_component(component_id) -> Component
client.delete_component(component_id) -> None
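A minimal sketch of the component lifecycle; the name, the description, and the .id attribute on the response are assumptions:

with PrompticClient() as client:
    created = client.create_component(
        "ticket-classifier",                   # placeholder name
        description="Routes support tickets",  # placeholder description
    )
    component = client.get_component(created.id)  # assumes ComponentCreated exposes .id
    print(client.list_components())
    client.delete_component(created.id)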
Experiments

client.list_experiments(
    *, component_id=None, status=None, limit=50, offset=0
) -> ExperimentList
client.create_experiment(
    ai_component_id,
    target_model,
    *,
    task_type="classification",  # "classification" | "textGeneration" | "structuredOutput"
    initial_prompt=None,
    name=None,
    description=None,
    provider="openai",           # "openai" | "openrouter" | "custom" | "google"
    optimizer="prompticV2",      # "promptic" | "prompticV2" | "miproV2" | "bootstrapFewShot" | "gepa"
    hyperparameters=None,        # dict with epochs, trainSplitRatio, numFewShots, enableCot
    initial_prediction_model_schema=None,
) -> Experiment
client.get_experiment(experiment_id) -> Experiment
client.update_experiment(experiment_id, **updates) -> Experiment
client.delete_experiment(experiment_id) -> None
client.start_experiment(experiment_id) -> ExperimentStarted
client.duplicate_experiment(
    experiment_id,
    *,
    continue_from_optimized=False,  # seed initial prompt from the source's best iteration
    initial_prompt_override=None,   # explicit initial prompt; ignored if continue_from_optimized=True
) -> Experiment

duplicate_experiment clones a source experiment under the same AI component,
copying its observations and evaluators. By default the new experiment starts
from the source's initial prompt. Pass continue_from_optimized=True to seed
it from the source's best optimized iteration instead — useful for chaining
optimization runs after promising results.
The response includes a modelUnavailable flag, set when the source's target
model is no longer available in the workspace; update the model before
calling start_experiment.
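Putting the pieces together, a hedged sketch of creating, starting, and chaining experiments. The IDs and model names are placeholders, and the .id attribute, the snake_case model_unavailable field, and the target_model update key are assumptions about the Python models:

with PrompticClient() as client:
    exp = client.create_experiment(
        "comp-123",     # placeholder component id
        "gpt-4o-mini",  # placeholder target model
        task_type="classification",
        initial_prompt="Classify the support ticket intent.",
        hyperparameters={"epochs": 3, "trainSplitRatio": 0.8},
    )
    client.start_experiment(exp.id)  # assumes Experiment exposes .id

    # After a promising run, chain a follow-up seeded from the best iteration
    follow_up = client.duplicate_experiment(exp.id, continue_from_optimized=True)
    if getattr(follow_up, "model_unavailable", False):  # assumed mapping of modelUnavailable
        follow_up = client.update_experiment(follow_up.id, target_model="gpt-4o")  # assumed update key
    client.start_experiment(follow_up.id)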
Observations
client.list_observations(experiment_id) -> ObservationList
client.create_observations(
    experiment_id,
    observations,  # list[dict] — each with "variables" (input values), "expected", optional "split"
) -> ObservationList
client.update_observation(experiment_id, observation_id, **data) -> Observation
client.delete_observation(experiment_id, observation_id) -> None
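For example, uploading labeled observations; the variable names and the "eval" split value are illustrative (only the "variables", "expected", and "split" keys are specified above):

client.create_observations(
    "exp-123",  # placeholder experiment id
    [
        {"variables": {"ticket": "Where is my refund?"}, "expected": "billing"},
        {"variables": {"ticket": "App crashes on login"}, "expected": "bug", "split": "eval"},
    ],
)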
Evaluators

client.list_evaluators(experiment_id) -> EvaluatorList
client.create_evaluators(
    experiment_id,
    evaluators,  # list[dict] — each with "name", "type", "weight", optional "config"
) -> EvaluatorList
client.update_evaluator(experiment_id, evaluator_id, **data) -> Evaluator
client.delete_evaluator(experiment_id, evaluator_id) -> None
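For example, attaching a weighted evaluator; the "type" value below is hypothetical, since this page does not enumerate the supported evaluator types:

client.create_evaluators(
    "exp-123",  # placeholder experiment id
    [{"name": "exact match", "type": "exactMatch", "weight": 1.0}],  # "exactMatch" is a hypothetical type
)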
Iterations

client.list_iterations(experiment_id) -> IterationList
client.get_iteration(experiment_id, iteration_id) -> IterationWithScores
client.get_best_iteration(experiment_id) -> IterationWithScores

Iterations return both overallNormalizedScore (train split) and
evalNormalizedScore (held-out eval split, null when no trainSplitRatio
is configured on the experiment). get_best_iteration ranks by the eval
score when available, otherwise by the overall score.
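Reading scores might look like this; the snake_case attribute names are an assumption about how the SDK maps the API's camelCase fields:

best = client.get_best_iteration("exp-123")  # placeholder experiment id
print(best.overall_normalized_score)  # assumed mapping of overallNormalizedScore
print(best.eval_normalized_score)     # assumed mapping; None without a trainSplitRatio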
Deployments
client.get_deployment(component_id) -> Deployment | None
client.deploy(component_id, experiment_id) -> DeploymentCreated
client.undeploy(component_id) -> None
client.get_deployed_prompt(component_id) -> DeployedPrompt | None
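A minimal sketch of the deploy lifecycle, using placeholder IDs:

client.deploy("comp-123", "exp-123")
prompt = client.get_deployed_prompt("comp-123")
if prompt is None:
    print("no prompt deployed for this component")
client.undeploy("comp-123")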
Traces

client.list_traces(
    *, limit=50, offset=0, status=None, start_after=None, start_before=None
) -> TraceList
client.get_trace(trace_id) -> Trace
client.get_stats(*, days_back=30) -> TracingStats
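For example, filtering recent traces and pulling aggregate stats; the status value below is a guess, since this page does not enumerate the allowed statuses:

traces = client.list_traces(limit=20, status="error")  # "error" is a guessed status value
stats = client.get_stats(days_back=7)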
Datasets

client.create_dataset(
    component_id, name, *, description=None, trace_ids=None
) -> Dataset
client.list_datasets(component_id) -> DatasetList
client.get_dataset(component_id, dataset_id) -> DatasetWithItems
client.delete_dataset(component_id, dataset_id) -> None
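For example, snapshotting traces into a dataset; the IDs are placeholders and the .id attribute on Dataset is assumed:

dataset = client.create_dataset(
    "comp-123",                  # placeholder component id
    "regression set",
    description="Traces from last week's incident",
    trace_ids=["tr-1", "tr-2"],  # placeholder trace ids
)
full = client.get_dataset("comp-123", dataset.id)  # assumes Dataset exposes .id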
Runs

client.create_run(
    component_id, dataset_id, *, name=None, trace_ids=None
) -> Run
client.list_runs(component_id) -> RunList
client.get_run(component_id, run_id) -> RunWithTraces
client.delete_run(component_id, run_id) -> None
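A minimal sketch of creating and fetching a run; the IDs are placeholders and the .id attribute on Run is assumed:

run = client.create_run("comp-123", "ds-123", name="nightly check")
details = client.get_run("comp-123", run.id)  # assumes Run exposes .id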
Annotations

client.upsert_annotation(
    component_id, run_id, trace_db_id,
    *, rating=None, comment=None  # rating: "positive" | "negative"
) -> Annotation
client.list_annotations(component_id, run_id) -> AnnotationList
client.list_dataset_annotations(component_id, dataset_id) -> AnnotationList
client.delete_annotation(component_id, run_id, annotation_id) -> None
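For example, flagging a bad trace within a run (all IDs are placeholders):

client.upsert_annotation(
    "comp-123", "run-123", "trace-db-456",
    rating="negative",
    comment="hallucinated a refund policy",
)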
Evaluations

client.create_evaluation(
    component_id, dataset_id,
    *, name=None, run_id=None
) -> AgentEvaluation
client.list_evaluations(component_id) -> AgentEvaluationList
client.get_evaluation(component_id, evaluation_id) -> AgentEvaluation
client.wait_for_evaluation(
    component_id, evaluation_id,
    *, max_wait=300, poll_interval=2
) -> AgentEvaluation

wait_for_evaluation polls until the evaluation status is completed or failed.
Raises TimeoutError if max_wait seconds elapse.
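For example, starting an evaluation and blocking until it finishes; the IDs are placeholders and the .id attribute on AgentEvaluation is assumed:

evaluation = client.create_evaluation("comp-123", "ds-123", name="baseline")
try:
    result = client.wait_for_evaluation("comp-123", evaluation.id, max_wait=600)
except TimeoutError:
    result = client.get_evaluation("comp-123", evaluation.id)  # inspect the status later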
Error handling
All methods raise PrompticAPIError on API errors:
from promptic_sdk import PrompticClient, PrompticAPIError
with PrompticClient() as client:
    try:
        exp = client.get_experiment("invalid-id")
    except PrompticAPIError as e:
        print(e.status_code)  # 404
        print(e.message)      # "Not found"