// Dependencies: axios for the HTTP request, fs/path to read the input image.
const axios = require('axios');
const fs = require('fs');
const path = require('path');
/**
 * Read an image file from disk and return its base64 encoding.
 *
 * Note: the read itself is synchronous (readFileSync); the function is
 * declared async so callers receive a Promise<string> and can `await` it.
 *
 * @param {string} imgPath - Relative or absolute path to the image file.
 * @returns {Promise<string>} Base64-encoded file contents.
 * @throws If the file does not exist or cannot be read.
 */
async function toB64(imgPath) {
    const data = fs.readFileSync(path.resolve(imgPath));
    return Buffer.from(data).toString('base64');
}
// Authentication and endpoint configuration.
// TODO: replace with your real API key; the x-remaining-credits response
// header reports how many credits remain on the account.
const api_key = "YOUR API-KEY";
const url = ""; // TODO: image-to-image endpoint URL from the API docs

// Request payload — see the parameter reference below for valid ranges.
const data = {
  "image": "", // filled in with the base64 input image before sending
  "samples": 1, // 1-4; affects pricing
  "prompt": "a beautiful fashion model, wearing a red polka dress, red door background. hyperrealistic. photorealism, 4k, extremely detailed",
  "negative_prompt": "Disfigured, cartoon, blurry, nude",
  "scheduler": "UniPC",
  "num_inference_steps": 25, // 20-100; affects pricing
  "guidance_scale": 7.5, // 0.1-25
  "strength": 1, // 0.1-1; how much to transform the reference image
  "seed": 919194474388, // -1 per docs default
  "base64": false
};
// Send the image-to-image request and print the generated result.
(async function () {
    try {
        // Encode the local input image and attach it to the payload.
        // TODO: replace with the path to your input image.
        data.image = await toB64('input.png');
        const response = await axios.post(url, data, {
            headers: { 'x-api-key': api_key }
        });
        console.log(response.data);
    } catch (error) {
        // Prefer the API's error body when the server responded; otherwise
        // fall back to the transport-level error message.
        console.error('Error:', error.response ? error.response.data : error.message);
    }
})();
HTTP Response Codes

200 - OK — Image generated
401 - Unauthorized — User authentication failed
404 - Not Found — The requested URL does not exist
405 - Method Not Allowed — The requested HTTP method is not allowed
406 - Not Acceptable — Not enough credits
500 - Server Error — Server had some issue with processing


image : image (required)

Input Image

samples : int ( default: 1 ) — Affects Pricing

Number of samples to generate.

min : 1,

max : 4

prompt : str (required)

Prompt to render

negative_prompt : str ( default: None )

Prompts to exclude, e.g. 'bad anatomy, bad hands, missing fingers'

scheduler : enum:str ( default: UniPC )

Type of scheduler.

Allowed values: see the API reference for the full list of schedulers.

num_inference_steps : int ( default: 20 ) — Affects Pricing

Number of denoising steps.

min : 20,

max : 100

guidance_scale : float ( default: 7.5 )

Scale for classifier-free guidance

min : 0.1,

max : 25

strength : float ( default: 1 )

How much to transform the reference image

min : 0.1,

max : 1

seed : int ( default: -1 )

Seed for image generation.

base64 : boolean ( default: 1 )

Base64 encoding of the output image.

To keep track of your credit usage, you can inspect the response headers of each API call. The x-remaining-credits property will indicate the number of remaining credits in your account. Ensure you monitor this value to avoid any disruptions in your API usage.

ControlNet Openpose SD1.5

ControlNet OpenPose: A Fusion of Precision and Power in Human Pose Estimation. Dive into the world of advanced computer vision with ControlNet OpenPose, a unique blend of ControlNet's capabilities and OpenPose's renowned human pose estimation prowess. This integration not only elevates the features of both systems but also offers users unparalleled control within the Stable Diffusion framework.

At the heart of ControlNet OpenPose lies a synergistic combination of ControlNet's robust control mechanisms and OpenPose's state-of-the-art pose estimation algorithms. This architecture is meticulously designed to process vast amounts of visual data, ensuring accurate and real-time human pose detection and manipulation.


  1. Enhanced Precision: By merging ControlNet with OpenPose, users experience a significant boost in the accuracy of pose estimation.

  2. Real-time Manipulation: The integrated system allows for instantaneous adjustments and control of human poses.

  3. Versatility: Suitable for a range of applications, from animation to fitness tracking, thanks to its comprehensive feature set.

  4. Optimized for Stable Diffusion: Achieve controlled and targeted results within the Stable Diffusion framework, especially when working with human subjects.

Use Cases

  1. Film and Animation: Animators can use ControlNet OpenPose to create realistic human movements and postures in animated sequences.

  2. Fashion and Apparel Design: Designers can create virtual models with accurate human poses for fitting and design visualizations, leading to more precise clothing design and cost-cutting production processes.

ControlNet Openpose License

ControlNet Openpose, in its commitment to ethical AI practices, has embraced the CreativeML OpenRAIL M license. This decision not only underscores the model's dedication to responsible AI but also aligns it with the principles set forth by BigScience and the RAIL Initiative. Their collaborative work in AI ethics and responsibility has set the benchmark for licenses like the OpenRAIL M.