Readme
This model doesn't have a readme.
SDXL Canny controlnet with LoRA support.
Run this model in Node.js with one line of code:
npm install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
// Import the Replicate client and the promise-based fs API.
// (fs is used later in this example to write the generated image to disk,
// but the original snippet never imported it.)
import Replicate from "replicate";
import fs from "node:fs/promises";

// Authenticate using the REPLICATE_API_TOKEN environment variable.
const replicate = new Replicate({
  auth: process.env.REPLICATE_API_TOKEN,
});
Run pnyompen/sdxl-controlnet-lora-small using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
// Create a prediction and wait for it to finish.
// `output` is an array of file outputs (one per requested image).
const output = await replicate.run(
  "pnyompen/sdxl-controlnet-lora-small:d4cdee63b0fd50ec2fbff69e7b20bfca8dc556ee737a957ad8c0166f34359727",
  {
    input: {
      image: "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
      prompt: "shot in the style of sksfer, a woman in alaska",
      img2img: false,
      strength: 0.8,
      clip_skip: 2,
      remove_bg: false,
      scheduler: "K_EULER",
      lora_scale: 0.95,
      num_outputs: 1,
      lora_weights: "https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar",
      guidance_scale: 7.5,
      condition_scale: 0.5,
      negative_prompt: "",
      ip_adapter_scale: 1,
      num_inference_steps: 40,
      auto_generate_caption: false,
      generated_caption_weight: 0.5,
    },
  }
);

// To access the file URL:
console.log(output[0].url()); //=> "http://example.com"

// To write the file to disk — requires the promise-based API
// (`import fs from "node:fs/promises"`); await the write so any
// error is surfaced instead of being silently dropped:
await fs.writeFile("my-image.png", output[0]);
To learn more, take a look at the guide on getting started with Node.js.
pip install replicate
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
import replicate
Run pnyompen/sdxl-controlnet-lora-small using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Inputs for the prediction; see the model's schema for the meaning
# and valid range of each field.
model_input = {
    "image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
    "prompt": "shot in the style of sksfer, a woman in alaska",
    "img2img": False,
    "strength": 0.8,
    "clip_skip": 2,
    "remove_bg": False,
    "scheduler": "K_EULER",
    "lora_scale": 0.95,
    "num_outputs": 1,
    "lora_weights": "https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar",
    "guidance_scale": 7.5,
    "condition_scale": 0.5,
    "negative_prompt": "",
    "ip_adapter_scale": 1,
    "num_inference_steps": 40,
    "auto_generate_caption": False,
    "generated_caption_weight": 0.5,
}

# Run the pinned model version and print the resulting output URLs.
output = replicate.run(
    "pnyompen/sdxl-controlnet-lora-small:d4cdee63b0fd50ec2fbff69e7b20bfca8dc556ee737a957ad8c0166f34359727",
    input=model_input,
)
print(output)
To learn more, take a look at the guide on getting started with Python.
REPLICATE_API_TOKEN
environment variable: export REPLICATE_API_TOKEN=<paste-your-token-here>
Find your API token in your account settings.
Run pnyompen/sdxl-controlnet-lora-small using Replicate’s API. Check out the model's schema for an overview of inputs and outputs.
# Create a prediction with the pinned model version. The "Prefer: wait"
# header asks the API to block until the prediction finishes, so the
# response body contains the final output instead of a pending record.
# $REPLICATE_API_TOKEN must be exported in the environment beforehand.
curl -s -X POST \
-H "Authorization: Bearer $REPLICATE_API_TOKEN" \
-H "Content-Type: application/json" \
-H "Prefer: wait" \
-d $'{
"version": "pnyompen/sdxl-controlnet-lora-small:d4cdee63b0fd50ec2fbff69e7b20bfca8dc556ee737a957ad8c0166f34359727",
"input": {
"image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
"prompt": "shot in the style of sksfer, a woman in alaska",
"img2img": false,
"strength": 0.8,
"clip_skip": 2,
"remove_bg": false,
"scheduler": "K_EULER",
"lora_scale": 0.95,
"num_outputs": 1,
"lora_weights": "https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar",
"guidance_scale": 7.5,
"condition_scale": 0.5,
"negative_prompt": "",
"ip_adapter_scale": 1,
"num_inference_steps": 40,
"auto_generate_caption": false,
"generated_caption_weight": 0.5
}
}' \
https://api.replicate.com/v1/predictions
To learn more, take a look at Replicate’s HTTP API reference docs.
Add a payment method to run this model.
By signing in, you agree to our
terms of service and privacy policy
{
"completed_at": "2024-06-23T06:02:03.981923Z",
"created_at": "2024-06-23T06:01:43.851000Z",
"data_removed": false,
"error": null,
"id": "9e7sq1d8ndrgj0cg8eqsa578s4",
"input": {
"image": "https://replicate.delivery/pbxt/JiOTMCHj4oGrTTf8Pg2r7vyI8YdXc5jL2IDyC2SfhuggjYe6/out-0%20%281%29.png",
"prompt": "shot in the style of sksfer, a woman in alaska",
"img2img": false,
"strength": 0.8,
"scheduler": "K_EULER",
"lora_scale": 0.95,
"num_outputs": 1,
"lora_weights": "https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar",
"guidance_scale": 7.5,
"condition_scale": 0.5,
"negative_prompt": "",
"num_inference_steps": 40,
"auto_generate_caption": false
},
"logs": "Using seed: 47765\nloading custom weights\nweights not in cache\nEnsuring enough disk space...\nFree disk space: 1634558648320\nDownloading weights: https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar\ndownloading https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar\n2024-06-23T06:01:44Z | INFO | [ Initiating ] chunk_size=150M dest=./weights-cache/931be18428e37365 url=https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar\n2024-06-23T06:01:46Z | INFO | [ Complete ] dest=./weights-cache/931be18428e37365 size=\"186 MB\" total_elapsed=2.434s url=https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar\nb''\nDownloaded weights in 2.5596249103546143 seconds\nLoading fine-tuned model\nDoes not have Unet. assume we are using LoRA\nLoading Unet LoRA\nOriginal width:1024, height:1024\nAspect Ratio: 1.00\nnew_width:1024, new_height:1024\nPrompt: shot in the style of <s0><s1>, a woman in alaska\ntext2img mode\n 0%| | 0/40 [00:00<?, ?it/s]/root/.pyenv/versions/3.9.19/lib/python3.9/site-packages/diffusers/models/attention_processor.py:1468: FutureWarning: `LoRAAttnProcessor2_0` is deprecated and will be removed in version 0.26.0. Make sure use AttnProcessor2_0 instead by settingLoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. 
This will be done automatically when using `LoraLoaderMixin.load_lora_weights`\ndeprecate(\n 2%|▎ | 1/40 [00:00<00:14, 2.74it/s]\n 5%|▌ | 2/40 [00:00<00:13, 2.73it/s]\n 8%|▊ | 3/40 [00:01<00:13, 2.73it/s]\n 10%|█ | 4/40 [00:01<00:13, 2.73it/s]\n 12%|█▎ | 5/40 [00:01<00:12, 2.73it/s]\n 15%|█▌ | 6/40 [00:02<00:12, 2.74it/s]\n 18%|█▊ | 7/40 [00:02<00:12, 2.74it/s]\n 20%|██ | 8/40 [00:02<00:11, 2.74it/s]\n 22%|██▎ | 9/40 [00:03<00:11, 2.74it/s]\n 25%|██▌ | 10/40 [00:03<00:10, 2.74it/s]\n 28%|██▊ | 11/40 [00:04<00:10, 2.73it/s]\n 30%|███ | 12/40 [00:04<00:10, 2.73it/s]\n 32%|███▎ | 13/40 [00:04<00:09, 2.73it/s]\n 35%|███▌ | 14/40 [00:05<00:09, 2.73it/s]\n 38%|███▊ | 15/40 [00:05<00:09, 2.73it/s]\n 40%|████ | 16/40 [00:05<00:08, 2.73it/s]\n 42%|████▎ | 17/40 [00:06<00:08, 2.73it/s]\n 45%|████▌ | 18/40 [00:06<00:08, 2.73it/s]\n 48%|████▊ | 19/40 [00:06<00:07, 2.73it/s]\n 50%|█████ | 20/40 [00:07<00:07, 2.73it/s]\n 52%|█████▎ | 21/40 [00:07<00:06, 2.73it/s]\n 55%|█████▌ | 22/40 [00:08<00:06, 2.73it/s]\n 57%|█████▊ | 23/40 [00:08<00:06, 2.73it/s]\n 60%|██████ | 24/40 [00:08<00:05, 2.73it/s]\n 62%|██████▎ | 25/40 [00:09<00:05, 2.73it/s]\n 65%|██████▌ | 26/40 [00:09<00:05, 2.73it/s]\n 68%|██████▊ | 27/40 [00:09<00:04, 2.73it/s]\n 70%|███████ | 28/40 [00:10<00:04, 2.73it/s]\n 72%|███████▎ | 29/40 [00:10<00:04, 2.73it/s]\n 75%|███████▌ | 30/40 [00:10<00:03, 2.73it/s]\n 78%|███████▊ | 31/40 [00:11<00:03, 2.72it/s]\n 80%|████████ | 32/40 [00:11<00:02, 2.72it/s]\n 82%|████████▎ | 33/40 [00:12<00:02, 2.72it/s]\n 85%|████████▌ | 34/40 [00:12<00:02, 2.72it/s]\n 88%|████████▊ | 35/40 [00:12<00:01, 2.72it/s]\n 90%|█████████ | 36/40 [00:13<00:01, 2.72it/s]\n 92%|█████████▎| 37/40 [00:13<00:01, 2.72it/s]\n 95%|█████████▌| 38/40 [00:13<00:00, 2.72it/s]\n 98%|█████████▊| 39/40 [00:14<00:00, 2.72it/s]\n100%|██████████| 40/40 [00:14<00:00, 2.72it/s]\n100%|██████████| 40/40 [00:14<00:00, 2.73it/s]",
"metrics": {
"predict_time": 20.08523615,
"total_time": 20.130923
},
"output": [
"https://replicate.delivery/pbxt/PtfoygW38dw4dqfWutkOcYUDelpveL0oVb6BKoFvDVioFbFMB/out-0.png"
],
"started_at": "2024-06-23T06:01:43.896686Z",
"status": "succeeded",
"urls": {
"get": "https://api.replicate.com/v1/predictions/9e7sq1d8ndrgj0cg8eqsa578s4",
"cancel": "https://api.replicate.com/v1/predictions/9e7sq1d8ndrgj0cg8eqsa578s4/cancel"
},
"version": "0a0d21366b464cc0d6618f0a8da2ecbd22290423faa800a97e733be748a68616"
}
Using seed: 47765
loading custom weights
weights not in cache
Ensuring enough disk space...
Free disk space: 1634558648320
Downloading weights: https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar
downloading https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar
2024-06-23T06:01:44Z | INFO | [ Initiating ] chunk_size=150M dest=./weights-cache/931be18428e37365 url=https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar
2024-06-23T06:01:46Z | INFO | [ Complete ] dest=./weights-cache/931be18428e37365 size="186 MB" total_elapsed=2.434s url=https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar
b''
Downloaded weights in 2.5596249103546143 seconds
Loading fine-tuned model
Does not have Unet. assume we are using LoRA
Loading Unet LoRA
Original width:1024, height:1024
Aspect Ratio: 1.00
new_width:1024, new_height:1024
Prompt: shot in the style of <s0><s1>, a woman in alaska
text2img mode
0%| | 0/40 [00:00<?, ?it/s]/root/.pyenv/versions/3.9.19/lib/python3.9/site-packages/diffusers/models/attention_processor.py:1468: FutureWarning: `LoRAAttnProcessor2_0` is deprecated and will be removed in version 0.26.0. Make sure use AttnProcessor2_0 instead by settingLoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using `LoraLoaderMixin.load_lora_weights`
deprecate(
2%|▎ | 1/40 [00:00<00:14, 2.74it/s]
5%|▌ | 2/40 [00:00<00:13, 2.73it/s]
8%|▊ | 3/40 [00:01<00:13, 2.73it/s]
10%|█ | 4/40 [00:01<00:13, 2.73it/s]
12%|█▎ | 5/40 [00:01<00:12, 2.73it/s]
15%|█▌ | 6/40 [00:02<00:12, 2.74it/s]
18%|█▊ | 7/40 [00:02<00:12, 2.74it/s]
20%|██ | 8/40 [00:02<00:11, 2.74it/s]
22%|██▎ | 9/40 [00:03<00:11, 2.74it/s]
25%|██▌ | 10/40 [00:03<00:10, 2.74it/s]
28%|██▊ | 11/40 [00:04<00:10, 2.73it/s]
30%|███ | 12/40 [00:04<00:10, 2.73it/s]
32%|███▎ | 13/40 [00:04<00:09, 2.73it/s]
35%|███▌ | 14/40 [00:05<00:09, 2.73it/s]
38%|███▊ | 15/40 [00:05<00:09, 2.73it/s]
40%|████ | 16/40 [00:05<00:08, 2.73it/s]
42%|████▎ | 17/40 [00:06<00:08, 2.73it/s]
45%|████▌ | 18/40 [00:06<00:08, 2.73it/s]
48%|████▊ | 19/40 [00:06<00:07, 2.73it/s]
50%|█████ | 20/40 [00:07<00:07, 2.73it/s]
52%|█████▎ | 21/40 [00:07<00:06, 2.73it/s]
55%|█████▌ | 22/40 [00:08<00:06, 2.73it/s]
57%|█████▊ | 23/40 [00:08<00:06, 2.73it/s]
60%|██████ | 24/40 [00:08<00:05, 2.73it/s]
62%|██████▎ | 25/40 [00:09<00:05, 2.73it/s]
65%|██████▌ | 26/40 [00:09<00:05, 2.73it/s]
68%|██████▊ | 27/40 [00:09<00:04, 2.73it/s]
70%|███████ | 28/40 [00:10<00:04, 2.73it/s]
72%|███████▎ | 29/40 [00:10<00:04, 2.73it/s]
75%|███████▌ | 30/40 [00:10<00:03, 2.73it/s]
78%|███████▊ | 31/40 [00:11<00:03, 2.72it/s]
80%|████████ | 32/40 [00:11<00:02, 2.72it/s]
82%|████████▎ | 33/40 [00:12<00:02, 2.72it/s]
85%|████████▌ | 34/40 [00:12<00:02, 2.72it/s]
88%|████████▊ | 35/40 [00:12<00:01, 2.72it/s]
90%|█████████ | 36/40 [00:13<00:01, 2.72it/s]
92%|█████████▎| 37/40 [00:13<00:01, 2.72it/s]
95%|█████████▌| 38/40 [00:13<00:00, 2.72it/s]
98%|█████████▊| 39/40 [00:14<00:00, 2.72it/s]
100%|██████████| 40/40 [00:14<00:00, 2.72it/s]
100%|██████████| 40/40 [00:14<00:00, 2.73it/s]
This output was created using a different version of the model, pnyompen/sdxl-controlnet-lora-small:0a0d2136.
This model costs approximately $0.0043 to run on Replicate, or 232 runs per $1, but this varies depending on your inputs. It is also open source and you can run it on your own computer with Docker.
This model runs on Nvidia L40S GPU hardware. Predictions typically complete within 5 seconds.
This model doesn't have a readme.
This model is cold. You'll get a fast response if the model is warm and already running, and a slower response if the model is cold and starting up.
Choose a file from your machine
Hint: you can also drag files onto the input
Using seed: 47765
loading custom weights
weights not in cache
Ensuring enough disk space...
Free disk space: 1634558648320
Downloading weights: https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar
downloading https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar
2024-06-23T06:01:44Z | INFO | [ Initiating ] chunk_size=150M dest=./weights-cache/931be18428e37365 url=https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar
2024-06-23T06:01:46Z | INFO | [ Complete ] dest=./weights-cache/931be18428e37365 size="186 MB" total_elapsed=2.434s url=https://pbxt.replicate.delivery/mwN3AFyYZyouOB03Uhw8ubKW9rpqMgdtL9zYV9GF2WGDiwbE/trained_model.tar
b''
Downloaded weights in 2.5596249103546143 seconds
Loading fine-tuned model
Does not have Unet. assume we are using LoRA
Loading Unet LoRA
Original width:1024, height:1024
Aspect Ratio: 1.00
new_width:1024, new_height:1024
Prompt: shot in the style of <s0><s1>, a woman in alaska
text2img mode
0%| | 0/40 [00:00<?, ?it/s]/root/.pyenv/versions/3.9.19/lib/python3.9/site-packages/diffusers/models/attention_processor.py:1468: FutureWarning: `LoRAAttnProcessor2_0` is deprecated and will be removed in version 0.26.0. Make sure use AttnProcessor2_0 instead by settingLoRA layers to `self.{to_q,to_k,to_v,to_out[0]}.lora_layer` respectively. This will be done automatically when using `LoraLoaderMixin.load_lora_weights`
deprecate(
2%|▎ | 1/40 [00:00<00:14, 2.74it/s]
5%|▌ | 2/40 [00:00<00:13, 2.73it/s]
8%|▊ | 3/40 [00:01<00:13, 2.73it/s]
10%|█ | 4/40 [00:01<00:13, 2.73it/s]
12%|█▎ | 5/40 [00:01<00:12, 2.73it/s]
15%|█▌ | 6/40 [00:02<00:12, 2.74it/s]
18%|█▊ | 7/40 [00:02<00:12, 2.74it/s]
20%|██ | 8/40 [00:02<00:11, 2.74it/s]
22%|██▎ | 9/40 [00:03<00:11, 2.74it/s]
25%|██▌ | 10/40 [00:03<00:10, 2.74it/s]
28%|██▊ | 11/40 [00:04<00:10, 2.73it/s]
30%|███ | 12/40 [00:04<00:10, 2.73it/s]
32%|███▎ | 13/40 [00:04<00:09, 2.73it/s]
35%|███▌ | 14/40 [00:05<00:09, 2.73it/s]
38%|███▊ | 15/40 [00:05<00:09, 2.73it/s]
40%|████ | 16/40 [00:05<00:08, 2.73it/s]
42%|████▎ | 17/40 [00:06<00:08, 2.73it/s]
45%|████▌ | 18/40 [00:06<00:08, 2.73it/s]
48%|████▊ | 19/40 [00:06<00:07, 2.73it/s]
50%|█████ | 20/40 [00:07<00:07, 2.73it/s]
52%|█████▎ | 21/40 [00:07<00:06, 2.73it/s]
55%|█████▌ | 22/40 [00:08<00:06, 2.73it/s]
57%|█████▊ | 23/40 [00:08<00:06, 2.73it/s]
60%|██████ | 24/40 [00:08<00:05, 2.73it/s]
62%|██████▎ | 25/40 [00:09<00:05, 2.73it/s]
65%|██████▌ | 26/40 [00:09<00:05, 2.73it/s]
68%|██████▊ | 27/40 [00:09<00:04, 2.73it/s]
70%|███████ | 28/40 [00:10<00:04, 2.73it/s]
72%|███████▎ | 29/40 [00:10<00:04, 2.73it/s]
75%|███████▌ | 30/40 [00:10<00:03, 2.73it/s]
78%|███████▊ | 31/40 [00:11<00:03, 2.72it/s]
80%|████████ | 32/40 [00:11<00:02, 2.72it/s]
82%|████████▎ | 33/40 [00:12<00:02, 2.72it/s]
85%|████████▌ | 34/40 [00:12<00:02, 2.72it/s]
88%|████████▊ | 35/40 [00:12<00:01, 2.72it/s]
90%|█████████ | 36/40 [00:13<00:01, 2.72it/s]
92%|█████████▎| 37/40 [00:13<00:01, 2.72it/s]
95%|█████████▌| 38/40 [00:13<00:00, 2.72it/s]
98%|█████████▊| 39/40 [00:14<00:00, 2.72it/s]
100%|██████████| 40/40 [00:14<00:00, 2.72it/s]
100%|██████████| 40/40 [00:14<00:00, 2.73it/s]