Woogiepark committed
Commit 58c0646 · verified · 1 Parent(s): 026bd59

Update README.md


Optimizations for Stable Diffusion v1-4:

- FP16 Precision: Reduces memory usage and speeds up inference.
- Batch Processing: Generate multiple images in a single request to maximize GPU utilization (a short sketch follows below).
- Custom LoRA Training: Train on specific datasets to customize the model for unique themes, e.g., luxury or fantasy (see the LoRA sketch below).

This approach lets you use Stable Diffusion v1-4 for photorealistic, high-quality image generation, served through a simple web interface.
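As a rough sketch of the batch-processing point, assuming the same fp16 `pipe` object that the README below constructs, the pipeline's `num_images_per_prompt` argument generates several images in one call:

```python
# Batch generation sketch: reuses the fp16 pipeline loaded in the README below.
# num_images_per_prompt runs the batch in a single forward pass to keep the GPU busy.
images = pipe(
    "A luxurious futuristic bathroom with marble walls and golden accents",
    num_inference_steps=50,
    guidance_scale=7.5,
    num_images_per_prompt=4,
).images

for i, image in enumerate(images):
    image.save(f"generated_image_{i}.png")
```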
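For the LoRA point, the sketch below only shows how an adapter trained elsewhere could be attached to the pipeline; the weight path is a placeholder rather than something shipped in this repo, and `load_lora_weights` assumes a reasonably recent `diffusers` release:

```python
# LoRA sketch (hypothetical adapter path; training itself is not covered here).
from diffusers import StableDiffusionPipeline
import torch

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# Attach a LoRA adapter trained on a themed dataset (e.g., luxury interiors).
pipe.load_lora_weights("path/to/your_lora_weights")  # placeholder path

image = pipe("A luxury penthouse lounge, fantasy style", num_inference_steps=50).images[0]
image.save("lora_image.png")
```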

Files changed (1)
  1. README.md +98 -1
README.md CHANGED
@@ -9,4 +9,101 @@ license: bigscience-openrail-m
  sdk_version: 5.12.0
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 # GPU support
+ pip install diffusers transformers flask pillow accelerate
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ # Authenticate Hugging Face
+ from huggingface_hub import login
+ login(token="your_hugging_face_token")
+
+ # Load Stable Diffusion v1-4
+ model_id = "CompVis/stable-diffusion-v1-4"
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda") # Use GPU for faster performance
+ prompt = "A luxurious futuristic bathroom with marble walls and golden accents, panoramic views of a tropical jungle, ultra-realistic, 32k resolution"
+ num_steps = 50 # Number of diffusion steps
+ guidance_scale = 7.5 # Higher = more faithful to the prompt
+
+ # Generate an image
+ image = pipe(prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale).images[0]
+
+ # Save the image
+ image.save("generated_image.png")
+ from flask import Flask, request, jsonify, send_file
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ app = Flask(__name__)
+
+ # Load Stable Diffusion v1-4
+ model_id = "CompVis/stable-diffusion-v1-4"
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+ pipe = pipe.to("cuda")
+
+ @app.route("/generate", methods=["POST"])
+ def generate_image():
+     data = request.json
+     prompt = data.get("prompt", "A beautiful fantasy landscape")
+     num_steps = int(data.get("steps", 50))  # cast: the HTML form sends these values as strings
+     guidance_scale = float(data.get("guidance_scale", 7.5))
+
+     # Generate image
+     image = pipe(prompt, num_inference_steps=num_steps, guidance_scale=guidance_scale).images[0]
+     output_path = "output.png"
+     image.save(output_path)
+
+     return send_file(output_path, mimetype="image/png")
+
+ if __name__ == "__main__":
+     app.run(host="0.0.0.0", port=5000)
+ <!DOCTYPE html>
+ <html lang="en">
+ <head>
+     <meta charset="UTF-8">
+     <meta name="viewport" content="width=device-width, initial-scale=1.0">
+     <title>Stable Diffusion Generator</title>
+ </head>
+ <body>
+     <h1>Stable Diffusion v1-4 Image Generator</h1>
+     <form id="image-form">
+         <label for="prompt">Prompt:</label><br>
+         <input type="text" id="prompt" name="prompt" required><br><br>
+         <label for="steps">Inference Steps:</label><br>
+         <input type="number" id="steps" name="steps" value="50"><br><br>
+         <label for="guidance_scale">Guidance Scale:</label><br>
+         <input type="number" id="guidance_scale" name="guidance_scale" value="7.5"><br><br>
+         <button type="submit">Generate Image</button>
+     </form>
+
+     <h2>Generated Image:</h2>
+     <img id="generated-image" alt="Generated Image" style="max-width: 100%;">
+
+     <script>
+         document.getElementById("image-form").addEventListener("submit", async (event) => {
+             event.preventDefault();
+
+             const prompt = document.getElementById("prompt").value;
+             const steps = document.getElementById("steps").value;
+             const guidanceScale = document.getElementById("guidance_scale").value;
+
+             const response = await fetch("http://localhost:5000/generate", {
+                 method: "POST",
+                 headers: {
+                     "Content-Type": "application/json",
+                 },
+                 body: JSON.stringify({ prompt, steps, guidance_scale: guidanceScale }),
+             });
+
+             if (response.ok) {
+                 const blob = await response.blob();
+                 const url = URL.createObjectURL(blob);
+                 document.getElementById("generated-image").src = url;
+             } else {
+                 console.error("Error generating image");
+             }
+         });
+     </script>
+ </body>
+ </html>
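Once the Flask server from the diff above is running, the `/generate` endpoint can also be exercised without the HTML page. A minimal client sketch using `requests` (not in the README's pip list, so install it separately if you try this):

```python
# Minimal client sketch for the /generate endpoint defined in the README above.
# Assumes the Flask app is running locally and that `requests` is installed.
import requests

resp = requests.post(
    "http://localhost:5000/generate",
    json={"prompt": "A cozy cabin in a snowy forest", "steps": 40, "guidance_scale": 7.5},
    timeout=300,  # generation can take a while on slower GPUs
)
resp.raise_for_status()

with open("client_output.png", "wb") as f:
    f.write(resp.content)  # the endpoint returns the PNG bytes directly
```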