diff --git a/CMakeLists.txt b/CMakeLists.txt
index 28a03fb45..71bcd3e54 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -27,19 +27,20 @@ option(SD_BUILD_EXAMPLES "sd: build examples" ${SD_STANDALONE})
 option(SD_CUBLAS "sd: cuda backend" OFF)
 option(SD_HIPBLAS "sd: rocm backend" OFF)
 option(SD_METAL "sd: metal backend" OFF)
+option(SD_SYCL "sd: sycl backend" OFF)
 option(SD_FLASH_ATTN "sd: use flash attention for x4 less memory usage" OFF)
 option(SD_FAST_SOFTMAX "sd: x1.5 faster softmax, indeterministic (sometimes, same seed don't generate same image), cuda only" OFF)
 option(SD_BUILD_SHARED_LIBS "sd: build shared libs" OFF)
 #option(SD_BUILD_SERVER "sd: build server example" ON)
 
 if(SD_CUBLAS)
-    message("Use CUBLAS as backend stable-diffusion")
+    message("Use CUBLAS as backend stable-diffusion")
     set(GGML_CUDA ON)
     add_definitions(-DSD_USE_CUBLAS)
 endif()
 
 if(SD_METAL)
-    message("Use Metal as backend stable-diffusion")
+    message("Use Metal as backend stable-diffusion")
     set(GGML_METAL ON)
     add_definitions(-DSD_USE_METAL)
 endif()
@@ -53,6 +54,12 @@ if (SD_HIPBLAS)
     endif()
 endif ()
 
+if(SD_SYCL)
+    message("Use SYCL as backend stable-diffusion")
+    set(GGML_SYCL ON)
+    add_definitions(-DSD_USE_SYCL)
+endif()
+
 if(SD_FLASH_ATTN)
     message("Use Flash Attention for memory optimization")
     add_definitions(-DSD_USE_FLASH_ATTENTION)
diff --git a/README.md b/README.md
index a0acedc94..32caf3c21 100644
--- a/README.md
+++ b/README.md
@@ -20,7 +20,7 @@ Inference of [Stable Diffusion](https://github.com/CompVis/stable-diffusion) in
 - Accelerated memory-efficient CPU inference
   - Only requires ~2.3GB when using txt2img with fp16 precision to generate a 512x512 image, enabling Flash Attention just requires ~1.8GB.
 - AVX, AVX2 and AVX512 support for x86 architectures
-- Full CUDA and Metal backend for GPU acceleration.
+- Full CUDA, Metal and SYCL backends for GPU acceleration.
 - Can load ckpt, safetensors and diffusers models/checkpoints. Standalone VAEs models
   - No need to convert to `.ggml` or `.gguf` anymore!
 - Flash Attention for memory usage optimization (only cpu for now)
@@ -142,6 +142,37 @@ cmake .. -DSD_METAL=ON
 cmake --build . --config Release
 ```
 
+##### Using SYCL
+
+Using SYCL makes the computation run on Intel GPUs. Make sure you have installed the related driver and the [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) before starting. For more details and steps, refer to the [llama.cpp SYCL backend](https://github.com/ggerganov/llama.cpp/blob/master/docs/backend/SYCL.md#linux) documentation.
+
+```
+# Export relevant ENV variables
+source /opt/intel/oneapi/setvars.sh
+
+# Option 1: Use FP32 (recommended for better performance in most cases)
+cmake .. -DSD_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
+
+# Option 2: Use FP16
+cmake .. -DSD_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON
+
+cmake --build . --config Release
+```
+
+Example of text2img using the SYCL backend:
+
+- download the `stable-diffusion` model weights; refer to [download-weight](#download-weights).
+
+- run `./bin/sd -m ../models/sd3_medium_incl_clips_t5xxlfp16.safetensors --cfg-scale 5 --steps 30 --sampling-method euler -H 512 -W 512 --seed 42 -p "fantasy medieval village world inside a glass sphere , high detail, fantasy, realistic, light effect, hyper detail, volumetric lighting, cinematic, macro, depth of field, blur, red light and clouds from the back, highly detailed epic cinematic concept art cg render made in maya, blender and photoshop, octane render, excellent composition, dynamic dramatic cinematic lighting, aesthetic, very inspirational, world inside a glass sphere by james gurney by artgerm with james jean, joe fenton and tristan eaton by ross tran, fine details, 4k resolution"`
+
+<p align="center">
+  <img src="./assets/sycl_sd3_output.png">
+</p>
+
+> [!NOTE]
+> Try setting a smaller image height and width (for example, `-H 512 -W 512`) if you encounter `Provided range is out of integer limits. Pass '-fno-sycl-id-queries-fit-in-int' to disable range check.`
+
 ##### Using Flash Attention
 
 Enabling flash attention reduces memory usage by at least 400 MB. At the moment, it is not supported when CUBLAS is enabled because the kernel implementation is missing.
diff --git a/assets/sycl_sd3_output.png b/assets/sycl_sd3_output.png
new file mode 100644
index 000000000..bdd989a2a
Binary files /dev/null and b/assets/sycl_sd3_output.png differ
diff --git a/docs/photo_maker.md b/docs/photo_maker.md
index 043f1ad6d..b69ad97d9 100644
--- a/docs/photo_maker.md
+++ b/docs/photo_maker.md
@@ -28,5 +28,5 @@ If on low memory GPUs (<= 8GB), recommend running with ```--vae-on-cpu``` option
 Example:
 
 ```bash
-bin/sd -m ../models/sdxlUnstableDiffusers_v11.safetensors --vae ../models/sdxl_vae.safetensors --stacked-id-embd-dir ../models/photomaker-v1.safetensors --input-id-images-dir ../assets/examples/scarletthead_woman -p "a girl img, retro futurism, retro game art style but extremely beautiful, intricate details, masterpiece, best quality, space-themed, cosmic, celestial, stars, galaxies, nebulas, planets, science fiction, highly detailed" -n "realistic, photo-realistic, worst quality, greyscale, bad anatomy, bad hands, error, text" --cfg-scale 5.0 --sampling-method euler -H 1024 -W 1024 --style-ratio 10 --vae-on-cpu -o output.png
+bin/sd -m ../models/sdxlUnstableDiffusers_v11.safetensors --vae ../models/sdxl_vae.safetensors --stacked-id-embd-dir ../models/photomaker-v1.safetensors --input-id-images-dir ../assets/photomaker_examples/scarletthead_woman -p "a girl img, retro futurism, retro game art style but extremely beautiful, intricate details, masterpiece, best quality, space-themed, cosmic, celestial, stars, galaxies, nebulas, planets, science fiction, highly detailed" -n "realistic, photo-realistic, worst quality, greyscale, bad anatomy, bad hands, error, text" --cfg-scale 5.0 --sampling-method euler -H 1024 -W 1024 --style-ratio 10 --vae-on-cpu -o output.png
 ```
\ No newline at end of file
diff --git a/ggml b/ggml
index 73c328781..a06c68343 160000
--- a/ggml
+++ b/ggml
@@ -1 +1 @@
-Subproject commit 73c3287813f8977d778d3eb5006660b5ae04f288
+Subproject commit a06c68343e9976fdfc80917a958b903a0d7c8cc6
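The submodule bump above pulls in a ggml revision where `ggml_group_norm` takes an explicit epsilon argument, which is why the two call sites in `ggml_extend.hpp` below gain an `eps` parameter. A minimal sketch of a call against the newer API, assuming the headers from the pinned submodule (`group_norm_example` is an illustrative name, not part of the patch):

```cpp
#include "ggml.h"

// Group-norm call against the newer ggml API: the caller now supplies eps
// explicitly. 1e-6f matches the value this patch hard-codes in ggml_extend.hpp.
static struct ggml_tensor* group_norm_example(struct ggml_context* ctx,
                                              struct ggml_tensor* x,
                                              int num_groups) {
    const float eps = 1e-6f;
    return ggml_group_norm(ctx, x, num_groups, eps);
}
```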
diff --git a/ggml_extend.hpp b/ggml_extend.hpp
index 14ad37c0e..47fd3a148 100644
--- a/ggml_extend.hpp
+++ b/ggml_extend.hpp
@@ -32,6 +32,10 @@
 #include "ggml-metal.h"
 #endif
 
+#ifdef SD_USE_SYCL
+#include "ggml-sycl.h"
+#endif
+
 #include "rng.hpp"
 #include "util.h"
 
@@ -537,7 +541,8 @@ __STATIC_INLINE__ void sd_tiling(ggml_tensor* input, ggml_tensor* output, const
 
 __STATIC_INLINE__ struct ggml_tensor* ggml_group_norm_32(struct ggml_context* ctx,
                                                          struct ggml_tensor* a) {
-    return ggml_group_norm(ctx, a, 32);
+    const float eps = 1e-6f; // default eps parameter
+    return ggml_group_norm(ctx, a, 32, eps);
 }
 
 __STATIC_INLINE__ struct ggml_tensor* ggml_nn_linear(struct ggml_context* ctx,
@@ -636,7 +641,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention(struct ggml_context* ctx
                                                         struct ggml_tensor* k,
                                                         struct ggml_tensor* v,
                                                         bool mask = false) {
-#if defined(SD_USE_FLASH_ATTENTION) && !defined(SD_USE_CUBLAS) && !defined(SD_USE_METAL)
+#if defined(SD_USE_FLASH_ATTENTION) && !defined(SD_USE_CUBLAS) && !defined(SD_USE_METAL) && !defined(SD_USE_SYCL)
     struct ggml_tensor* kqv = ggml_flash_attn(ctx, q, k, v, false); // [N * n_head, n_token, d_head]
 #else
     float d_head = (float)q->ne[0];
@@ -728,7 +733,8 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_group_norm(struct ggml_context* ct
         b = ggml_reshape_4d(ctx, b, 1, 1, b->ne[0], 1);
     }
 
-    x = ggml_group_norm(ctx, x, num_groups);
+    const float eps = 1e-6f; // default eps parameter
+    x = ggml_group_norm(ctx, x, num_groups, eps);
     if (w != NULL && b != NULL) {
         x = ggml_mul(ctx, x, w);
         // b = ggml_repeat(ctx, b, x);
@@ -738,7 +744,7 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_group_norm(struct ggml_context* ct
 }
 
 __STATIC_INLINE__ void ggml_backend_tensor_get_and_sync(ggml_backend_t backend, const struct ggml_tensor* tensor, void* data, size_t offset, size_t size) {
-#ifdef SD_USE_CUBLAS
+#if defined(SD_USE_CUBLAS) || defined(SD_USE_SYCL)
     if (!ggml_backend_is_cpu(backend)) {
         ggml_backend_tensor_get_async(backend, tensor, data, offset, size);
         ggml_backend_synchronize(backend);
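The `ggml_backend_tensor_get_and_sync` hunk above extends to SYCL the read-back discipline already used for CUBLAS: on a device backend the copy is queued asynchronously and the queue is drained before the host buffer is read. A condensed sketch of that pattern, assuming the ggml-backend API from the pinned submodule (`tensor_to_host` is an illustrative name):

```cpp
#include "ggml.h"
#include "ggml-backend.h"

// Copy a tensor's data to host memory. On a device backend (CUDA, SYCL, ...)
// the transfer is queued asynchronously, so we must synchronize before the
// caller may safely read `data`; the CPU backend copies directly.
static void tensor_to_host(ggml_backend_t backend, const struct ggml_tensor* t,
                           void* data, size_t offset, size_t size) {
    if (!ggml_backend_is_cpu(backend)) {
        ggml_backend_tensor_get_async(backend, t, data, offset, size);
        ggml_backend_synchronize(backend); // wait for the async copy to land
    } else {
        ggml_backend_tensor_get(t, data, offset, size);
    }
}
```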
diff --git a/stable-diffusion.cpp b/stable-diffusion.cpp
index 34bf8f527..c4705db15 100644
--- a/stable-diffusion.cpp
+++ b/stable-diffusion.cpp
@@ -152,13 +152,17 @@ class StableDiffusionGGML {
         ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         backend = ggml_backend_metal_init();
 #endif
+#ifdef SD_USE_SYCL
+        LOG_DEBUG("Using SYCL backend");
+        backend = ggml_backend_sycl_init(0);
+#endif
 
         if (!backend) {
             LOG_DEBUG("Using CPU backend");
             backend = ggml_backend_cpu_init();
         }
 #ifdef SD_USE_FLASH_ATTENTION
-#if defined(SD_USE_CUBLAS) || defined(SD_USE_METAL)
+#if defined(SD_USE_CUBLAS) || defined(SD_USE_METAL) || defined(SD_USE_SYCL)
         LOG_WARN("Flash Attention not supported with GPU Backend");
 #else
         LOG_INFO("Flash Attention enabled");
diff --git a/upscaler.cpp b/upscaler.cpp
index 0e3f95d62..2890ad34d 100644
--- a/upscaler.cpp
+++ b/upscaler.cpp
@@ -24,6 +24,10 @@ struct UpscalerGGML {
         ggml_backend_metal_log_set_callback(ggml_log_callback_default, nullptr);
         backend = ggml_backend_metal_init();
 #endif
+#ifdef SD_USE_SYCL
+        LOG_DEBUG("Using SYCL backend");
+        backend = ggml_backend_sycl_init(0);
+#endif
         if (!backend) {
             LOG_DEBUG("Using CPU backend");
             backend = ggml_backend_cpu_init();
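The stable-diffusion.cpp and upscaler.cpp hunks apply the same backend-selection pattern: each `SD_USE_*` macro guards an init call, and a null result falls through to the CPU backend. A condensed sketch of that pattern, showing only the SYCL branch this patch adds (`init_backend` is an illustrative name, not part of the patch):

```cpp
#include "ggml-backend.h"
#ifdef SD_USE_SYCL
#include "ggml-sycl.h"
#endif

// Pick the compute backend at compile time; fall back to CPU if no GPU
// backend was compiled in or its initialization returned null.
static ggml_backend_t init_backend() {
    ggml_backend_t backend = nullptr;
#ifdef SD_USE_SYCL
    backend = ggml_backend_sycl_init(0); // device index 0, as in the patch
#endif
    if (!backend) {
        backend = ggml_backend_cpu_init(); // always available
    }
    return backend;
}
```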