Compare commits

...

4 Commits

Author SHA1 Message Date
Jimmy abc34b8d37 Ignore image.png 2022-10-21 17:43:35 +13:00
Jimmy 1571d31d31 Working with fastapi 2022-10-21 17:42:59 +13:00
Jimmy f03d90f378 Add fastapi. Cache models 2022-10-21 17:42:28 +13:00
Jimmy 5968ea305a Add vosk server. Expose port 2022-10-21 17:41:33 +13:00
4 changed files with 55 additions and 10 deletions

.gitignore vendored (3 changed lines)
View File

@@ -1 +1,2 @@
.env
.env
image.png

View File

@@ -5,6 +5,14 @@ services:
    image: imageserver
    build: imageserver
    runtime: nvidia
    command: python3 /main.py
    ports:
      - 8000:8000
    env_file:
      - .env
      - .env
    volumes:
      - ~/Downloads/sd-v1-4-full-ema.ckpt:/app/model.ckpt
  vosk:
    image: alphacep/kaldi-en
    ports:
      - 2700:2700
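
The compose changes above publish the image server on port 8000, mount the Stable Diffusion checkpoint into the container, and add a vosk speech-to-text service on port 2700. A minimal client sketch for the vosk container, assuming the standard alphacep websocket protocol (raw 16 kHz 16-bit mono PCM chunks followed by an EOF message) and a local test.wav file; neither the client nor the audio file is part of this diff:

import asyncio
import json
import wave

import websockets  # pip install websockets

async def transcribe(path: str) -> str:
    # Stream a WAV file to the vosk server and return the final transcript.
    async with websockets.connect("ws://localhost:2700") as ws:
        wf = wave.open(path, "rb")
        await ws.send(json.dumps({"config": {"sample_rate": wf.getframerate()}}))
        while True:
            chunk = wf.readframes(4000)
            if not chunk:
                break
            await ws.send(chunk)
            await ws.recv()  # partial result for this chunk, ignored here
        await ws.send('{"eof" : 1}')
        # The last message holds the final recognition result as JSON.
        return json.loads(await ws.recv()).get("text", "")

if __name__ == "__main__":
    print(asyncio.run(transcribe("test.wav")))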

View File

@@ -1,8 +1,14 @@
FROM nvidia/cuda:11.6.0-base-ubuntu20.04
RUN apt-get update && apt-get install python3 python3-pip -y
RUN pip3 install --upgrade diffusers transformers scipy python-dotenv cuda-python && \
RUN pip3 install --upgrade diffusers transformers scipy python-dotenv cuda-python fastapi uvicorn && \
    pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu117 && \
    pip install torch==1.11.0+cu115 torchvision==0.12.0+cu115 torchaudio==0.11.0+cu115 -f https://download.pytorch.org/whl/torch_stable.html
COPY main.py /main.py
WORKDIR /app
COPY main.py /app/main.py
VOLUME /root/.cache/huggingface/diffusers/
CMD [ "uvicorn", "main:app", "--host", "0.0.0.0" ]

View File

@@ -1,19 +1,49 @@
from multiprocessing import context
from httplib2 import Response
import torch
import uuid
import os
from diffusers import StableDiffusionPipeline
from dotenv import load_dotenv
from os import getenv
from fastapi import FastAPI, Response
from pydantic import BaseModel
import io
load_dotenv()
# get your token at https://huggingface.co/settings/tokens
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=getenv("TOKEN"))
pipe.to("cuda")
prompt = "metal buttons are often soldiers who just got out of high school or a couple of years graduated from college easy as an air conditioned box about radar the patriot radar known as the a n n e e e pi this is an extremely powerful radar unit so powerful that they actually"
class Text(BaseModel):
    text: str
for _ in range(10):
image = pipe(prompt)["sample"][0]
app = FastAPI()
image.save(f"{uuid.uuid4()}.png".replace(" ", "_"))
@app.get("/",
    responses = {
        200: {
            "content": {"image/png": {}}
        }
    },
    response_class=Response
)
async def root(text: Text):
    # get your token at https://huggingface.co/settings/tokens
    prompt = text.text
    print(prompt)
    image = pipe(prompt).images[0]
    # print(image)
    # image = Image.new('RGB', (1000, 1000), (100,200,10))
    imgByteArr = io.BytesIO()
    # image.save expects a file as an argument, so pass a BytesIO instead
    image.save(imgByteArr, format="PNG")
    # Turn the BytesIO object back into a bytes object
    imgByteArr = imgByteArr.getvalue()
    # media_type here sets the media type of the actual response sent to the client.
    return Response(content=imgByteArr, media_type="image/png")
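
The new main.py turns the script into a FastAPI app: the root route reads a JSON body matching the Text model and returns the generated image as PNG bytes. A minimal client sketch, assuming the service is reachable on localhost:8000 as published in the compose file (the prompt string is only an example). Note that the route is declared with @app.get yet expects a request body, so the client has to send a body with a GET request, which not every HTTP tool allows:

import requests  # pip install requests

resp = requests.get(
    "http://localhost:8000/",
    json={"text": "a watercolor painting of a lighthouse at dawn"},
)
resp.raise_for_status()

# The endpoint returns raw image/png bytes in the response body.
with open("out.png", "wb") as f:
    f.write(resp.content)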