Compare commits

..

6 Commits

Author SHA1 Message Date
6edf6e5815 Stuff 2023-01-01 05:22:41 +00:00
6a550d5847 Example client 2022-12-13 21:23:21 +13:00
7dba63c954 Working image server 2022-12-13 21:23:05 +13:00
d261d8e46f Ignore png and pycache 2022-12-13 21:22:41 +13:00
59307bff41 Ignore images and models folders 2022-12-13 19:14:55 +13:00
40b657d45c Add image server 2022-12-13 19:11:59 +13:00
7 changed files with 215 additions and 13 deletions

4
.gitignore vendored
View File

@@ -1,2 +1,6 @@
*.jpeg *.jpeg
*.jpg *.jpg
*.png
imageserver/images/
imageserver/models
__pycache__

1
imageserver/.env Normal file
View File

@@ -0,0 +1 @@
TOKEN="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"

15
imageserver/Dockerfile Normal file
View File

@@ -0,0 +1,15 @@
# Image server: CUDA base image + PyTorch + diffusers, served by uvicorn/FastAPI.
FROM nvidia/cuda:11.6.0-base-ubuntu20.04

# Python runtime for the app.
RUN apt-get update && apt-get install python3 python3-pip -y

# NOTE(review): the original ran three separate torch installs — unpinned cu117
# wheels followed by pinned cu115 wheels. The cu115 pin was the one that took
# effect, so only that install is kept; the redundant/conflicting ones are dropped.
RUN pip3 install --upgrade diffusers transformers scipy python-dotenv cuda-python fastapi uvicorn httplib2 python-multipart accelerate && \
    pip3 install torch==1.11.0+cu115 torchvision==0.12.0+cu115 torchaudio==0.11.0+cu115 -f https://download.pytorch.org/whl/torch_stable.html

WORKDIR /app
COPY main.py /app/main.py
# Model weights cache — persisted via a volume so rebuilds don't re-download.
VOLUME /root/.cache/huggingface/diffusers/
# --reload pairs with the main.py bind-mount in docker-compose for live edits.
CMD [ "uvicorn", "main:app", "--host", "0.0.0.0", "--reload" ]

40
imageserver/client.py Normal file
View File

@@ -0,0 +1,40 @@
# import aiohttp
# import aiofiles
import asyncio
import requests
from io import BytesIO
from PIL import Image
import shutil
from random import randint
def main(image_path="/home/jimmy/image.png", url="http://localhost:8000?text=cartoon"):
    """Upload a local image to the image server and display the generated result.

    Args:
        image_path: Path of the source image to send (default keeps the
            original hard-coded path for backward compatibility).
        url: Server endpoint including the ``text`` prompt query parameter.
    """
    print("Starting")
    # NOTE(review): the original also built a random 25x25 Image.new(...) here,
    # but it was immediately overwritten by Image.open() — dead line removed.
    img = Image.open(image_path)
    byte_io = BytesIO()
    img.save(byte_io, 'png')
    byte_io.seek(0)
    r = requests.post(
        url=url,
        files={
            'my_file': (
                '1.png',
                byte_io,
                'image/png'
            ),
        },
        stream=True,
    )
    print(r.status_code)
    if r.status_code == 200:
        # Server answers with raw PNG bytes; show them in the default viewer.
        img = Image.open(BytesIO(r.content))
        img.show()


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,16 @@
# Compose definition for the Stable Diffusion image server.
version: '3.9'
services:
  imageserver:
    image: imageserver
    build: ./
    # NVIDIA container runtime — required so the CUDA base image sees the GPU.
    runtime: nvidia
    ports:
      - 8000:8000
    # Supplies TOKEN (Hugging Face auth) consumed by main.py via python-dotenv.
    env_file:
      - .env
    volumes:
      # Persist downloaded model weights across container recreation.
      - ./models:/root/.cache/huggingface/diffusers/
      # Bind-mount the app code so uvicorn --reload picks up edits live.
      - ./main.py:/app/main.py
      - ./images:/images
    restart: unless-stopped

68
imageserver/main.py Normal file
View File

@@ -0,0 +1,68 @@
from multiprocessing import context
from httplib2 import Response
import torch
import uuid
import os
from diffusers import StableDiffusionImg2ImgPipeline
from dotenv import load_dotenv
from os import getenv
from fastapi import FastAPI, Response, HTTPException, File, UploadFile
from pydantic import BaseModel
import io
from PIL.PngImagePlugin import PngInfo
from PIL import Image
load_dotenv()
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", num_inference_steps=100, revision="fp16", torch_dtype=torch.float16, use_auth_token=getenv("TOKEN"))
pipe.to("cuda")
class Text(BaseModel):
    """Request body model carrying a prompt string.

    NOTE(review): appears unused by the visible endpoint, which takes
    ``text`` as a query parameter instead — confirm before removing.
    """
    text: str
app = FastAPI()
@app.post("/",
    responses={
        200: {
            "content": {"image/png": {}}
        }
    },
    response_class=Response
)
async def root(text: str, my_file: UploadFile = File(...)):
    """Run img2img Stable Diffusion on an uploaded image and return a PNG.

    Args:
        text: Prompt; '+' characters are treated as spaces.
        my_file: Source image upload (any PIL-readable format).

    Returns:
        Raw PNG bytes of the generated image.

    Raises:
        HTTPException: 202 when the pipeline hits a RuntimeError (treated as
            "busy", typically CUDA OOM while another request runs), 504 for
            any other pipeline failure, 418 when NSFW content is flagged.
            NOTE(review): these status codes are unconventional but clients
            (pi.py) only test for 200 — kept as-is for compatibility.
    """
    prompt = text.replace('+', ' ')
    print(prompt)
    request_object_content = await my_file.read()
    img = Image.open(io.BytesIO(request_object_content))
    # Scale to 512px wide, preserving aspect ratio (SD v1 works best at 512).
    aspect_ratio = img.width / img.height
    width_new = 512
    height_new = int(width_new / aspect_ratio)
    img = img.resize((width_new, height_new), 0)
    try:
        resp = pipe(prompt, image=img)
        print(resp)
        image = resp.images[0]
    except RuntimeError as e:
        print(e)
        raise HTTPException(status_code=202, detail="Busy")
    except Exception as e:
        raise HTTPException(status_code=504, detail=str(e))
    # NOTE(review): compares the whole flag list — only correct for the
    # single-image case; confirm if num_images_per_prompt is ever > 1.
    if resp["nsfw_content_detected"] == [True]:
        raise HTTPException(status_code=418, detail="NSFW")
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format="PNG")
    # (removed dead local `running = False` from the original — never read)
    return Response(content=img_byte_arr.getvalue(), media_type="image/png")

80
pi.py Normal file → Executable file
View File

@@ -4,19 +4,33 @@ import cv2
from gpiozero import LED, Button from gpiozero import LED, Button
from picamera2 import Picamera2 from picamera2 import Picamera2
from time import sleep from time import sleep
import asyncio
import requests
from io import BytesIO
from PIL import Image
import shutil
from random import randint
import numpy as np
import sys
import pyautogui
red = LED(19) red = LED(19)
blue = LED(26) blue = LED(26)
green = LED(13) green = LED(13)
button = Button(5) button = Button(5)
button2 = Button(2)
button3 = Button(3)
# Grab images as numpy arrays and leave everything else to OpenCV. # Grab images as numpy arrays and leave everything else to OpenCV.
x = 1920 x = 512
y = 1080 y = 512
print(pyautogui.size())
x, y = pyautogui.size()
cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
#cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
cv2.resizeWindow("window", x, y)
picam2 = Picamera2() picam2 = Picamera2()
capture_config = picam2.create_preview_configuration(main={"size": (x, y), capture_config = picam2.create_preview_configuration(main={"size": (x, y),
@@ -24,25 +38,69 @@ capture_config = picam2.create_preview_configuration(main={"size": (x, y),
picam2.configure(capture_config) picam2.configure(capture_config)
picam2.start() picam2.start()
cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
# cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
while True: while True:
# cv2.resizeWindow("window", x, y)
while True: while True:
im = picam2.capture_array() im = picam2.capture_array()
cv2.imshow("window", im) cv2.imshow("window", im)
if cv2.waitKey(40) & 0xFF == ord('a') or button.value: if cv2.waitKey(40) & 0xFF == ord('q'):
sys.exit()
if button2.value:
break break
while button.value: while button.value:
pass pass
while not button.value: color_converted = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
sleep(0.1) img=Image.fromarray(color_converted)
while not button.value: byte_io = BytesIO()
pass img.save(byte_io, 'png')
byte_io.seek(0)
print("-------------------Generating Image----------------------")
r = requests.post(url='http://192.168.1.100:8000?text=into cyborg robot ',
files={
'my_file': (
'1.png',
byte_io,
'image/png'
),
},
stream=True
)
print(r.status_code)
if r.status_code == 200:
byte_io = BytesIO(r.content)
img = Image.open(byte_io)
# img.show()
height_orig = img.height
width_orig = img.width
aspect_ratio = width_orig / height_orig
width_new = x
height_new = int(width_new / aspect_ratio)
img = img.resize((width_new, height_new), 0)
numpy_image=np.array(img)
opencv_image=cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
cv2.imshow("window", opencv_image)
print("---------------------------------Image Generated-----------------------------")
print("--------------------Sleeping-------------------------")
sleep(10)
print("--------------------Slept----------------------------")
sleep(1)