Compare commits

...

5 Commits

SHA1        Message                           Date
6edf6e5815  Stuff                             2023-01-01 05:22:41 +00:00
6a550d5847  Example client                    2022-12-13 21:23:21 +13:00
7dba63c954  Working image server              2022-12-13 21:23:05 +13:00
d261d8e46f  Ignore png and pycache            2022-12-13 21:22:41 +13:00
59307bff41  Ignore images and models folders  2022-12-13 19:14:55 +13:00
4 changed files with 152 additions and 45 deletions

.gitignore vendored (4 additions)

@@ -1,2 +1,6 @@
 *.jpeg
 *.jpg
+*.png
+imageserver/images/
+imageserver/models
+__pycache__

imageserver/client.py Normal file (40 additions)

@@ -0,0 +1,40 @@
# import aiohttp
# import aiofiles
import asyncio
import requests
from io import BytesIO
from PIL import Image
import shutil
from random import randint


def main():
    print("Starting")
    img = Image.new('RGB', (25, 25), color = (randint(0, 255), randint(0, 255), randint(0, 255)))
    img = Image.open("/home/jimmy/image.png")
    byte_io = BytesIO()
    img.save(byte_io, 'png')
    byte_io.seek(0)
    r = requests.post(url='http://localhost:8000?text=cartoon',
        files={
            'my_file': (
                '1.png',
                byte_io,
                'image/png'
            ),
        },
        stream=True
    )
    print(r.status_code)
    if r.status_code == 200:
        byte_io = BytesIO(r.content)
        img = Image.open(byte_io)
        img.show()


if __name__ == '__main__':
    main()
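The client posts one PNG as the multipart field 'my_file' and passes the prompt through the 'text' query parameter; the image itself is read from a hard-coded path. A minimal self-contained sketch of the same request for when that path is not available (the solid-colour test image and its size are illustration choices; the endpoint, port, and field name come from the code above):

    import requests
    from io import BytesIO
    from PIL import Image

    # Build a throwaway test image in memory instead of reading a file from disk
    img = Image.new('RGB', (512, 512), color=(120, 60, 200))
    buf = BytesIO()
    img.save(buf, 'png')
    buf.seek(0)

    # Same request shape as client.py: prompt in the query string, image as multipart 'my_file'
    r = requests.post('http://localhost:8000?text=cartoon',
                      files={'my_file': ('1.png', buf, 'image/png')},
                      stream=True)
    print(r.status_code)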

(third changed file: the FastAPI image server; file name not shown in this compare)

@@ -14,8 +14,8 @@ from PIL import Image
 load_dotenv()
-# pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", num_inference_steps=100, revision="fp16", torch_dtype=torch.float16, use_auth_token=getenv("TOKEN"))
-# pipe.to("cuda")
+pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", num_inference_steps=100, revision="fp16", torch_dtype=torch.float16, use_auth_token=getenv("TOKEN"))
+pipe.to("cuda")
 class Text(BaseModel):
     text: str
@@ -25,39 +25,44 @@ class Text(BaseModel):
 app = FastAPI()
 @app.post("/",
-    # responses = {
-    #     200: {
-    #         "content": {"image/png": {}}
-    #     }
-    # },
-    # response_class=Response
+    responses = {
+        200: {
+            "content": {"image/png": {}}
+        }
+    },
+    response_class=Response
 )
-def root(text: str):
+async def root(text: str, my_file: UploadFile = File(...)):
     prompt = text.replace('+', ' ')
     print(prompt)
-    #request_object_content = file.read()
-    # img = Image.open(io.BytesIO(request_object_content))
-    # height_orig = img.height
-    # width_orig = img.width
-    # aspect_ratio = width_orig / height_orig
-    # width_new = 512
-    # height_new = int(width_new / aspect_ratio)
-    # img = img.resize((width_new, height_new), 0)
-    # try:
-    #     resp = pipe(prompt, init_image=img)
-    #     print(resp)
-    #     image = resp.images[0]
-    # except RuntimeError as e:
-    #     print(e)
-    #     raise HTTPException(status_code=202, detail="Busy")
-    # except:
-    #     raise HTTPException(status_code=504)
-    # imgByteArr = io.BytesIO()
-    # image.save(imgByteArr, format="PNG")
-    # imgByteArr = imgByteArr.getvalue()
-    # running = False
-    # return Response(content=imgByteArr, media_type="image/png")
+    request_object_content = await my_file.read()
+    img = Image.open(io.BytesIO(request_object_content))
+    height_orig = img.height
+    width_orig = img.width
+    aspect_ratio = width_orig / height_orig
+    width_new = 512
+    height_new = int(width_new / aspect_ratio)
+    img = img.resize((width_new, height_new), 0)
+    try:
+        resp = pipe(prompt, image=img)
+        print(resp)
+        image = resp.images[0]
+    except RuntimeError as e:
+        print(e)
+        raise HTTPException(status_code=202, detail="Busy")
+    except Exception as e:
+        raise HTTPException(status_code=504, detail=str(e))
+    if resp["nsfw_content_detected"] == [True]:
+        raise HTTPException(status_code=418, detail="NSFW")
+    imgByteArr = io.BytesIO()
+    image.save(imgByteArr, format="PNG")
+    imgByteArr = imgByteArr.getvalue()
+    running = False
+    return Response(content=imgByteArr, media_type="image/png")
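With this change the route accepts an uploaded image plus a text prompt, resizes the image to 512 pixels wide, runs the img2img pipeline, and returns the raw PNG bytes in a Response, answering 202 when the pipeline raises a RuntimeError and 418 when the NSFW flag is set. The compare does not show how the app is served; a minimal launch sketch, assuming the module above is importable as 'main' and served with uvicorn on port 8000 (both assumptions, chosen only to match the clients' URLs):

    # Hypothetical launcher: the 'main:app' import string is an assumption, not shown in the diff
    import uvicorn

    if __name__ == '__main__':
        uvicorn.run('main:app', host='0.0.0.0', port=8000)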

pi.py Normal file → Executable file (80 changes)

@@ -4,19 +4,33 @@ import cv2
 from gpiozero import LED, Button
 from picamera2 import Picamera2
 from time import sleep
+import asyncio
+import requests
+from io import BytesIO
+from PIL import Image
+import shutil
+from random import randint
+import numpy as np
+import sys
+import pyautogui
 red = LED(19)
 blue = LED(26)
 green = LED(13)
 button = Button(5)
+button2 = Button(2)
+button3 = Button(3)
 # Grab images as numpy arrays and leave everything else to OpenCV.
-x = 1920
-y = 1080
+x = 512
+y = 512
+print(pyautogui.size())
+x, y = pyautogui.size()
+cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
+#cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
+cv2.resizeWindow("window", x, y)
 picam2 = Picamera2()
 capture_config = picam2.create_preview_configuration(main={"size": (x, y),
@@ -24,25 +38,69 @@ capture_config = picam2.create_preview_configuration(main={"size": (x, y),
 picam2.configure(capture_config)
 picam2.start()
-cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
-# cv2.setWindowProperty("window",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
 while True:
-    # cv2.resizeWindow("window", x, y)
     while True:
         im = picam2.capture_array()
         cv2.imshow("window", im)
-        if cv2.waitKey(40) & 0xFF == ord('a') or button.value:
+        if cv2.waitKey(40) & 0xFF == ord('q'):
+            sys.exit()
+        if button2.value:
             break
     while button.value:
         pass
-    while not button.value:
-        sleep(0.1)
-    while not button.value:
-        pass
+    color_converted = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
+    img=Image.fromarray(color_converted)
+    byte_io = BytesIO()
+    img.save(byte_io, 'png')
+    byte_io.seek(0)
+    print("-------------------Generating Image----------------------")
+    r = requests.post(url='http://192.168.1.100:8000?text=into cyborg robot ',
+        files={
+            'my_file': (
+                '1.png',
+                byte_io,
+                'image/png'
+            ),
+        },
+        stream=True
+    )
+    print(r.status_code)
+    if r.status_code == 200:
+        byte_io = BytesIO(r.content)
+        img = Image.open(byte_io)
+        # img.show()
+        height_orig = img.height
+        width_orig = img.width
+        aspect_ratio = width_orig / height_orig
+        width_new = x
+        height_new = int(width_new / aspect_ratio)
+        img = img.resize((width_new, height_new), 0)
+        numpy_image=np.array(img)
+        opencv_image=cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
+        cv2.imshow("window", opencv_image)
+        print("---------------------------------Image Generated-----------------------------")
+        print("--------------------Sleeping-------------------------")
+        sleep(10)
+        print("--------------------Slept----------------------------")
+    sleep(1)
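pi.py only acts on a 200 response; the server shown above can also return 202 (pipeline busy) and 418 (NSFW filter). A rough sketch of how the Pi loop could cope with those statuses (the helper name, retry count, and delay are invented for illustration; the status codes and the 'my_file' field come from the diffs above):

    import requests
    from time import sleep

    # Hypothetical helper: retry on 202 'Busy', give up on 418 'NSFW' or any other error.
    # Pass raw PNG bytes (e.g. byte_io.getvalue()) so the same payload can be resent.
    def post_with_retry(url, png_bytes, retries=3, delay=5):
        for _ in range(retries):
            r = requests.post(url,
                              files={'my_file': ('1.png', png_bytes, 'image/png')},
                              stream=True)
            if r.status_code == 200:
                return r
            if r.status_code == 202:   # server busy: wait, then resend the same bytes
                sleep(delay)
                continue
            break                      # 418 (NSFW) or anything else: stop trying
        return None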