From 24d287cd904706a57b9318a949868da7be74d066 Mon Sep 17 00:00:00 2001
From: ale
Date: Sun, 22 May 2022 19:17:25 +0200
Subject: [PATCH] repaired docker, added docker-compose

---
 Dockerfile              | 27 ++++++++++++++++++---------
 README.md               | 38 +++++++++-----------------------------
 docker-compose.yml      | 19 +++++++++++++++++++
 server/requirements.txt | 11 ++++++-----
 server/server.py        |  5 +++--
 5 files changed, 55 insertions(+), 45 deletions(-)
 create mode 100644 docker-compose.yml

diff --git a/Dockerfile b/Dockerfile
index c5a2611..31d9f0d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,6 @@
-FROM ubuntu:14.04
+FROM ubuntu:18.04
+
+ENV DEBIAN_FRONTEND=noninteractive
 
 RUN apt-get update && apt-get install -y \
     pkg-config \
@@ -8,19 +10,26 @@ RUN apt-get update && apt-get install -y \
     libblas-dev \
     liblapack-dev \
     libatlas-base-dev \
+    libsndfile1-dev \
+    libasound2-dev \
+    libjack-dev \
     gfortran \
+    ffmpeg \
+    llvm-8 \
     python \
-    python-dev \
-    python-pip \
+    python3 \
+    python3-dev \
+    python3-pip \
+    python3-venv \
+    nvidia-cuda-dev \
     curl && \
-    curl -sL https://deb.nodesource.com/setup_7.x | sudo -E bash - && \
-    apt-get install -y nodejs
+    curl -sL https://deb.nodesource.com/setup_10.x | bash - && \
+    apt update && apt-get install -y nodejs && apt clean
 
-RUN pip install -U https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.12.1-cp27-none-linux_x86_64.whl
+RUN pip3 install --upgrade pip
 
 COPY ./server/requirements.txt /tmp/
-RUN pip install -r /tmp/requirements.txt
+RUN pip3 install -r /tmp/requirements.txt
 
 COPY . /src/
 
@@ -30,4 +39,4 @@ RUN npm install && npm run build
 WORKDIR /src/server/
 
 EXPOSE 8080
-ENTRYPOINT python server.py
+ENTRYPOINT python3 server.py
diff --git a/README.md b/README.md
index a380f2a..be653a9 100644
--- a/README.md
+++ b/README.md
@@ -18,35 +18,6 @@ Built by [Yotam Mann](https://github.com/tambien) with friends on the Magenta an
 
 A.I. Duet is composed of two parts, the front-end which is in the `static` folder and the back-end which is in the `server` folder. The front-end client creates short MIDI files using the players's input which is sent to a [Flask](http://flask.pocoo.org/) server. The server takes that MIDI input and "continues" it using [Magenta](https://github.com/tensorflow/magenta) and [TensorFlow](https://www.tensorflow.org/) which is then returned back to the client.
 
-## INSTALLATION
-
-A.I. Duet only works with [Python 2.7](https://www.python.org/download/releases/2.7/) and it was tested with Node v6. There are two basic ways of installing A.I. Duet: with Docker or without Docker.
-
-If you already have a Python environment setup, install all of the server dependencies and start the server by typing the following in the terminal:
-
-```bash
-cd server
-pip install -r requirements.txt
-```
-
-If this does not work, jump down to the [Docker](#docker) installation instructions, which will walk you through installing A.I. Duet within a Docker container.
-
-If it _did_ install tensorflow and magenta successfully, you can run the server by typing:
-
-```bash
-python server.py
-```
-
-Then to build and install the front-end Javascript code, first make sure you have [Node.js](https://nodejs.org) 6 installed. And then install of the dependencies of the project and build the code by typing the following in the terminal:
-
-```bash
-cd static
-npm install
-npm run build
-```
-
-You can now play with A.I. Duet at [localhost:8080](http://localhost:8080).
-
 ## DOCKER
 
 [Docker](https://www.docker.com/) is an open-source containerization software which simplifies installation across various OSes. It is the simplest method to build and install both the front-end and back-end components. Once you have Docker installed, you can just run:
@@ -58,6 +29,15 @@ $ sudo docker run -t -p 8080:8080 ai-duet
 
 You can now play with A.I. Duet at [localhost:8080](http://localhost:8080).
 
+## DOCKER-COMPOSE
+
+```bash
+$ docker-compose build
+$ docker-compose up -d
+```
+
+You can now play with A.I. Duet at [localhost:8080](http://localhost:8080).
+
 ## MIDI SUPPORT
 
 The A.I. Duet supports MIDI keyboard input using [Web Midi API](https://webaudio.github.io/web-midi-api/) and the [WebMIDI](https://github.com/cotejp/webmidi) library.
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..9b87707
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,19 @@
+version: '2'
+
+services:
+  aiduet:
+    build: ./
+    image: aiduet
+    container_name: aiduet
+    hostname: aiduet
+    restart: "no"
+    environment:
+      - NVIDIA_VISIBLE_DEVICES=all
+    devices:
+      - /dev/nvidia0
+      - /dev/nvidiactl
+      - /dev/nvidia-uvm
+      - /dev/nvidia-uvm-tools
+    cap_add:
+      - IPC_LOCK
+    network_mode: host
\ No newline at end of file
diff --git a/server/requirements.txt b/server/requirements.txt
index 27b1c93..de6afd9 100644
--- a/server/requirements.txt
+++ b/server/requirements.txt
@@ -1,5 +1,6 @@
-tensorflow==0.12.1
-magenta==0.1.8
-Flask==0.12
-gunicorn==19.6.0
-ipython==5.1.0
\ No newline at end of file
+tensorflow-gpu==1.14.0
+magenta==1.1.8
+numba==0.53.1
+Flask
+gunicorn
+ipython
diff --git a/server/server.py b/server/server.py
index cb31fe8..733dc15 100644
--- a/server/server.py
+++ b/server/server.py
@@ -22,7 +22,7 @@ import sys
 if sys.version_info.major <= 2:
     from cStringIO import StringIO
 else:
-    from io import StringIO
+    from io import StringIO, BytesIO
 
 import time
 import json
@@ -34,7 +34,8 @@ app = Flask(__name__, static_url_path='', static_folder=os.path.abspath('../stat
 def predict():
     now = time.time()
     values = json.loads(request.data)
-    midi_data = pretty_midi.PrettyMIDI(StringIO(''.join(chr(v) for v in values)))
+#   midi_data = pretty_midi.PrettyMIDI(StringIO(''.join(chr(v) for v in values)))
+    midi_data = pretty_midi.PrettyMIDI(BytesIO(b''.join((v).to_bytes(1, 'little') for v in values)))
     duration = float(request.args.get('duration'))
     ret_midi = generate_midi(midi_data, duration)
     return send_file(ret_midi, attachment_filename='return.mid',
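A quick illustration of the `server.py` change above: under Python 3, `pretty_midi.PrettyMIDI` needs a binary stream, so the patch rebuilds the raw bytes from the JSON array of integer values and wraps them in `BytesIO` instead of the old `cStringIO`-based path. The sketch below is illustrative only and not part of the patch; the helper name `midi_from_byte_values` is made up, and `bytes(values)` is simply a shorter, equivalent spelling of the patch's `b''.join((v).to_bytes(1, 'little') for v in values)`.

```python
# Illustrative sketch (not part of the patch): decode the JSON array of byte
# values that the /predict endpoint receives into a PrettyMIDI object on Python 3.
from io import BytesIO

import pretty_midi


def midi_from_byte_values(values):
    """values: list of ints in 0-255, as parsed from request.data by json.loads."""
    # bytes(values) produces the same byte string as
    # b''.join(v.to_bytes(1, 'little') for v in values)
    return pretty_midi.PrettyMIDI(BytesIO(bytes(values)))
```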