{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# GPU Environment Tensorflow" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "
\n", "\n", "This tutorial is available as an IPython notebook at [malaya-speech/example/gpu-environment-tensorflow](https://github.com/huseinzol05/malaya-speech/tree/master/example/gpu-environment-tensorflow).\n", " \n", "
" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import os\n", "\n", "os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n", "os.environ['CUDA_VISIBLE_DEVICES'] = '1'" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "WARNING:malaya_speech.streaming:`pyaudio` is not available, `malaya_speech.streaming.stream` is not able to use.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "CPU times: user 184 ms, sys: 17.5 ms, total: 201 ms\n", "Wall time: 93.9 ms\n" ] } ], "source": [ "%%time\n", "\n", "import malaya_speech\n", "import logging\n", "logging.basicConfig(level = logging.INFO)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### List available GPU\n", "\n", "**You must install Tensorflow GPU version first to enable GPU hardware acceleration**." ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "scrolled": true }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2023-02-09 15:53:12.310410: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n", "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", "2023-02-09 15:53:12.325848: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.328364: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.329202: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.684062: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.684890: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.685553: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.685983: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:39] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. 
{ "cell_type": "markdown", "metadata": {}, "source": [ "### List available GPU\n", "\n", "**You must install the GPU version of TensorFlow first to enable GPU hardware acceleration.**" ] }, { "cell_type": "code", "execution_count": 3, "metadata": { "scrolled": true }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2023-02-09 15:53:12.310410: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n", "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", "2023-02-09 15:53:12.325848: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.328364: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.329202: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.684062: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.684890: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.685553: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:53:12.685983: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:39] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.\n", "2023-02-09 15:53:12.685999: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1510] Created device /device:GPU:0 with 22302 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:07:00.0, compute capability: 8.6\n" ] }, { "data": { "text/plain": [ "[name: \"/device:CPU:0\"\n", " device_type: \"CPU\"\n", " memory_limit: 268435456\n", " locality {\n", " }\n", " incarnation: 11493416253575721766,\n", " name: \"/device:GPU:0\"\n", " device_type: \"GPU\"\n", " memory_limit: 23385997312\n", " locality {\n", " bus_id: 1\n", " links {\n", " }\n", " }\n", " incarnation: 16395544932201862459\n", " physical_device_desc: \"device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:07:00.0, compute capability: 8.6\"]" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from tensorflow.python.client import device_lib\n", "\n", "device_lib.list_local_devices()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Run model inside GPU\n", "\n", "We can follow the steps from https://www.tensorflow.org/guide/gpu." ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "import tensorflow as tf\n", "\n", "tf.debugging.set_log_device_placement(True)" ] },
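{ "cell_type": "markdown", "metadata": {}, "source": [ "With device placement logging enabled, TensorFlow prints the device each op executes on. A quick sanity check along the lines of the guide, assuming at least one visible GPU,\n", "\n", "```python\n", "import tensorflow as tf\n", "\n", "# Should log something like `Executing op MatMul in device .../device:GPU:0`.\n", "with tf.device('/GPU:0'):\n", "    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n", "    b = tf.constant([[1.0, 1.0], [0.0, 1.0]])\n", "    print(tf.matmul(a, b))\n", "```" ] },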
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
Size (MB)Quantized Size (MB)malay-malayaLanguage
hubert-conformer-tiny36.610.3{'WER': 0.238714008166, 'CER': 0.060899814, 'W...[malay]
hubert-conformer11531.1{'WER': 0.2387140081, 'CER': 0.06089981404, 'W...[malay]
hubert-conformer-large392100{'WER': 0.2203140421, 'CER': 0.0549270416, 'WE...[malay]
\n", "
" ], "text/plain": [ " Size (MB) Quantized Size (MB) \\\n", "hubert-conformer-tiny 36.6 10.3 \n", "hubert-conformer 115 31.1 \n", "hubert-conformer-large 392 100 \n", "\n", " malay-malaya \\\n", "hubert-conformer-tiny {'WER': 0.238714008166, 'CER': 0.060899814, 'W... \n", "hubert-conformer {'WER': 0.2387140081, 'CER': 0.06089981404, 'W... \n", "hubert-conformer-large {'WER': 0.2203140421, 'CER': 0.0549270416, 'WE... \n", "\n", " Language \n", "hubert-conformer-tiny [malay] \n", "hubert-conformer [malay] \n", "hubert-conformer-large [malay] " ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "malaya_speech.stt.ctc.available_transformer()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Malaya frozen graph interfaces" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### load graph\n", "\n", "All the malaya tensorflow model interface will pass vector arguments to `malaya_boilerplate.frozen_graph.load_graph`,\n", "\n", "```python\n", "def load_graph(package, frozen_graph_filename, **kwargs):\n", " \"\"\"\n", " Load frozen graph from a checkpoint.\n", "\n", " Parameters\n", " ----------\n", " frozen_graph_filename: str\n", " precision_mode: str, optional (default='FP32')\n", " change precision frozen graph, only supported one of ['BFLOAT16', 'FP16', 'FP32', 'FP64'].\n", " device: str, optional (default='CPU:0')\n", " device to use for specific model, read more at https://www.tensorflow.org/guide/gpu\n", "\n", " Returns\n", " -------\n", " result : tensorflow.Graph\n", " \"\"\"\n", "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### generate session\n", "\n", "After get load into the graph, it will pass the graph into `malaya_boilerplate.frozen_graph.generate_session` to generate session for Tensorflow graph,\n", "\n", "```python\n", "def generate_session(graph, **kwargs):\n", " \"\"\"\n", " Load session for a Tensorflow graph.\n", "\n", " Parameters\n", " ----------\n", " graph: tensorflow.Graph\n", " gpu_limit: float, optional (default = 0.999)\n", " limit percentage to use a gpu memory.\n", "\n", " Returns\n", " -------\n", " result : tensorflow.Session\n", " \"\"\"\n", "```" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "9f657c8612574db1b1b1ef83b479de9d", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading: 0%| | 0.00/36.6M [00:00 device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:07:00.0, compute capability: 8.6\n", "2023-02-09 15:54:03.235117: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.235827: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.236258: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.236728: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.237151: I 
{ "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "9f657c8612574db1b1b1ef83b479de9d", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Downloading: 0%| | 0.00/36.6M [00:00 device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:07:00.0, compute capability: 8.6\n", "2023-02-09 15:54:03.235117: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.235827: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.236258: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.236728: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.237151: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:54:03.237569: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1510] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 22302 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:07:00.0, compute capability: 8.6\n" ] } ], "source": [ "tiny = malaya_speech.stt.ctc.transformer(model = 'hubert-conformer-tiny', device = 'GPU:0')" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "y, _ = malaya_speech.load('speech/example-speaker/husein-zolkepli.wav')" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2023-02-09 15:55:01.695095: I tensorflow/stream_executor/cuda/cuda_dnn.cc:369] Loaded cuDNN version 8302\n", "2023-02-09 15:55:02.590549: I tensorflow/core/platform/default/subprocess.cc:304] Start cannot spawn child process: No such file or directory\n", "2023-02-09 15:55:02.680280: I tensorflow/stream_executor/cuda/cuda_blas.cc:1760] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Executing op _EagerConst in device /job:localhost/replica:0/task:0/device:GPU:0\n", "Executing op CTCBeamSearchDecoder in device /job:localhost/replica:0/task:0/device:CPU:0\n", "WARNING:tensorflow:From /home/husein/dev/malaya-speech/malaya_speech/model/wav2vec.py:66: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use `tf.cast` instead.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2023-02-09 15:55:02.788906: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:55:02.789677: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:55:02.790159: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:55:02.790921: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:55:02.791811: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:55:02.792488: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:55:02.792915: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:55:02.793309: 
I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:937] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", "2023-02-09 15:55:02.793700: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1510] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 22302 MB memory: -> device: 0, name: NVIDIA GeForce RTX 3090 Ti, pci bus id: 0000:07:00.0, compute capability: 8.6\n", "WARNING:tensorflow:From /home/husein/dev/malaya-speech/malaya_speech/model/wav2vec.py:66: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", "Instructions for updating:\n", "Use `tf.cast` instead.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Executing op Cast in device /job:localhost/replica:0/task:0/device:GPU:0\n", "Executing op _EagerConst in device /job:localhost/replica:0/task:0/device:GPU:0\n", "Executing op Fill in device /job:localhost/replica:0/task:0/device:GPU:0\n", "Executing op SparseToDense in device /job:localhost/replica:0/task:0/device:GPU:0\n", "Executing op StridedSlice in device /job:localhost/replica:0/task:0/device:GPU:0\n" ] }, { "data": { "text/plain": [ "['testing nama saya busin bian zokeple']" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tiny.predict([y])" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [
"Thu Feb 9 15:55:17 2023 \r\n", "+-----------------------------------------------------------------------------+\r\n", "| NVIDIA-SMI 470.161.03 Driver Version: 470.161.03 CUDA Version: 11.4 |\r\n", "|-------------------------------+----------------------+----------------------+\r\n", "| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\r\n", "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\r\n", "| | | MIG M. |\r\n", "|===============================+======================+======================|\r\n", "| 0 NVIDIA GeForce ... Off | 00000000:01:00.0 Off | Off |\r\n", "| 47% 61C P2 345W / 350W | 22423MiB / 24256MiB | 97% Default |\r\n", "| | | N/A |\r\n", "+-------------------------------+----------------------+----------------------+\r\n", "| 1 NVIDIA GeForce ... Off | 00000000:07:00.0 Off | Off |\r\n", "| 0% 47C P2 106W / 350W | 4146MiB / 24256MiB | 0% Default |\r\n", "| | | N/A |\r\n", "+-------------------------------+----------------------+----------------------+\r\n", " \r\n", "+-----------------------------------------------------------------------------+\r\n", "| Processes: |\r\n", "| GPU GI CI PID Type Process name GPU Memory |\r\n", "| ID ID Usage |\r\n", "|=============================================================================|\r\n", "| 0 N/A N/A 809903 C python3 22421MiB |\r\n", "| 1 N/A N/A 1243519 C /usr/bin/python3 2481MiB |\r\n", "| 1 N/A N/A 1244555 C /usr/bin/python3 1663MiB |\r\n", "+-----------------------------------------------------------------------------+\r\n" ] } ], "source": [ "!nvidia-smi" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" } }, "nbformat": 4, "nbformat_minor": 2 }