{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 103,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "careful, link QogS88yyNuM needs to be removed from the list\n",
      "careful, link P6c_bRWqZ-M needs to be removed from the list\n",
      "careful, link l8G6eU9UQZE needs to be removed from the list\n",
      "careful, link Yd6jMhjyV3Q needs to be removed from the list\n",
      "careful, link lY4eHaiVK9s needs to be removed from the list\n",
      "careful, link Y56MRtY_XDw needs to be removed from the list\n",
      "careful, link PxCoKNCFAVI needs to be removed from the list\n",
      "careful, link q-dlGYfvWBs needs to be removed from the list\n",
      "careful, link xyc2wJipGJ0 needs to be removed from the list\n",
      "careful, link bt7W_R8CNNk needs to be removed from the list\n",
      "careful, link vOBHeqFMr1U needs to be removed from the list\n",
      "careful, link zan9W8HZXbs needs to be removed from the list\n",
      "careful, link oVdCoZMoOQ0 needs to be removed from the list\n",
      "careful, link yhJnsjoUheM needs to be removed from the list\n",
      "careful, link MMNDqIrgHEE needs to be removed from the list\n",
      "careful, link NlLp3JQ_NEE needs to be removed from the list\n",
      "careful, link 48IFYRycQFM needs to be removed from the list\n",
      "careful, link TAy0qbYEmM4 needs to be removed from the list\n",
      "careful, link Y1ZON66BbB0 needs to be removed from the list\n"
     ]
    }
   ],
   "source": [
    "import ast\n",
    "\n",
    "#L = ['Sithad108Og','fBNpSRtfIUA', '2e-eXJ6HgkQ', 'tN1A2mVnrOM', 'myTaigPrbsg', 'b75lZw8nkvo']\n",
    "\n",
    "\n",
    "def get_genre_from_link():\n",
    "    path = \"./Link-dictionnaries/Link-dictionnary2000.txt\"\n",
    "    file = open(path, \"r\").read()\n",
    "    dictyear = ast.literal_eval(file)\n",
    "    dict_inverse = {}\n",
    "    links_to_be_removed = []\n",
    "    for movie_id in dictyear.keys():\n",
    "        if dictyear[movie_id][1] != []:\n",
    "            dict_inverse[dictyear[movie_id][2]] = dictyear[movie_id][1][0]\n",
    "        else:\n",
    "            print(f'careful, link {dictyear[movie_id][2]} needs to be removed from the list')\n",
    "            links_to_be_removed += [dictyear[movie_id][2]]\n",
    "    return dict_inverse, links_to_be_removed\n",
    "\n",
    "\n",
    "def get_output_list(L):\n",
    "    dict_inverse, links_to_be_removed = get_genre_from_link()\n",
    "    output = []\n",
    "    for link in L:\n",
    "        if link[-1]==\".\":\n",
    "            link=link[:-1]\n",
    "        if link in links_to_be_removed:\n",
    "            L.remove(link)\n",
    "        else:\n",
    "            output += [dict_inverse[link]]\n",
    "    return output\n",
    "\n",
    "labels=get_output_list(URL2000)"
   ]
  },
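  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch (illustration, not part of the pipeline): the entry format\n",
    "# that get_genre_from_link() above appears to assume for the Link-dictionnary\n",
    "# files: index 1 holds the genre list, index 2 the YouTube id (index 0 is\n",
    "# left unspecified here). Uses an in-memory example instead of the real\n",
    "# file; the two ids come from the commented-out list above.\n",
    "import ast\n",
    "\n",
    "example_text = \"{'movie_a': ('...', ['Drama'], 'Sithad108Og'), 'movie_b': ('...', [], 'fBNpSRtfIUA')}\"\n",
    "example_dict = ast.literal_eval(example_text)\n",
    "inverse = {entry[2]: entry[1][0] for entry in example_dict.values() if entry[1]}\n",
    "print(inverse)  # -> {'Sithad108Og': 'Drama'}\n",
    "print([entry[2] for entry in example_dict.values() if not entry[1]])  # ids with no genre"
   ]
  },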
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#!/usr/bin/python\n",
    "\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "import argparse\n",
    "import sys\n",
    "import numpy as np\n",
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.image as mpimg\n",
    "import tensorflow as tf\n",
    "import os\n",
    "import cv2\n",
    "from math import floor\n",
    "\n",
    "# >>>> Loading Spectrum data\n",
    "images = []\n",
    "URL2000=[]\n",
    "limit=0.9\n",
    "for file in os.listdir(\"spectrumImages2000\"):\n",
    "    #print(file[:-4])\n",
    "    img = cv2.imread('./spectrumImages2000/' + file, 0)\n",
    "    #image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n",
    "    URL2000 += [file[:-4]]\n",
    "    images += [img[0:2][0:100]]\n",
    "#print(len(URL2000))\n",
    "trX=images\n",
    "trY=labels\n",
    "p = np.random.permutation(range(len(trX)))#shuffle les données\n",
    "for old_index, new_index in enumerate(p):\n",
    "    trX[new_index] = trX[old_index]\n",
    "    trY[new_index] = trY[old_index]\n",
    "testX=trX[floor(limit*len(trX))+1:-1]\n",
    "testY=trY[floor(limit*len(trY))+1:-1]\n",
    "trX=trX[:floor(limit*len(trX))]\n",
    "trY=trY[:floor(limit*len(trY))]"
   ]
  },
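  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch (assumption, not from the original notebook): the y_\n",
    "# placeholder in the next cell expects numeric one-hot rows, while trY and\n",
    "# testY are genre strings. One possible encoding, with the genre vocabulary\n",
    "# derived from the labels themselves (19 genres would give rows of size 19):\n",
    "import numpy as np\n",
    "\n",
    "def one_hot_labels(labels_list):\n",
    "    genres = sorted(set(labels_list))             # genre vocabulary\n",
    "    index = {g: k for k, g in enumerate(genres)}  # genre -> column\n",
    "    encoded = np.zeros((len(labels_list), len(genres)), dtype=np.float32)\n",
    "    for row, genre in enumerate(labels_list):\n",
    "        encoded[row, index[genre]] = 1.0\n",
    "    return encoded, genres\n",
    "\n",
    "# e.g. trY_onehot, genre_names = one_hot_labels(trY)"
   ]
  },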
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {},
   "outputs": [
    {
     "ename": "ValueError",
     "evalue": "could not broadcast input array from shape (2,214) into shape (2)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-122-a4d49deabc37>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m    148\u001b[0m         \u001b[0mend\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mstart\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mBATCH_SIZE\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    149\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mi\u001b[0m\u001b[1;33m%\u001b[0m\u001b[1;36m10\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 150\u001b[1;33m             \u001b[0mtrain_accuracy\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0maccuracy\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0meval\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m{\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mtrX\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mend\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my_\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mtrY\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mend\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkeep_prob\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;36m1.0\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    151\u001b[0m             \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"epoch: %d, training accuracy: %g\"\u001b[0m\u001b[1;33m%\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtrain_accuracy\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    152\u001b[0m         \u001b[0moptimization_algorithm\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m{\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mtrX\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mend\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my_\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mtrY\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mstart\u001b[0m\u001b[1;33m:\u001b[0m\u001b[0mend\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkeep_prob\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;36m0.5\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\program files (x86)\\python36\\lib\\site-packages\\tensorflow\\python\\framework\\ops.py\u001b[0m in \u001b[0;36meval\u001b[1;34m(self, feed_dict, session)\u001b[0m\n\u001b[0;32m    646\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    647\u001b[0m     \"\"\"\n\u001b[1;32m--> 648\u001b[1;33m     \u001b[1;32mreturn\u001b[0m \u001b[0m_eval_using_default_session\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgraph\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msession\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    649\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    650\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\program files (x86)\\python36\\lib\\site-packages\\tensorflow\\python\\framework\\ops.py\u001b[0m in \u001b[0;36m_eval_using_default_session\u001b[1;34m(tensors, feed_dict, graph, session)\u001b[0m\n\u001b[0;32m   4756\u001b[0m                        \u001b[1;34m\"the tensor's graph is different from the session's \"\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   4757\u001b[0m                        \"graph.\")\n\u001b[1;32m-> 4758\u001b[1;33m   \u001b[1;32mreturn\u001b[0m \u001b[0msession\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   4759\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   4760\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\program files (x86)\\python36\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36mrun\u001b[1;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m    893\u001b[0m     \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    894\u001b[0m       result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[1;32m--> 895\u001b[1;33m                          run_metadata_ptr)\n\u001b[0m\u001b[0;32m    896\u001b[0m       \u001b[1;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    897\u001b[0m         \u001b[0mproto_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mc:\\program files (x86)\\python36\\lib\\site-packages\\tensorflow\\python\\client\\session.py\u001b[0m in \u001b[0;36m_run\u001b[1;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[0;32m   1095\u001b[0m             \u001b[0mfeed_handles\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0msubfeed_t\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msubfeed_val\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1096\u001b[0m           \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1097\u001b[1;33m             \u001b[0mnp_val\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0masarray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msubfeed_val\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0msubfeed_dtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1098\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1099\u001b[0m           if (not is_tensor_handle_feed and\n",
      "\u001b[1;32mc:\\program files (x86)\\python36\\lib\\site-packages\\numpy\\core\\numeric.py\u001b[0m in \u001b[0;36masarray\u001b[1;34m(a, dtype, order)\u001b[0m\n\u001b[0;32m    490\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    491\u001b[0m     \"\"\"\n\u001b[1;32m--> 492\u001b[1;33m     \u001b[1;32mreturn\u001b[0m \u001b[0marray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0ma\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcopy\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0morder\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0morder\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    493\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    494\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mValueError\u001b[0m: could not broadcast input array from shape (2,214) into shape (2)"
     ]
    }
   ],
   "source": [
    "print((input_data))\n",
    "\n",
    "# >>>> Launching interactive TF session\n",
    "sess = tf.InteractiveSession()\n",
    "\n",
    "x = tf.placeholder(tf.float32, shape=[None, 784]) # =====> TO CHANGE\n",
    "y_ = tf.placeholder(tf.float32, shape=[None, 10]) # =====> 19 car 19 genres ?\n",
    "\n",
    "# >>>> Weight variable function\n",
    "\"\"\"\n",
    "Randomly initializes the weights of a variable of \n",
    "shape = 'shape_var'. This function returns a tensor of \n",
    "the specified shape filled with random values.\n",
    "\"\"\"\n",
    "def weight_variable(shape_var):\n",
    "  initial = tf.truncated_normal(shape_var, stddev=0.1)\n",
    "  return tf.Variable(initial)\n",
    "\n",
    "# >>>> Bias variable function\n",
    "\"\"\"\n",
    "Creates a constant tensor of shape = 'shape_bias' with all \n",
    "elements equal to the value 0.1.\n",
    "\"\"\"\n",
    "def bias_variable(shape_bias):\n",
    "  initial = tf.constant(0.1, shape=shape_bias)\n",
    "  return tf.Variable(initial)\n",
    "\n",
    "# >>>> Conv2d function\n",
    "\"\"\"\n",
    "Computes the convolution between a filter W and an image x. \n",
    "Parameters: stride=1, padding=0.\n",
    "\"\"\"\n",
    "def conv2d(x, W):\n",
    "  return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') # ==> on n'a pas besoin d'être en 2D nous !\n",
    "\n",
    "# >>>> Max-pooling function\n",
    "\"\"\"\n",
    "Computes the max-pooling for every patches of size 2x2 of an \n",
    "input image x.\n",
    "\"\"\"\n",
    "def max_pool_2x2(x):\n",
    "  return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='SAME')\n",
    "\n",
    "# >>>> Reshape input data vectors \n",
    "\"\"\"\n",
    "Reshape a vector of size 784x1 into a matrix of size 28x28x1.\n",
    "The parameter '-1' indicates that the size of the dimension at \n",
    "that index of the parameter, remains the same.\n",
    "\"\"\"\n",
    "x_image = tf.reshape(x, [-1,28,28,1]) \n",
    "\n",
    "# >>>> Convolutional layer 1\n",
    "\"\"\"\n",
    "Random initialization of the weights W_conv1 (filters of conv1)\n",
    "This layer will compute the convolution of 32 filters (of size 5x5) \n",
    "with the input image (third dimension = 1 indicates that the input \n",
    "tensor is one image, corresponding to the input grayscale images).\n",
    "\"\"\"\n",
    "W_conv1 = weight_variable([5, 5, 1, 32]) \n",
    "\n",
    "# > Bias of convolutional layer 1\n",
    "\"\"\"\n",
    "Initialize the bias of conv-layer 1 with a constant value of 0.1.\n",
    "The value 32 indicates that we have 32 filters in conv1 and \n",
    "thus, we will add a bias in each of these filters.\n",
    "\"\"\"\n",
    "b_conv1 = bias_variable([32]) \n",
    "\n",
    "# > Computing the output values of conv1 (feature maps)\n",
    "\"\"\"\n",
    "This will output a set of 32 feature maps of size 28x28x1.\n",
    "Each feature map will be the output of the convolution of \n",
    "one filter (among the 32 filters) with the input image.\n",
    "\"\"\"\n",
    "h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) \n",
    "\n",
    "# > Computing the output values of max-pool 1 (feature maps)\n",
    "\"\"\"\n",
    "Application of the max-pooling function on the 32 feature-maps (of size 28x28) \n",
    "obtained from previous convolutional-layer.\n",
    "This will output 32 feature-maps of size 14x14 (because we max-pool every 2x2 patches).\n",
    "\"\"\"\n",
    "h_pool1 = max_pool_2x2(h_conv1)\n",
    "\n",
    "# >>>> Convolutional layer 2\n",
    "\"\"\"\n",
    "Random initialization of the weights W_conv2 (filters of conv2).\n",
    "This layer will compute the convolution of 64 filters (of size 5x5) \n",
    "with the input images (third dimension = 32 indicates that the input \n",
    "tensor is a set of 32 images, corresponding to the feature maps of \n",
    "size 14x14 obtained after max-pool1).\n",
    "\"\"\"\n",
    "W_conv2 = weight_variable([5, 5, 32, 64]) # declaration of the weights of conv2\n",
    "b_conv2 = bias_variable([64]) # declaration of the weights of bias of conv2\n",
    "\n",
    "# > Computing the output values of conv2 (feature maps)\n",
    "\"\"\"\n",
    "output: 64 feature maps of size 14x14x1\n",
    "\"\"\"\n",
    "h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) \n",
    " \n",
    "# > Computing the output values of max-pool2 (feature maps)\n",
    "\"\"\"\n",
    "output: 64 images of size 7x7x1\n",
    "\"\"\"\n",
    "h_pool2 = max_pool_2x2(h_conv2)\n",
    "\n",
    "# >>>> Fully-connected layer 1\n",
    "W_fc1 = weight_variable([7 * 7 * 64, 1024])\n",
    "b_fc1 = bias_variable([1024])\n",
    "\n",
    "# > Reshape the feature maps of max-pool2\n",
    "\"\"\"\n",
    "This will reshape the 64 feature maps of size 7x7x1\n",
    "into a vector of size 7x7x1x64 (=3136).\n",
    "\"\"\"\n",
    "h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n",
    "h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "# > Dropout\n",
    "keep_prob = tf.placeholder(tf.float32)\n",
    "h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "# >>>> Fully-connected layer 2\n",
    "W_fc2 = weight_variable([1024, 10])\n",
    "b_fc2 = bias_variable([10])\n",
    "y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
    "\n",
    "# >>>> Cost function and optimization algorithm\n",
    "cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n",
    "optimization_algorithm = tf.train.AdamOptimizer(1e-4).minimize(cost_function)\n",
    "\n",
    "correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "sess.run(tf.global_variables_initializer())\n",
    "# >>>> Training the network\n",
    "BATCH_SIZE=128\n",
    "for i in range(10000):\n",
    "    batch = mnist.train.next_batch(50)\n",
    "    p = np.random.permutation(range(len(trX)))#shuffle les données\n",
    "    for old_index, new_index in enumerate(p):\n",
    "        trX[new_index] = trX[old_index]\n",
    "        trY[new_index] = trY[old_index]\n",
    "\n",
    "    # Apprentissage avec des minibatches de taille 128\n",
    "    for start in range(0, len(trX), BATCH_SIZE):\n",
    "        end = start + BATCH_SIZE\n",
    "        if i%10 == 0:\n",
    "            train_accuracy = accuracy.eval(feed_dict={x:trX[start:end], y_: trY[start:end], keep_prob: 1.0})\n",
    "            print(\"epoch: %d, training accuracy: %g\"%(i, train_accuracy))\n",
    "        optimization_algorithm.run(feed_dict={x:trX[start:end], y_: trY[start:end], keep_prob: 0.5})\n",
    "\n",
    "# >>>> Testing the network on the Test data\n",
    "print(\"\\n\\nTest accuracy: %g\"%accuracy.eval(feed_dict={x:testX, y_: testY, keep_prob: 1.0}))\n",
    "\n",
    "\n",
    "# >>>> Displaying some feature maps\n",
    "\n",
    "# > Normalize feature maps function \n",
    "def normalize_feat_map(feat_map, width_feat_map, height_feat_map):\n",
    "   max_feat_map = np.amax(feat_map)\n",
    "   min_feat_map = np.amin(feat_map)\n",
    "   diff_max_min = max_feat_map - min_feat_map\n",
    "   for i in range(0,width_feat_map):\n",
    "      for j in range(0,height_feat_map):\n",
    "         feature_map[i, j] = (feature_map[i, j] - min_feat_map) / diff_max_min\n",
    "   return feature_map\n",
    "\n",
    "\"\"\"layer_to_extract = h_conv1 # layer from which we want to extract the feature maps\n",
    "\n",
    "# for the 5 first test images\n",
    "for img_to_test in range(1, 5):\n",
    "   extracted_feature = sess.run(layer_to_extract, feed_dict={x: mnist.test.images[img_to_test:img_to_test+1], keep_prob: 1.0}) # extract feature at layer_to_extract for test image 1\n",
    "\n",
    "   # for the 5 first filters\n",
    "   for nbr_feat_map in range(0,4):\n",
    "      feature_map = extracted_feature[:,:,:,nbr_feat_map].reshape((28,28))\n",
    "      normalize_feat_map(feature_map, 27, 27) # normalize the values of the feature map\n",
    "      plt.imshow(feature_map, cmap='gray') # display the output feature map\n",
    "      plt.show()\"\"\""
   ]
  },
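  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch (assumption, not from the original notebook): the size\n",
    "# 7*7*64 of W_fc1 above is tied to 28x28 MNIST inputs, because each of the\n",
    "# two 2x2 'SAME' max-pools halves the spatial dimensions (28 -> 14 -> 7).\n",
    "# This helper computes the flattened size for an arbitrary HxW input, e.g.\n",
    "# for the 2x100 spectrum crops loaded earlier.\n",
    "from math import ceil\n",
    "\n",
    "def flattened_size_after_two_pools(height, width, channels_out=64):\n",
    "    # 'SAME' max-pooling with stride 2 maps n to ceil(n / 2)\n",
    "    h = ceil(ceil(height / 2) / 2)\n",
    "    w = ceil(ceil(width / 2) / 2)\n",
    "    return h * w * channels_out\n",
    "\n",
    "print(flattened_size_after_two_pools(28, 28))   # 3136 = 7*7*64, the MNIST case above\n",
    "print(flattened_size_after_two_pools(2, 100))   # 1600, the size W_fc1 would need for 2x100 crops"
   ]
  },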
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}