Monday, 21 September 2020

HOW TO CONVERT IMAGES INTO ARRAYS USING cv2

import os
import cv2
import numpy as np 

from tqdm import tqdm 

training_data  = []

# train (a DataFrame with UID and growth_stage columns), images_path and
# IMG_SIZE are assumed to be defined beforehand
for i in tqdm(range(train.shape[0])):

    path = images_path+"/"+train.UID[i]+".jpeg"

    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))

    img = img/255.

    img = img.reshape(1, -1)  # flatten (IMG_SIZE, IMG_SIZE) into a single row of IMG_SIZE*IMG_SIZE values

    training_data.append([np.array(img), np.array(train['growth_stage'][i])])

    

len(training_data)

#save the data 

np.random.shuffle(training_data)

np.save("training_data.npy",training_data)



#load the data

%time data = np.load("training_data.npy",allow_pickle=True)

len(data)
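To feed the loaded array into a model, it can be split back into a feature matrix and a label vector. A minimal sketch, assuming each entry holds [image, growth_stage] as built above and that growth_stage is an integer label:

X = np.stack([item[0].reshape(-1) for item in data])  # (N, IMG_SIZE*IMG_SIZE)
y = np.array([int(item[1]) for item in data])         # (N,)
print(X.shape, y.shape)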


Wednesday, 16 September 2020

HOW TO PLOT IMAGES FROM A DATALOADER

import torch
import matplotlib.pyplot as plt

# get some random training images (train_loader is assumed to be defined earlier)

dataiter = iter(train_loader)

images, labels = next(dataiter)  # use the built-in next(); dataiter.next() was removed in newer PyTorch

arthopod_types = {0: 'Coleoptera', 1: 'Diptera', 2: 'Hymenoptera', 3: 'Lepidoptera'}

# Viewing data examples used for training

fig, axis = plt.subplots(3, 5, figsize=(15, 10))

for i, ax in enumerate(axis.flat):

    with torch.no_grad():

        image, label = images[i], labels[i]

        ax.imshow(img_display(image)) # add image

        ax.set(title = f"{arthopod_types[label.item()]}") # add label
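img_display is not defined in the snippet above; a minimal sketch of one possible version, assuming the loader yields 3-channel (C, H, W) tensors already scaled to [0, 1]:

import numpy as np

def img_display(img):
    # move the channel axis last so matplotlib can render the tensor as an image
    return np.transpose(img.numpy(), (1, 2, 0))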

Tuesday, 15 September 2020

HOW TO SEE GPU AVAILABILITY

 import tensorflow as tf 

if tf.test.gpu_device_name(): 
    print('Default GPU Device:{}'.format(tf.test.gpu_device_name()))
else:
    print("Please install GPU version of TF")

Saturday, 12 September 2020

HOW TO RESIZE IMAGES AND SAVE THEM IN A DIRECTORY USING KERAS IMAGE

 


from keras.preprocessing import image
import os

for i in train["unique_id/name of image "]:
  img_path = os.path.join(path,i)
  img = image.load_img(img_path,target_size=(224,224))
  image.save_img(f"Images_resize224/{i}",img)

# make directories where one, two, .. is the class name
dire = ['one','two','three','four','five','six','seven']
for d in dire:
    os.mkdir(d)

# track the number of images copied per class, capped at 150 per class
growth_stage_count = {}
for d in range(len(dire)):
    growth_stage_count.update({d+1:0})
    
# copy the 224*224 images into their respective class directories
for i, v in enumerate(train["UID"]):
    gs = train['growth_stage'].iloc[i]
    growth_stage_count[gs] += 1
    if growth_stage_count[gs] <= 150:
        v = v + '.jpeg'
        img_path = os.path.join('Images/', v)
        print(img_path)
        img = image.load_img(img_path, target_size=(224, 224))
        image.save_img(f"{dire[gs-1]}/{v}", img)  # v already ends in .jpeg
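A quick sanity check of the copy step, assuming the class directories above sit in the working directory: count the files written to each one.

for d in dire:
    print(d, len(os.listdir(d)))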

Thursday, 10 September 2020

ZINDI - CGIAR Wheat Growth Stage Challenge by CGIAR Platform for Big Data in Agriculture

 FAST AI MODEL



#FastAI

import wandb

from wandb.fastai import WandbCallback  # Weights & Biases callback to see inside the model

wandb.init(project="wheat-zindi")


# the image names are stored in a CSV file, with the file name in the UID column

import pandas as pd

from fastai.vision import *


train = pd.read_csv("./train.csv")

train['UID']  = train['UID'] + ".jpeg"

data  = ImageDataBunch.from_df("./Images",df=train, ds_tfms=get_transforms(),bs = 64,size = 224)

learn = cnn_learner(data, models.densenet121, metrics=[accuracy, top_k_accuracy], callback_fns=[WandbCallback])

learn.fit_one_cycle(4)
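As a follow-up sketch, assuming the fastai v1 API used above, the trained weights can be saved and the most confused classes inspected:

learn.save('densenet121-stage1')

interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()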