How to optimize prediction using Keras package?

r
data_science

#1

Hi,
I am working on case studies from below link:
https://www.r-bloggers.com/deep-learning-for-brand-logo-detection/

I gone through all steps and try to get prediction from sample example image but I am getting wrong prediction my model accuracy is 51%. and I tried to change batch size and epoch but not getting correct answer.
Can anyone please tell me what is wrong in it?
my code is follows:
options(stringsAsFactors = F)
df <- read.csv("flickrData/flickr_logos_27_dataset_query_set_annotation.txt", sep="\t")
t <- unique(df[,2])

for( i in 1:length(t)){
dir.create(paste0("flickrData/train/", t[i]))
dir.create(paste0("flickrData/test/", t[i]))
}

train <- sample(df[,1], 200)
test <- df[!df[,1] %in% train,][,1]

length(train)
length(test)

copy files

for(i in train){
label <- df[df[,1] == i,][,2]
file.copy(paste0("flickrData/flickr_logos_27_dataset_images/", i), paste0("flickrData/train/", label, "/", i))
}

for(i in test){
label <- df[df[,1] == i,][,2]
file.copy(paste0("flickrData/flickr_logos_27_dataset_images/", i), paste0("flickrData/test/", label, "/", i))
}

library(keras)

install_tensorflow()

setting up the model

img_width <- 64
img_height <- 64
batch_size <- 64

train_directory <- "flickrData/train"
test_directory <- "flickrData/test"

train_generator <- flow_images_from_directory(train_directory, generator = image_data_generator(),
target_size = c(img_width, img_height), color_mode = "rgb",
class_mode = "categorical", batch_size = batch_size, shuffle = TRUE,
seed = 123)

validation_generator <- flow_images_from_directory(test_directory, generator = image_data_generator(),
target_size = c(img_width, img_height), color_mode = "rgb", classes = NULL,
class_mode = "categorical", batch_size = batch_size, shuffle = TRUE,
seed = 123)

model <- keras_model_sequential()
model %>%
layer_conv_2d(filter = 16, kernel_size = c(3,3), input_shape = c(img_width, img_height, 3)) %>%
layer_activation("relu") %>%
layer_max_pooling_2d(pool_size = c(2,2)) %>%

layer_conv_2d(filter = 32, kernel_size = c(3,3)) %>%
layer_activation("relu") %>%
layer_max_pooling_2d(pool_size = c(2,2)) %>%

layer_conv_2d(filter = 64, kernel_size = c(3,3)) %>%
layer_activation("relu") %>%
layer_max_pooling_2d(pool_size = c(2,2)) %>%

layer_flatten() %>%
layer_dense(64) %>%
layer_activation("relu") %>%
layer_dropout(0.5) %>%
layer_dense(28) %>% ## we have 28 classes!
layer_activation("softmax")

model %>% compile(
loss = "categorical_crossentropy",
optimizer = optimizer_rmsprop(lr = 0.0001, decay = 1e-6),
metrics = "accuracy"
)

train_samples <- 200
validation_samples <- 70

hist <- model %>% fit_generator(
train_generator,
steps_per_epoch = as.integer(train_samples/batch_size),
epochs = 200,
validation_data = validation_generator,
validation_steps = as.integer(validation_samples/batch_size),
verbose=2
)

########

evaluation:

evaluate_generator(model,validation_generator, validation_samples)

##################

extract predictions! - out of sample - simple example

img_path <- "https://logorealm.com/wp-content/uploads/2017/02/adidas-trefoil-logo.png"
download.file(img_path,'test.jpg', mode = 'wb')

img <- image_load('test.jpg', target_size = c(64,64))
x <- image_to_array(img)
dim(x) <- c(1, dim(x))
prediction <- model %>% predict(x)

colnames(prediction) <- unique(df[,2])[1:28]
prediction[,which.max(prediction)]

please can someone help me in it. Here I just change df$file = df[,1] and df$label = df[,2] because in text file there there is not header so it give me error when i do unique(df$label) or df$file so for call particular column i did this.

Please someone tell me how can I increase accuracy and prediction?
Thanks in Advance