@inproceedings{saleh2018deep,
  author            = {Saleh, Adel and Abdel-Nasser, Mohamed and Sarker, Md. Mostafa Kamal and Singh, Vivek Kumar and Abdulwahab, Saddam and Saffari, Nasibeh and Garcia, Miguel Angel and Puig, Domenec},
  title             = {Deep Visual Embedding for Image Classification},
  booktitle         = {2018 International Conference on Innovative Trends in Computer Engineering ({ITCE} 2018)},
  year              = {2018},
  pages             = {31--35},
  publisher         = {Institute of Electrical and Electronics Engineers ({IEEE})},
  doi               = {10.1109/ITCE.2018.8316596},
  isbn              = {9781538608777},
  url               = {https://rgu-repository.worktribe.com/output/1542133},
  keywords          = {Deep learning, Embedding, Image classification},
  publicationstatus = {Published},
  abstract          = {This paper proposes a new visual embedding method for image classification. It goes further in the analogy with textual data and allows us to read visual sentences in a certain order as in the case of text. The proposed method considers the spatial relations between visual words. It uses a very popular text analysis method called `word2vec'. In this method, we learn visual dictionaries based on filters of convolution layers of the convolutional neural network (CNN), which is used to capture the visual context of images. We employee visual embedding to convert words to real vectors. We evaluate many designs of dictionary building methods. To assess the performance of the proposed method, we used CIFAR10 and MNIST datasets. The experimental results show that the proposed visual embedding method outperforms the performance of several image classification methods. Experiments also show that our method can improve image classification regardless the structure of the CNN.},
  note              = {INFO COMPLETE (Record added by contact) PERMISSION GRANTED (version = AAM; embargo = none; licence = Pub's own; POLICY = https://www.ieee.org/publications/rights/rights-policies.html DOCUMENT READY (AAM rec'd 18/1/2022 LM) ADDITIONAL INFO - Contact: Mostafa Sarker},
}