diff --git a/Tasks/KavyaV/day2.py b/Tasks/KavyaV/day2.py new file mode 100644 index 0000000..585e873 --- /dev/null +++ b/Tasks/KavyaV/day2.py @@ -0,0 +1,8 @@ +import torch +import numpy as np + +np_arr1 = np.arange(15).reshape(5,3) +np_arr2 = np.arange(12).reshape(3,4) +tensor1 = torch.from_numpy(np_arr1) +tensor2 = torch.from_numpy(np_arr2) +print(torch.matmul(tensor1,tensor2)) diff --git a/Tasks/daily tasks/Abhijeet/task1.py b/Tasks/daily tasks/Abhijeet/task1.py new file mode 100644 index 0000000..3217ca1 --- /dev/null +++ b/Tasks/daily tasks/Abhijeet/task1.py @@ -0,0 +1,19 @@ +#Day2 +# Playing around with tensors + +# 1. Create two numpy arrays of size 5x3 and 3x4. +# 2. Convert them into torch tensors. +# 3. Multiply the two tensors and print the result. + + +import numpy as np +import torch as tp + +a= np.random.rand(5, 3) +b=np.random.rand(3, 4) +a=tp.tensor(a) +b=tp.tensor(b) +print(a.size()) +print(b.size()) +Multipli_tensor=tp.mm(a,b) +print("Multipli_tensor\n",Multipli_tensor) diff --git a/Tasks/daily tasks/Abhijeet/task2.py b/Tasks/daily tasks/Abhijeet/task2.py new file mode 100644 index 0000000..6f308f0 --- /dev/null +++ b/Tasks/daily tasks/Abhijeet/task2.py @@ -0,0 +1,26 @@ +import torch.nn as nn + +import torch + +class Network(nn.Module): + def __init__(self,N_in,N_out): + super(Network, self).__init__() + self.input = nn.Linear(N_in, 500) + self.fc1 = nn.Linear(500, 120) + self.fc2 = nn.Linear(120, 84) + self.Output = nn.Linear(84, N_out) + self.sigmoid = nn.Sigmoid() + + + def forward(self, x): + x=self.input(x) + x= self.fc1(x) + x = self.sigmoid(x) + x=self.fc2(x) + x = self.Output(x) + return x + + +net = Network(600,10) +print(net) + diff --git a/Tasks/daily tasks/Abhinav K/day2_task.py b/Tasks/daily tasks/Abhinav K/day2_task.py new file mode 100644 index 0000000..9241633 --- /dev/null +++ b/Tasks/daily tasks/Abhinav K/day2_task.py @@ -0,0 +1,10 @@ +import numpy as np +import torch + +a=np.random.rand(5,3) +b=np.random.rand(3,4) 
+a2=torch.from_numpy(a) +b2=torch.from_numpy(b) + +c=torch.mm(a2,b2) +print(c) diff --git a/Tasks/daily tasks/Abhinav M Hari/day_2_task.py b/Tasks/daily tasks/Abhinav M Hari/day_2_task.py new file mode 100644 index 0000000..e4f96ae --- /dev/null +++ b/Tasks/daily tasks/Abhinav M Hari/day_2_task.py @@ -0,0 +1,14 @@ +import numpy as np +import torch + +#creating numpy arrays +x = np.zeros([5, 3]) +y = np.ones([3, 4]) + +#converting to tensors +x_ = torch.from_numpy(x) +y_ = torch.from_numpy(y) + +#Multiplying the tensors +z = torch.matmul(x_, y_) +print(z) diff --git a/Tasks/daily tasks/Abhinav M Hari/task 2.py b/Tasks/daily tasks/Abhinav M Hari/task 2.py new file mode 100644 index 0000000..88e9ef8 --- /dev/null +++ b/Tasks/daily tasks/Abhinav M Hari/task 2.py @@ -0,0 +1,18 @@ +import torch +import torch.nn as nn + +class Net(nn.Module): + def __init__(self): + super(Net, self). __init__() + self.layer1 = nn.Linear(120, 84) + self.layer2 = nn.Linear(84, 10) + + def forward(self, x): + x = self.layer1(x) + return torch.sigmoid(self.layer2(x)) + +net = Net() +input = torch.randn(120) +output = net(input) +print(output) + diff --git a/Tasks/daily tasks/Abhinav T B/day2.py b/Tasks/daily tasks/Abhinav T B/day2.py new file mode 100644 index 0000000..a8fd0a8 --- /dev/null +++ b/Tasks/daily tasks/Abhinav T B/day2.py @@ -0,0 +1,15 @@ +import numpy as np +import torch + + +a = np.empty((5, 3)) +b = np.empty((3, 4)) + + +a = torch.from_numpy(a) +b = torch.from_numpy(b) + + +c = torch.mm(a, b) + +print(c) diff --git a/Tasks/daily tasks/Akhil G Krishnan/task2.py b/Tasks/daily tasks/Akhil G Krishnan/task2.py new file mode 100644 index 0000000..1213129 --- /dev/null +++ b/Tasks/daily tasks/Akhil G Krishnan/task2.py @@ -0,0 +1,10 @@ +import numpy as np +import torch + + +a = np.empty((5, 3)) +b = np.empty((3, 4)) +a = torch.from_numpy(a) +b = torch.from_numpy(b) +c = torch.mm(a, b) +print(c) diff --git a/Tasks/daily tasks/Akshay Narayanan/Day_2.py b/Tasks/daily tasks/Akshay
Narayanan/Day_2.py new file mode 100644 index 0000000..aadb198 --- /dev/null +++ b/Tasks/daily tasks/Akshay Narayanan/Day_2.py @@ -0,0 +1,12 @@ +from __future__ import print_function +import numpy as np + +import torch + +p=np.random.random((5,3)) +q=np.random.random((3,4)) +x=torch.from_numpy(p) +y=torch.from_numpy(q) + +product=torch.matmul(x,y) +print("The product of 5x3 and 3x4 tensor are >> \n ......\n",product) diff --git a/Tasks/daily tasks/Ananthu Ajay/day2.py b/Tasks/daily tasks/Ananthu Ajay/day2.py new file mode 100644 index 0000000..4d713ab --- /dev/null +++ b/Tasks/daily tasks/Ananthu Ajay/day2.py @@ -0,0 +1,14 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import torch + +import numpy as np +x = np.random.random((5,3)) +y = np.random.random((3,4)) +a= torch.from_numpy(x) +b = torch.from_numpy(y) + +t=torch.mm(a,b) + +print(t) diff --git a/Tasks/daily tasks/Ananthu Ajay/day4.py b/Tasks/daily tasks/Ananthu Ajay/day4.py new file mode 100644 index 0000000..27f3300 --- /dev/null +++ b/Tasks/daily tasks/Ananthu Ajay/day4.py @@ -0,0 +1,32 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 4, 4) + self.conv2 = nn.Conv2d(4, 16, 4) + self.fc1 = nn.Linear(16 * 4 * 4, 120) + self.fc2 = nn.Linear(120, 10) + + def forward(self, x): + x = F.max_pool2d(F.sigmoid(self.conv1(x)), (2, 2)) + x = F.max_pool2d(F.sigmoid(self.conv2(x)), 2) + x = x.view(-1, self.num_flat_features(x)) + x = F.sigmoid(self.fc1(x)) + x = self.fc2(x) + return x + + def num_flat_features(self, x): + size = x.size()[1:] + num_features = 1 + for s in size: + num_features *= s + return num_features + + +net = Net() +print(net) diff --git a/Tasks/daily tasks/Ashwin-Rajesh/Day2.py b/Tasks/daily tasks/Ashwin-Rajesh/Day2.py new file mode 100644 index 0000000..a148baf --- /dev/null +++ b/Tasks/daily 
tasks/Ashwin-Rajesh/Day2.py @@ -0,0 +1,14 @@ +#!/usr/bin/python3.7 + +import numpy as np +import torch + +np.random.seed(0) + +array_a = np.random.randn(5,3) +array_b = np.random.randn(3,4) + +tensor_a = torch.tensor(array_a) +tensor_b = torch.tensor(array_b) + +print(tensor_a.matmul(tensor_b)) diff --git a/Tasks/daily tasks/Bharath.T.U/Day2_task.ipynb b/Tasks/daily tasks/Bharath.T.U/Day2_task.ipynb new file mode 100644 index 0000000..e34ce9a --- /dev/null +++ b/Tasks/daily tasks/Bharath.T.U/Day2_task.ipynb @@ -0,0 +1,121 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "#importing needed packages\n", + "import torch \n", + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "first numpy array \"a\" with size (5, 3) is :-\n", + "[[ 1.62434536 -0.61175641 -0.52817175]\n", + " [-1.07296862 0.86540763 -2.3015387 ]\n", + " [ 1.74481176 -0.7612069 0.3190391 ]\n", + " [-0.24937038 1.46210794 -2.06014071]\n", + " [-0.3224172 -0.38405435 1.13376944]]\n", + "second numpy array\"b\" with size (3, 4) is :-\n", + "[[-1.09989127 -0.17242821 -0.87785842 0.04221375]\n", + " [ 0.58281521 -1.10061918 1.14472371 0.90159072]\n", + " [ 0.50249434 0.90085595 -0.68372786 -0.12289023]]\n" + ] + } + ], + "source": [ + "#1st part\n", + "np.random.seed(1)\n", + "a=np.random.randn(5,3)\n", + "b=np.random.randn(3,4)\n", + "print(\"first numpy array \\\"a\\\" with size {} is :-\\n{}\".format(a.shape,a))\n", + "print(\"second numpy array\\\"b\\\" with size {} is :-\\n{}\".format(b.shape,b))" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor of a:-\n", + "tensor([[ 1.6243, -0.6118, -0.5282],\n", + " [-1.0730, 0.8654, -2.3015],\n", + " [ 1.7448, -0.7612, 0.3190],\n", + " [-0.2494, 
1.4621, -2.0601],\n", + " [-0.3224, -0.3841, 1.1338]], dtype=torch.float64)\n", + "tensor of b:-\n", + "tensor([[-1.0999, -0.1724, -0.8779, 0.0422],\n", + " [ 0.5828, -1.1006, 1.1447, 0.9016],\n", + " [ 0.5025, 0.9009, -0.6837, -0.1229]], dtype=torch.float64)\n" + ] + } + ], + "source": [ + "#2nd part\n", + "ta=torch.from_numpy(a)\n", + "tb=torch.from_numpy(b)\n", + "print(\"tensor of a:-\\n{}\".format(ta))\n", + "print(\"tensor of b:-\\n{}\".format(tb))" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Dot product of ta and tb(ta.tb)\n", + " tensor([[-2.4085, -0.0826, -1.7651, -0.4181],\n", + " [ 0.5280, -2.8408, 3.5062, 1.0178],\n", + " [-2.2024, 0.8244, -2.6212, -0.6518],\n", + " [ 0.0912, -3.4221, 3.3012, 1.5609],\n", + " [ 0.7005, 1.4997, -0.9318, -0.4992]], dtype=torch.float64)\n" + ] + } + ], + "source": [ + "#3rd part\n", + "mul=torch.matmul(ta,tb)\n", + "print(\"Dot product of ta and tb(ta.tb)\\n\",mul)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Tasks/daily tasks/Bharath.T.U/task2.py b/Tasks/daily tasks/Bharath.T.U/task2.py new file mode 100644 index 0000000..0516230 --- /dev/null +++ b/Tasks/daily tasks/Bharath.T.U/task2.py @@ -0,0 +1,21 @@ +import torch +import torch.nn as nn +class Net(nn.Module): + + def __init__(self,n_in,n_out): + super(Net, self).__init__() + self.in_layer = nn.Linear(n_in, 512) + self.h1_layer = nn.Linear(512, 256) + self.h2_layer = nn.Linear(256, 128) + self.out_layer = nn.Linear(128, n_out) + self.activation = nn.Sigmoid() + + def 
forward(self, x): + x = self.in_layer(x) + x = self.h1_layer(x) + x = self.activation(x) + x = self.h2_layer(x) + x = self.out_layer(x) + return x +net = Net(1024,64) +print(net) diff --git a/Tasks/daily tasks/Farhan_Najeeb/day_2_task.py b/Tasks/daily tasks/Farhan_Najeeb/day_2_task.py new file mode 100644 index 0000000..7e71d59 --- /dev/null +++ b/Tasks/daily tasks/Farhan_Najeeb/day_2_task.py @@ -0,0 +1,11 @@ +import torch +import numpy as np + + +x = np.ones((5, 3)) +print(x) +y = np.random.randn(3, 4) +print(y) +x = torch.from_numpy(x) +y = torch.from_numpy(y) +print(torch.mm(x, y)) diff --git a/Tasks/daily tasks/Farhan_Najeeb/day_4_task.py b/Tasks/daily tasks/Farhan_Najeeb/day_4_task.py new file mode 100644 index 0000000..d94d614 --- /dev/null +++ b/Tasks/daily tasks/Farhan_Najeeb/day_4_task.py @@ -0,0 +1,32 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 6, 5) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 10) + + def forward(self, x): + x = F.max_pool2d(F.sigmoid(self.conv1(x)), (2, 2)) + x = F.max_pool2d(F.sigmoid(self.conv2(x)), 2) + x = x.view(-1, self.num_flat_features(x)) + x = F.sigmoid(self.fc1(x)) + x = self.fc2(x) + return x + + def num_flat_features(self, x): + size = x.size()[1:] + num_features = 1 + for s in size: + num_features *= s + return num_features + + +net = Net() +print(net) diff --git a/Tasks/daily tasks/Fausan Asharaf/task1.py b/Tasks/daily tasks/Fausan Asharaf/task1.py new file mode 100644 index 0000000..74f9cfb --- /dev/null +++ b/Tasks/daily tasks/Fausan Asharaf/task1.py @@ -0,0 +1,10 @@ +import numpy as np +import torch + +a = np.random.randint(10, size=(5, 3)) +b = np.random.randint(10, size=(3, 4)) +a_t = torch.from_numpy(a) +b_t = torch.from_numpy(b) + +c_t = torch.matmul(a_t, b_t) +print(c_t) diff --git a/Tasks/daily tasks/Fausan 
Asharaf/task2.py b/Tasks/daily tasks/Fausan Asharaf/task2.py new file mode 100644 index 0000000..bd0d340 --- /dev/null +++ b/Tasks/daily tasks/Fausan Asharaf/task2.py @@ -0,0 +1,23 @@ +import torch +import torch.nn as nn + + +class Net(nn.Module): + def __init__(self, D_in, D_out): + super(Net, self).__init__() + self.input = nn.Linear(D_in, 500) + self.sigmoid = nn.Sigmoid() + self.h1 = nn.Linear(500, 200) + self.h2 = nn.Linear(200, 100) + self.output = nn.Linear(100, D_out) + + def forward(self, x): + x = self.input(x) + x = self.sigmoid(self.h1(x)) + x = self.h2(x) + x = self.output(x) + return x + + +model = Net(D_in=784, D_out=10) +print(model) diff --git a/Tasks/daily tasks/GopikrishnanK/.ipynb_checkpoints/Day2_Task-checkpoint.ipynb b/Tasks/daily tasks/GopikrishnanK/.ipynb_checkpoints/Day2_Task-checkpoint.ipynb new file mode 100644 index 0000000..2fd6442 --- /dev/null +++ b/Tasks/daily tasks/GopikrishnanK/.ipynb_checkpoints/Day2_Task-checkpoint.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Tasks/daily tasks/GopikrishnanK/.ipynb_checkpoints/Day4_Task-checkpoint.ipynb b/Tasks/daily tasks/GopikrishnanK/.ipynb_checkpoints/Day4_Task-checkpoint.ipynb new file mode 100644 index 0000000..2fd6442 --- /dev/null +++ b/Tasks/daily tasks/GopikrishnanK/.ipynb_checkpoints/Day4_Task-checkpoint.ipynb @@ -0,0 +1,6 @@ +{ + "cells": [], + "metadata": {}, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Tasks/daily tasks/GopikrishnanK/Day2_Task.ipynb b/Tasks/daily tasks/GopikrishnanK/Day2_Task.ipynb new file mode 100644 index 0000000..4a57f0e --- /dev/null +++ b/Tasks/daily tasks/GopikrishnanK/Day2_Task.ipynb @@ -0,0 +1,90 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import numpy as np\n", + "\n", + "arr1 = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12],[13,14,15]])\n", + "arr2 = 
np.array([[3,4,5,6],[7,8,9,10],[11,12,13,14]])\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([[ 3, 4, 5, 6],\n", + " [ 7, 8, 9, 10],\n", + " [11, 12, 13, 14]], dtype=torch.int32)\n" + ] + } + ], + "source": [ + "tens1 = torch.from_numpy(arr1)\n", + "tens2 = torch.from_numpy(arr2)\n", + "\n", + "print(tens2)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor([[ 50, 56, 62, 68],\n", + " [113, 128, 143, 158],\n", + " [176, 200, 224, 248],\n", + " [239, 272, 305, 338],\n", + " [302, 344, 386, 428]], dtype=torch.int32)\n" + ] + } + ], + "source": [ + "ans = torch.mm(tens1, tens2)\n", + "print(ans)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Tasks/daily tasks/GopikrishnanK/Day4_Task.ipynb b/Tasks/daily tasks/GopikrishnanK/Day4_Task.ipynb new file mode 100644 index 0000000..afba5cf --- /dev/null +++ b/Tasks/daily tasks/GopikrishnanK/Day4_Task.ipynb @@ -0,0 +1,91 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn as nn\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "class DummyMod(nn.Module):\n", + " def __init__(self,ip_size):\n", + " self.ip = 
nn.Linear(i,64)\n", + " self.h1 = nn.Linear(64,128)\n", + " self.h2 = nn.Linear(128,32)\n", + " self.op = nn.Linear(32,1)\n", + " self.sigmo = nn.Sigmoid()\n", + " \n", + " def forward(x):\n", + " x = self.ip(x)\n", + " x = self.h1(x)\n", + " x = self.sigmo(x)\n", + " x = self.h2(x)\n", + " x = self.op(x)\n", + " return(x)\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DummyMod(\n", + " (ip): Linear(in_features=128, out_features=64, bias=True)\n", + " (h1): Linear(in_features=64, out_features=128, bias=True)\n", + " (h2): Linear(in_features=128, out_features=32, bias=True)\n", + " (op): Linear(in_features=32, out_features=1, bias=True)\n", + " (sigmo): Sigmoid()\n", + ")\n" + ] + } + ], + "source": [ + "model = DummyMod()\n", + "print(model)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Tasks/daily tasks/Hari Krishnan/Day2-task.py b/Tasks/daily tasks/Hari Krishnan/Day2-task.py new file mode 100644 index 0000000..0b98727 --- /dev/null +++ b/Tasks/daily tasks/Hari Krishnan/Day2-task.py @@ -0,0 +1,15 @@ +import numpy as np +import torch + +# Creating numpy array of the specified type +a = np.empty((5, 3)) +b = np.empty((3, 4)) + +# Converting numpy array to Torch tensors +a = torch.from_numpy(a) +b = torch.from_numpy(b) + +# Multiplying two tensors +c = torch.mm(a, b) + +print(c) diff --git a/Tasks/daily tasks/Hari Krishnan/task2.py 
b/Tasks/daily tasks/Hari Krishnan/task2.py new file mode 100644 index 0000000..db31752 --- /dev/null +++ b/Tasks/daily tasks/Hari Krishnan/task2.py @@ -0,0 +1,31 @@ +import torch +import torch.nn as nn + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.input = nn.Linear(784, 392) + self.hidden1 = nn.Linear(392, 98) + self.sigmoid = nn.Sigmoid() + self.hidden2 = nn.Linear(98, 14) + self.output = nn.Linear(14, 3) + + def forward(self, x): + x = self.input(x) + x = self.hidden1(x) + x = self.sigmoid(x) + x = self.hidden2(x) + x = self.output(x) + return x + + def num_flat_features(self, x): + size = x.size()[1:] + num_features = 1 + for s in size: + num_features *= s + return num_features + + +model = Net() +print(model) diff --git a/Tasks/daily tasks/Hashina/day_2.py b/Tasks/daily tasks/Hashina/day_2.py new file mode 100644 index 0000000..581fd54 --- /dev/null +++ b/Tasks/daily tasks/Hashina/day_2.py @@ -0,0 +1,8 @@ +import torch +import numpy as np +x = np.random.rand(5, 3) +y = np.random.rand(3, 4) +xt = torch.from_numpy(x) +yt = torch.from_numpy(y) +mul = torch.matmul(xt, yt) +print(mul) diff --git a/Tasks/daily tasks/Hashina/day_4.py b/Tasks/daily tasks/Hashina/day_4.py new file mode 100644 index 0000000..b4285fa --- /dev/null +++ b/Tasks/daily tasks/Hashina/day_4.py @@ -0,0 +1,18 @@ +from torch import nn + +class Network(nn.Module): + def __init__(self): + super().__init__() + + self.hidden = nn.Linear(1, 2) + self.output = nn.Linear(2, 1) + + self.sigmoid = nn.Sigmoid() + + def forward(self, x): + x = self.hidden(x) + x = self.sigmoid(x) + x = self.output(x) + + return x + diff --git a/Tasks/daily tasks/Issac_Antony/.gitkeep b/Tasks/daily tasks/Issac_Antony/.gitkeep new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/Tasks/daily tasks/Issac_Antony/.gitkeep @@ -0,0 +1 @@ + diff --git a/Tasks/daily tasks/Issac_Antony/a.gitkeep b/Tasks/daily tasks/Issac_Antony/a.gitkeep new file mode 100644 index 0000000..8b13789 --- 
/dev/null +++ b/Tasks/daily tasks/Issac_Antony/a.gitkeep @@ -0,0 +1 @@ + diff --git a/Tasks/daily tasks/Issac_Antony/day4_task.py b/Tasks/daily tasks/Issac_Antony/day4_task.py new file mode 100644 index 0000000..d94d614 --- /dev/null +++ b/Tasks/daily tasks/Issac_Antony/day4_task.py @@ -0,0 +1,32 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 6, 5) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 10) + + def forward(self, x): + x = F.max_pool2d(F.sigmoid(self.conv1(x)), (2, 2)) + x = F.max_pool2d(F.sigmoid(self.conv2(x)), 2) + x = x.view(-1, self.num_flat_features(x)) + x = F.sigmoid(self.fc1(x)) + x = self.fc2(x) + return x + + def num_flat_features(self, x): + size = x.size()[1:] + num_features = 1 + for s in size: + num_features *= s + return num_features + + +net = Net() +print(net) diff --git a/Tasks/daily tasks/Issac_Antony/task_day2.py b/Tasks/daily tasks/Issac_Antony/task_day2.py new file mode 100644 index 0000000..61faaa7 --- /dev/null +++ b/Tasks/daily tasks/Issac_Antony/task_day2.py @@ -0,0 +1,17 @@ +import torch +import numpy as np + +a_numpy = np.random.randn(5, 3) +b_numpy = np.ones((3, 4), dtype=float) + +print(a_numpy) +print(b_numpy) + +a_torch = torch.from_numpy(a_numpy) +b_torch = torch.from_numpy(b_numpy) + +print(a_torch) +print(b_torch) + +mul = torch.mm(a_torch, b_torch) +print(mul) diff --git a/Tasks/daily tasks/Jamcey_V_P/task1.py b/Tasks/daily tasks/Jamcey_V_P/task1.py new file mode 100644 index 0000000..0eefbda --- /dev/null +++ b/Tasks/daily tasks/Jamcey_V_P/task1.py @@ -0,0 +1,9 @@ +import torch +import numpy as np + +a =np.random.randint(5,size = (5,3)) +b =np.random.randint(5,size = (3,4)) +a_tensor = torch.from_numpy(a) +b_tensor = torch.from_numpy(b) +result = torch.matmul(a_tensor,b_tensor) +print(result) diff --git a/Tasks/daily 
tasks/Jamcey_V_P/task2.py b/Tasks/daily tasks/Jamcey_V_P/task2.py new file mode 100644 index 0000000..aaaa057 --- /dev/null +++ b/Tasks/daily tasks/Jamcey_V_P/task2.py @@ -0,0 +1,34 @@ +import torch +import torch.nn as nn + +class neural_network(nn.Module): + def __init__(self, ): + super(neural_network, self).__init__() + + self.input_layer = 4 # Number of input units + self.hidden_layer1 = 5 # Number of hidden units + self.hidden_layer2 = 3 # Number of hidden units + self.output_layer = 1 # Number of output units + + # Weights + self.W1 = torch.randn(self.input_layer, self.hidden_layer1) + self.W2 = torch.randn(self.hidden_layer1, self.hidden_layer2) + self.W3 = torch.randn(self.hidden_layer2, self.output_layer) + + + # bias + self.B1 = torch.randn((1, self.hidden_layer1)) + self.B2 = torch.randn((1,self.hidden_layer2)) + self.B3 = torch.randn((1,self.output_layer)) + + def forward(self, X): + z1 = torch.mm(X, self.W1) + self.B1 + Relu = nn.ReLU() + a1 = Relu(z1) + z2 = torch.mm(a1, self.W2) + self.B2 + Relu = nn.ReLU() + a2 = Relu(z2) + z3 = torch.mm(a2, self.W3) + self.B3 + Sigmoid = nn.Sigmoid() + Result = Sigmoid(z3) + return Result diff --git a/Tasks/daily tasks/Jaseem ck/Task_1.py b/Tasks/daily tasks/Jaseem ck/Task_1.py new file mode 100644 index 0000000..7f0e599 --- /dev/null +++ b/Tasks/daily tasks/Jaseem ck/Task_1.py @@ -0,0 +1,16 @@ +import torch as th +import numpy as np + +#numpy of size 5*3 +a = np.arange(15).reshape(5,3) +#numpy of size 3*4 +b = np.arange(12).reshape(3,4) + +#Converting numpy arrays into tensors +a_torch = th.from_numpy(a) +b_torch = th.from_numpy(b) + +#Multiply the tensors to generate the product +product = th.mm(a_torch, b_torch) + +print(product) diff --git a/Tasks/daily tasks/Jaseem ck/Task_2.ipynb b/Tasks/daily tasks/Jaseem ck/Task_2.ipynb new file mode 100644 index 0000000..e670e2d --- /dev/null +++ b/Tasks/daily tasks/Jaseem ck/Task_2.ipynb @@ -0,0 +1,90 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "tinkerhubpytorch.ipynb", + "provenance": [] + 
}, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "code", + "metadata": { + "id": "p_XfBfF72I13", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "class Net(nn.Module):\n", + " def __init__(self,):\n", + " super(Net, self).__init__()\n", + " self.fc1 = nn.Linear(256,64)\n", + " self.fc2 = nn.Linear(64,32)\n", + " self.fc3 = nn.Linear(32, 10)\n", + " def forward(self, x):\n", + " x = self.fc1(x)\n", + " x = F.Sigmoid(x)\n", + " x = self.fc2(x)\n", + " x = F.Sigmoid(x)\n", + " x = self.fc3(x)\n", + " return x\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "oqld3eV4veTJ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "model = Net()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "pjLG2lp2vgmS", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 104 + }, + "outputId": "52d81444-c0a7-4e70-fc62-517e035fccb2" + }, + "source": [ + "model" + ], + "execution_count": 42, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "Net(\n", + " (fc1): Linear(in_features=256, out_features=64, bias=True)\n", + " (fc2): Linear(in_features=64, out_features=32, bias=True)\n", + " (fc3): Linear(in_features=32, out_features=10, bias=True)\n", + ")" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 42 + } + ] + } + ] +} \ No newline at end of file diff --git a/Tasks/daily tasks/Lija/day2Task.py b/Tasks/daily tasks/Lija/day2Task.py new file mode 100644 index 0000000..22d0cd3 --- /dev/null +++ b/Tasks/daily tasks/Lija/day2Task.py @@ -0,0 +1,29 @@ + + +#Create two numpy arrays of size 5x3 and 3x4. +#Convert them into torch tensors. +#Multiply the two tensors and print the result. 
+ +import torch +import numpy as np + +array1=np.random.rand(5,3) +array2=np.random.rand(3,4) + +tensor1=torch.from_numpy(array1) +tensor2=torch.from_numpy(array2) +z=tensor1@tensor2 + + + + + + + + + + + + + + diff --git a/Tasks/daily tasks/Mohammed Arshu/task1.py b/Tasks/daily tasks/Mohammed Arshu/task1.py new file mode 100644 index 0000000..30ff742 --- /dev/null +++ b/Tasks/daily tasks/Mohammed Arshu/task1.py @@ -0,0 +1,9 @@ +from __future__ import print_function +import torch +import numpy as np +a = np.random.rand(5,3) +b = np.random.rand(3,4) +c = torch.from_numpy(a) +d = torch.from_numpy(b) +e = torch.mm(c,d) +print(e) diff --git a/Tasks/daily tasks/Mohammed Arshu/task2.py b/Tasks/daily tasks/Mohammed Arshu/task2.py new file mode 100644 index 0000000..e9a2b9c --- /dev/null +++ b/Tasks/daily tasks/Mohammed Arshu/task2.py @@ -0,0 +1,44 @@ +import torch +import torch.nn as nn +import torch.optim as optim + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.inputSize = 3 + self.outputSize = 1 + self.input = nn.Linear(self.inputSize, 10) + self.sigmoid = nn.Sigmoid() + self.hidden = nn.Linear(10, 15) + self.output = nn.Linear(15, self.outputSize) + def forward(self, X): + X = self.sigmoid(self.input(X)) + X = self.sigmoid(self.hidden(X)) + X = self.sigmoid(self.output(X)) + return X + +net = Net() +print(net) + +X=torch.randn(3,3) +Y=torch.tensor( + [ + [1.0], + [1.0], + [0.0] + ] +) + +net.zero_grad() +criterion = nn.MSELoss() +optimizer = optim.SGD(net.parameters(), lr=0.01) + +for i in range(10000): + output = net(X) + loss = criterion(output,Y) + print(loss) + loss.backward() + optimizer.step() + +print(net(X)) +print(Y) \ No newline at end of file diff --git a/Tasks/daily tasks/Paul Elias Sojan/day2_task.py b/Tasks/daily tasks/Paul Elias Sojan/day2_task.py new file mode 100644 index 0000000..ac65502 --- /dev/null +++ b/Tasks/daily tasks/Paul Elias Sojan/day2_task.py @@ -0,0 +1,11 @@ +import numpy as np +import torch + 
+arr1=np.random.rand(5,3) +arr2=np.random.rand(3,4) + +t1=torch.from_numpy(arr1) +t2=torch.from_numpy(arr2) + +print(torch.matmul(t1,t2)) + diff --git a/Tasks/daily tasks/Paul Elias Sojan/task2.py b/Tasks/daily tasks/Paul Elias Sojan/task2.py new file mode 100644 index 0000000..5ff1c38 --- /dev/null +++ b/Tasks/daily tasks/Paul Elias Sojan/task2.py @@ -0,0 +1,22 @@ +import torch +import torch.nn as nn + + +class NeuralNetwork(nn.Module): + def __init__(self): + super(NeuralNetwork, self).__init__() + self.input = nn.Linear(28*28,100) + self.fc1 = nn.Linear(100, 200) + self.sigmoid = nn.Sigmoid() + self.fc2 = nn.Linear(200, 50) + self.out = nn.Linear(50,10) + + def forward(self,x): + x=self.input(x) + x = self.sigmoid(self.fc1(x)) + x=self.sigmoid(self.fc2(x)) + return self.out(x) + + +n = NeuralNetwork() +print(n) diff --git a/Tasks/daily tasks/README.md b/Tasks/daily tasks/README.md index 9a329e5..3b4b2b1 100644 --- a/Tasks/daily tasks/README.md +++ b/Tasks/daily tasks/README.md @@ -11,11 +11,35 @@ Use the following steps to participate in the daily tasks. # Tasks -## Day 1 +## Task 1 Day 1 was fun! -## day 2 +## Task 2 ### Playing around with tensors 1. Create two numpy arrays of size 5x3 and 3x4. 2. Convert them into torch tensors. 3. Multiply the two tensors and print the result. + +## Task 3 +### Create a neural network using pytorch +Create a neural network with the following configurations +1. An input layer. +2. Two hidden layers. +3. An output layer. +4. torch.nn.Sigmoid functions in between hidden layers. + +## Task 4 +### Tweak the hyperparameters +1. Check out the [cifar10cnn.py](https://github.com/tinkerhub-org/ComputerVision-with-PyTorch-Learning-Program/blob/master/resources/cifar10cnn.py). +2. Tweak the values of hyperparameters like number of output channels, kernel size, learning rate..etc +3. Note how the accuracy and loss changed on tweaking those params as a comment in your code. + +## Task 5 +### Play around with Pytorch Transforms. +1. 
Load a set of images from the internet (Be kind and empathetic. Don't use explicit contents) +2. You can use library like [PIL](https://pypi.org/project/Pillow/) +3. Go to [pytorch transforms documentation](https://pytorch.org/docs/stable/torchvision/transforms.html) here. +4. Apply various transformations and see the results. +5. You can use Jupyter notebooks! + + diff --git a/Tasks/daily tasks/Randheerrrk/day4.ipynb b/Tasks/daily tasks/Randheerrrk/day4.ipynb new file mode 100644 index 0000000..99acda8 --- /dev/null +++ b/Tasks/daily tasks/Randheerrrk/day4.ipynb @@ -0,0 +1,103 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "day4.ipynb", + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "code", + "metadata": { + "id": "HiM8CZJPk6jc", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import torch.nn as nn\n", + "import torch.nn.functional as f" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "sOedKVRUlJ9y", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class Net(nn.Module) :\n", + " def __init__(self) :\n", + " super(Net, self).__init__()\n", + " self.i = nn.Linear(64, 32)\n", + " self.h = nn.Linear(32, 16)\n", + " self.o = nn.Linear(16, 10)\n", + " def forward(self, X) :\n", + " X = self.i(X)\n", + " X = f.sigmoid(X)\n", + " X = self.h(X)\n", + " X = f.sigmoid(X)\n", + " X = self.o(X)\n", + " return X" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "pyQT3dA1m9Rp", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 104 + }, + "outputId": "7f7381c1-a5ee-4e08-c72e-9549892569fb" + }, + "source": [ + "model = Net()\n", + "model" + ], + "execution_count": 6, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "Net(\n", + " (i): Linear(in_features=64, 
out_features=32, bias=True)\n", + " (h): Linear(in_features=32, out_features=16, bias=True)\n", + " (o): Linear(in_features=16, out_features=10, bias=True)\n", + ")" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 6 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "XkvBVPvMnG_I", + "colab_type": "code", + "colab": {} + }, + "source": [ + "" + ], + "execution_count": 0, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/Tasks/daily tasks/Randheerrrk/task1.py b/Tasks/daily tasks/Randheerrrk/task1.py new file mode 100644 index 0000000..a556a08 --- /dev/null +++ b/Tasks/daily tasks/Randheerrrk/task1.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +"""task1 + +Automatically generated by Colaboratory. + +Original file is located at + https://colab.research.google.com/drive/1KiNRo7ltyENDERowPkIDEJ1uWrDw2h4n +""" + +import torch as pt +import numpy as np + +print(pt.matmul(pt.from_numpy(np.random.randn(5, 3)), pt.from_numpy(np.random.randn(3, 4)))) \ No newline at end of file diff --git a/Tasks/daily tasks/Rinsa Fathima CM/day2_task.py b/Tasks/daily tasks/Rinsa Fathima CM/day2_task.py new file mode 100644 index 0000000..e13caa7 --- /dev/null +++ b/Tasks/daily tasks/Rinsa Fathima CM/day2_task.py @@ -0,0 +1,14 @@ +import numpy as np +import torch + +#creating numpy arrays +a=np.random.randint(15,size=(5,3)) +b=np.random.randint(5,size=(3,4)) + +#converting numpy arrays to torch tensors +c=torch.from_numpy(a) +d=torch.from_numpy(b) + +#multiplying torch tensors +product=torch.mm(c,d) +print(product) diff --git a/Tasks/daily tasks/Rinsa Fathima CM/task2.py b/Tasks/daily tasks/Rinsa Fathima CM/task2.py new file mode 100644 index 0000000..201d9d5 --- /dev/null +++ b/Tasks/daily tasks/Rinsa Fathima CM/task2.py @@ -0,0 +1,21 @@ +import torch +import torch.nn as nn + +class Net(nn.Module): + def __init__(self): + super(Net,self).__init__() + self.input=nn.Linear(400,200) + self.hidden1=nn.Linear(200,100) + self.sigmoid=nn.Sigmoid() + 
self.hidden2=nn.Linear(100,50) + self.output=nn.Linear(50,25) + + def forward(self,x): + x=self.input(x) + x=self.hidden1(x) + x=self.sigmoid(x) + x=self.hidden2(x) + x=self.output(x) + return x +model=Net() +print(model) \ No newline at end of file diff --git a/Tasks/daily tasks/Shivani/task1.py b/Tasks/daily tasks/Shivani/task1.py new file mode 100644 index 0000000..4ff2ddc --- /dev/null +++ b/Tasks/daily tasks/Shivani/task1.py @@ -0,0 +1,12 @@ +import numpy as np +import torch +x = np.random.rand(5,3) +print(x) +y = np.random.rand(3,4) +print(y) +xt = torch.from_numpy(x) +print(xt) +yt = torch.from_numpy(y) +print(yt) +result = torch.mm(xt,yt) +print(result) diff --git a/Tasks/daily tasks/Sourav_Menon/day_2.py b/Tasks/daily tasks/Sourav_Menon/day_2.py new file mode 100644 index 0000000..060ecbb --- /dev/null +++ b/Tasks/daily tasks/Sourav_Menon/day_2.py @@ -0,0 +1,17 @@ +import numpy +import torch + +#creating numpy array of dim-(5,3) +array_1=numpy.random.rand(5,3) + +#creating numpy array of dim-(3,4) +array_2=numpy.random.rand(3,4) + +#converting arrays into torch tensor +a=torch.from_numpy(array_1) +b=torch.from_numpy(array_2) + +#printing the prouduct of 2 torch tensors + +print(torch.matmul(a,b)) + diff --git a/Tasks/daily tasks/Swathi/day_2_tensor multiplication b/Tasks/daily tasks/Swathi/day_2_tensor multiplication new file mode 100644 index 0000000..a411c64 --- /dev/null +++ b/Tasks/daily tasks/Swathi/day_2_tensor multiplication @@ -0,0 +1,18 @@ +""" + Create two numpy arrays of size 5x3 and 3x4. + Convert them into torch tensors. + Multiply the two tensors and print the result. 
+ +""" + +from __future__ import print_function +import torch +import numpy as np +import random + +x = np.random.randn(5, 3) +y = np.random.randn(3, 4) +x1 = torch.from_numpy(x) +y1 = torch.from_numpy(y) + +result = torch.matmul(x1, y1) diff --git a/Tasks/daily tasks/basil/day2.py b/Tasks/daily tasks/basil/day2.py new file mode 100644 index 0000000..ba9b4cc --- /dev/null +++ b/Tasks/daily tasks/basil/day2.py @@ -0,0 +1,7 @@ +import torch +import numpy as np +a=np.random.rand(5,3) +b=np.random.rand(3,4) +tensor_a=torch.from_numpy(a) +tensor_b=torch.from_numpy(b) +print(torch.mm(tensor_a,tensor_b)) diff --git a/Tasks/daily tasks/basil/task3.ipynb b/Tasks/daily tasks/basil/task3.ipynb new file mode 100644 index 0000000..085d106 --- /dev/null +++ b/Tasks/daily tasks/basil/task3.ipynb @@ -0,0 +1,132 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "task3.ipynb", + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "code", + "metadata": { + "id": "cF8e7eTbCp_A", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import torch\n", + "from torch import nn\n", + "import torch.nn.functional as F" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "vRm9TDVhCyUO", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class neural_net(nn.Module):\n", + " def __init__(self,input_shape,hid1,hid2,output_shape):\n", + " super().__init__()\n", + " self.layer1=nn.Linear(input_shape,hid1)\n", + " self.layer2=nn.Linear(hid1,hid2)\n", + " self.ouput=nn.Linear(hid2,output_shape)\n", + " def forward(self,x):\n", + " x=torch.sigmoid(self.layer1(x))\n", + " x=torch.sigmoid(self.layer2(x))\n", + " x=self.ouput(x)\n", + " return x" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "G5FNnlMrFRBc", + "colab_type": "code", + "colab": {} + }, + "source": [ + 
"moodel=neural_net(1024,256,256,1)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "XRssQTOnL7-4", + "colab_type": "code", + "outputId": "2c28c93f-8dd7-4d39-a6c0-ae9cde831dbd", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 102 + } + }, + "source": [ + "moodel" + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "neural_net(\n", + " (layer1): Linear(in_features=1024, out_features=256, bias=True)\n", + " (layer2): Linear(in_features=256, out_features=256, bias=True)\n", + " (ouput): Linear(in_features=256, out_features=1, bias=True)\n", + ")" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 4 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "asRGX2GaHD_U", + "colab_type": "code", + "outputId": "13cf080a-2131-47d0-9b9c-90ec8c37f4b1", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "input=torch.randn(1,1024)\n", + "moodel(input)" + ], + "execution_count": 6, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([[0.2871]], grad_fn=)" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 6 + } + ] + } + ] +} \ No newline at end of file diff --git a/Tasks/daily tasks/fazil_babu/task1.py b/Tasks/daily tasks/fazil_babu/task1.py new file mode 100644 index 0000000..7040f2d --- /dev/null +++ b/Tasks/daily tasks/fazil_babu/task1.py @@ -0,0 +1,12 @@ +# Done on day 2 + +import torch +import numpy as np + +arr1 = np.random.randn(5, 3) +arr2 = np.random.randn(3, 4) + +arr1_t = torch.tensor(arr1) +arr2_t = torch.tensor(arr2) + +print(torch.matmul(arr1_t, arr2_t)) \ No newline at end of file diff --git a/Tasks/daily tasks/fazil_babu/task2.ipynb b/Tasks/daily tasks/fazil_babu/task2.ipynb new file mode 100644 index 0000000..b24f284 --- /dev/null +++ b/Tasks/daily tasks/fazil_babu/task2.ipynb @@ -0,0 +1,73 @@ +{ + "cells": [ + 
{ + "cell_type": "code", + "execution_count": 229, + "metadata": {}, + "outputs": [], + "source": [ + "# Task given on day 4\n", + "\n", + "import torch\n", + "from torch import nn" + ] + }, + { + "cell_type": "code", + "execution_count": 230, + "metadata": {}, + "outputs": [], + "source": [ + "model = nn.Sequential(\n", + " nn.Linear(784, 8),\n", + " nn.Sigmoid(),\n", + " nn.Linear(8, 8),\n", + " nn.Sigmoid(),\n", + " nn.Linear(8, 8),\n", + " nn.Sigmoid(),\n", + " nn.Linear(8, 10),\n", + " nn.Softmax()\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 231, + "metadata": {}, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": "Sequential(\n (0): Linear(in_features=784, out_features=8, bias=True)\n (1): Sigmoid()\n (2): Linear(in_features=8, out_features=8, bias=True)\n (3): Sigmoid()\n (4): Linear(in_features=8, out_features=8, bias=True)\n (5): Sigmoid()\n (6): Linear(in_features=8, out_features=10, bias=True)\n (7): Softmax(dim=None)\n)" + }, + "metadata": {}, + "execution_count": 231 + } + ], + "source": [ + "model" + ] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.2-final" + }, + "orig_nbformat": 2, + "kernelspec": { + "name": "python38264bit88c5656e839e4b9dbfc47071a86401a2", + "display_name": "Python 3.8.2 64-bit" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/Tasks/daily tasks/gopi/day2.py b/Tasks/daily tasks/gopi/day2.py new file mode 100644 index 0000000..9a0d3f7 --- /dev/null +++ b/Tasks/daily tasks/gopi/day2.py @@ -0,0 +1,7 @@ +import numpy +import torch +x = numpy.ones([5, 3]) +y = numpy.ones([3, 4]) +x_ = torch.from_numpy(x) +y_ = torch.from_numpy(y) +k = torch.matmul(x_, y_) diff --git a/Tasks/daily tasks/jishnu/task_d2.py b/Tasks/daily 
tasks/jishnu/task_d2.py new file mode 100644 index 0000000..0b8d259 --- /dev/null +++ b/Tasks/daily tasks/jishnu/task_d2.py @@ -0,0 +1,14 @@ +import numpy as np +import torch + +np.random.seed(0) + +#creating two random numpy arrays +arr_a = np.random.randn(5,3) +arr_b = np.random.randn(3,4) + +#converting numpy array to torch tensors +tensor_a = torch.tensor(arr_a) +tensor_b = torch.tensor(arr_b) + +print(tensor_a.matmul(tensor_b)) diff --git a/Tasks/daily tasks/lafith/day2.py b/Tasks/daily tasks/lafith/day2.py new file mode 100644 index 0000000..a1651cf --- /dev/null +++ b/Tasks/daily tasks/lafith/day2.py @@ -0,0 +1,12 @@ +import torch +import numpy as np + +#Create two numpy arrays of size 5x3 and 3x4 +a=np.random.rand(5,3) +b=np.random.rand(3,4) +#Convert them into torch tensors +a_t = torch.from_numpy(a) +b_t = torch.from_numpy(b) +#Multiply the two tensors and print the result +result=torch.matmul(a_t,b_t) +print(result) diff --git a/Tasks/daily tasks/ranjith/task1.ipynb b/Tasks/daily tasks/ranjith/task1.ipynb new file mode 100644 index 0000000..ca9fccb --- /dev/null +++ b/Tasks/daily tasks/ranjith/task1.ipynb @@ -0,0 +1,278 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Copy of Ranjith.ipynb", + "provenance": [], + "collapsed_sections": [], + "authorship_tag": "ABX9TyNhhN1SrmOkGeKRAbPtOxT5", + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MKSmh4ISNpsz", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import torch\n", + "import numpy as np" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "p2rMb2kPUYV5", + "colab_type": "code", + "outputId": "86bd996d-2708-407d-e430-0aa9566fa5ec", + "colab": { + 
"base_uri": "https://localhost:8080/", + "height": 175 + } + }, + "source": [ + "q=np.random.rand(5,3)\n", + "print(q)\n", + "w=np.random.rand(3,4)\n", + "print(w)" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "stream", + "text": [ + "[[0.65072886 0.22845178 0.37238782]\n", + " [0.02598699 0.2367949 0.32273151]\n", + " [0.89165286 0.87121422 0.42468716]\n", + " [0.53881627 0.73737358 0.39073533]\n", + " [0.0239398 0.25109741 0.79745952]]\n", + "[[0.19169847 0.06496339 0.05681413 0.06623118]\n", + " [0.23049396 0.48585222 0.14580906 0.70743965]\n", + " [0.69193729 0.12576003 0.02682567 0.65850077]]\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "mImmmTrQVC_s", + "colab_type": "code", + "outputId": "37798ad8-caa9-4a4f-b8d2-2246879ae9b9", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 210 + } + }, + "source": [ + "e=torch.from_numpy(q)\n", + "e.mul_(0);e.add_(2)\n", + "print(e)\n", + "print(q)" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "stream", + "text": [ + "tensor([[2., 2., 2.],\n", + " [2., 2., 2.],\n", + " [2., 2., 2.],\n", + " [2., 2., 2.],\n", + " [2., 2., 2.]], dtype=torch.float64)\n", + "[[2. 2. 2.]\n", + " [2. 2. 2.]\n", + " [2. 2. 2.]\n", + " [2. 2. 2.]\n", + " [2. 2. 
2.]]\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "GVqQuZZRV2zW", + "colab_type": "code", + "outputId": "0fb1bef3-8040-44f5-cf34-d10c29d0e323", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 141 + } + }, + "source": [ + "r=torch.tensor(w)\n", + "r.mul_(0);r.add_(2)\n", + "print(r)\n", + "print(w)" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "stream", + "text": [ + "tensor([[2., 2., 2., 2.],\n", + " [2., 2., 2., 2.],\n", + " [2., 2., 2., 2.]], dtype=torch.float64)\n", + "[[0.19169847 0.06496339 0.05681413 0.06623118]\n", + " [0.23049396 0.48585222 0.14580906 0.70743965]\n", + " [0.69193729 0.12576003 0.02682567 0.65850077]]\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "hC-8TZAJWGHE", + "colab_type": "code", + "outputId": "33e77434-9b11-4e70-8bfd-67decd36a1b4", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 123 + } + }, + "source": [ + "y=np.matmul(q,r)\n", + "y" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([[12., 12., 12., 12.],\n", + " [12., 12., 12., 12.],\n", + " [12., 12., 12., 12.],\n", + " [12., 12., 12., 12.],\n", + " [12., 12., 12., 12.]], dtype=torch.float64)" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 118 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Z5jVWgfpYUzz", + "colab_type": "code", + "outputId": "698d2b6b-336f-473c-ab2c-e6bc42bc83ad", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 123 + } + }, + "source": [ + "z=torch.matmul(e,r)\n", + "z" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "tensor([[12., 12., 12., 12.],\n", + " [12., 12., 12., 12.],\n", + " [12., 12., 12., 12.],\n", + " [12., 12., 12., 12.],\n", + " [12., 12., 12., 12.]], dtype=torch.float64)" + ] + }, + "metadata": { + "tags": [] + }, + 
"execution_count": 125 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "v8iknHx4bY8b", + "colab_type": "code", + "outputId": "59c48aaa-98ab-4916-acf8-66d9dcb10f08", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "type(r)" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "torch.Tensor" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 123 + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "CkuhfCgXcN66", + "colab_type": "code", + "outputId": "8b01fbe3-9767-4ba4-92fe-29c9cd798159", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + } + }, + "source": [ + "type(q)" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "numpy.ndarray" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 124 + } + ] + } + ] +} \ No newline at end of file diff --git a/Tasks/daily tasks/ranjith/task2.ipynb b/Tasks/daily tasks/ranjith/task2.ipynb new file mode 100644 index 0000000..d3a73ee --- /dev/null +++ b/Tasks/daily tasks/ranjith/task2.ipynb @@ -0,0 +1,96 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Welcome To Colaboratory", + "provenance": [], + "collapsed_sections": [], + "include_colab_link": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "PkUAI5Oc7b3g", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import torch\n", + "from torch import nn" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "BYcO0cQT8ASJ", + "colab_type": "code", + "colab": {} + }, + "source": [ + 
"model=nn.Sequential(nn.Linear(784,128),\n", + " nn.Linear(128,64),\n", + " nn.Sigmoid(),\n", + " nn.Linear(64,32),\n", + " nn.Linear(32,10))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "O1jf_6B1ADY2", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 158 + }, + "outputId": "d08bb76e-f972-448e-8b91-3d8869353636" + }, + "source": [ + "model" + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "Sequential(\n", + " (0): Linear(in_features=784, out_features=128, bias=True)\n", + " (1): Linear(in_features=128, out_features=64, bias=True)\n", + " (2): Sigmoid()\n", + " (3): Linear(in_features=64, out_features=32, bias=True)\n", + " (4): Linear(in_features=32, out_features=10, bias=True)\n", + ")" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 4 + } + ] + } + ] +} \ No newline at end of file diff --git a/Tasks/daily tasks/sreehari/day2.py b/Tasks/daily tasks/sreehari/day2.py new file mode 100644 index 0000000..a80b025 --- /dev/null +++ b/Tasks/daily tasks/sreehari/day2.py @@ -0,0 +1,7 @@ +import torch +import numpy as np +a=np.ones((5,3)) +b=np.ones((3,4)) +TTa=torch.from_numpy(a) +TTb=torch.from_numpy(b) +print (torch.mm(TTa,TTb)) \ No newline at end of file diff --git a/Tasks/daily tasks/sreeraj/day2.py b/Tasks/daily tasks/sreeraj/day2.py new file mode 100644 index 0000000..0401e0e --- /dev/null +++ b/Tasks/daily tasks/sreeraj/day2.py @@ -0,0 +1,9 @@ +import numpy as np +import torch + +x = np.random.rand(5,3) #created numpy array of shape 5,3 with random values +y = np.random.rand(3,2) #created numpy array of shape 3,2 with random values +a = torch.from_numpy(x) #converted numpy array into torch tensor +b = torch.from_numpy(y) +z = torch.matmul(a,b) #multiplied the 2 torch tensors +print(z) diff --git a/Tasks/daily tasks/sreeraj/day4.py b/Tasks/daily tasks/sreeraj/day4.py new file mode 100644 
index 0000000..85d0d37 --- /dev/null +++ b/Tasks/daily tasks/sreeraj/day4.py @@ -0,0 +1,37 @@ +import torch +import torch.nn as nn +import torch.optim as optim + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.h1 = nn.Linear(3, 6) + self.h2 = nn.Linear(6, 6) + self.op = nn.Linear(6, 1) + self.sigmoid = nn.Sigmoid() + + + def forward(self, x): + x = self.sigmoid(self.h1(x)) + x = self.sigmoid(self.h2(x)) + x = self.op(x) + return x + + +ip = torch.randn(4, 3) +op = torch.randn(4, 1) + +model = Net() +epoch = 10000 +optimizer = optim.SGD(model.parameters(), lr=0.01) +criterion = nn.MSELoss() + +for i in range(epoch): + out = model(ip) + loss = criterion(out, op) + model.zero_grad() + loss.backward() + optimizer.step() + +print("Loss : ", loss) +print("Prediction : ", out) diff --git a/Tasks/day2.py b/Tasks/day2.py new file mode 100644 index 0000000..4d713ab --- /dev/null +++ b/Tasks/day2.py @@ -0,0 +1,14 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import torch + +import numpy as np +x = np.random.random((5,3)) +y = np.random.random((3,4)) +a= torch.from_numpy(x) +b = torch.from_numpy(y) + +t=torch.mm(a,b) + +print(t) diff --git a/Tasks/registration_task/__pycache__/main.cpython-38.pyc b/Tasks/registration_task/__pycache__/main.cpython-38.pyc new file mode 100644 index 0000000..2659702 Binary files /dev/null and b/Tasks/registration_task/__pycache__/main.cpython-38.pyc differ diff --git a/Tasks/registration_task/__pycache__/test.cpython-38-pytest-5.4.3.pyc b/Tasks/registration_task/__pycache__/test.cpython-38-pytest-5.4.3.pyc new file mode 100644 index 0000000..4f27d31 Binary files /dev/null and b/Tasks/registration_task/__pycache__/test.cpython-38-pytest-5.4.3.pyc differ diff --git a/Tasks/registration_task/main.py b/Tasks/registration_task/main.py deleted file mode 100644 index b79be69..0000000 --- a/Tasks/registration_task/main.py +++ /dev/null @@ -1,21 +0,0 @@ -""" 
-Implement the linear regression model using python and numpy in the following class. -The method fit() should take inputs like, -x = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]] -y = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] -""" - -import numpy - -class LinearRegression(object): - """ - An implementation of linear regression model - """ - - def fit(_input, _output): - pass - - def predict(_input): - pass - - diff --git a/Tasks/registration_task/setup.py b/Tasks/registration_task/setup.py deleted file mode 100644 index 6ced2f9..0000000 --- a/Tasks/registration_task/setup.py +++ /dev/null @@ -1,14 +0,0 @@ -from setuptools import setup, find_packages - -requirements = [ - "numpy", - "pytest" -] - -setup( - name="registration-task", - version="0.0.1", - zip_safe=False, - packages=find_packages(exclude=['tests']), - include_package_data=True, - install_requires=requirements) diff --git a/Tasks/registration_task/test.py b/Tasks/registration_task/test.py deleted file mode 100644 index dd7ff6b..0000000 --- a/Tasks/registration_task/test.py +++ /dev/null @@ -1,16 +0,0 @@ -import pytest -from main import LinearRegression -x = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]] - -y = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11] -model = LinearRegression() - -model.fit(x, y) - -linear_test_set = [([[10]], 12), ([[11]], 13)] - -@pytest.mark.parametrize(("ip", "op"), linear_test_set) -def test_code(ip, op): - assert int(model.predict(ip)[0]) == op - - diff --git a/projects/README.md b/projects/README.md index 407e4cb..6fc6f1c 100644 --- a/projects/README.md +++ b/projects/README.md @@ -1,2 +1,23 @@ -# Projects -Student projects once finished will be pushed to this monorepo as well! +# Projects +Going throught the curated content and tasks you learnt pytorch and computervision. Now is the time to do a project to validate your learning. Follow the following steps to get started with projects. + +## Guidelines +### Idea selection +- The problem statement should be a computer vision task. 
+- You can collect dataset from the internet or collect your own dataset. + +### Teaming up +- Teaming up is optional. If interested you can form teams talking to your fellow students. + +### Idea presentation and working +Create a github repo for your project. In the README file add the following details +1. Project name +2. Brief description about the project +3. Info about the dataset (source, format, amount..etc) +4. Info about the model you are going to be used. + + +Update your details in this [teams form](https://bit.ly/TH_PyTorchProject). Mentor will evaluate your project and give necessary feedbacks. + + + diff --git a/resources/cifar10cnn.py b/resources/cifar10cnn.py new file mode 100644 index 0000000..d8aa555 --- /dev/null +++ b/resources/cifar10cnn.py @@ -0,0 +1,98 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torchvision +import torchvision.transforms as transforms +import torch.optim as optim + +transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize( + (0.5, 0.5, 0.5), + (0.5, 0.5, 0.5) + ) + ] +) + +trainset = torchvision.datasets.CIFAR10( + root='./data', + train=True, + download=False, + transform=transform +) + +testset = torchvision.datasets.CIFAR10( + root='./data', + train=False, + download=False, + transform=transform +) + +trainloader = torch.utils.data.DataLoader( + trainset, + batch_size=4, + shuffle=True, + num_workers=2 +) + +testloader = torch.utils.data.DataLoader( + testset, + batch_size=4, + shuffle=False, + num_workers=2 +) + +classes = ( + 'plane', 'car', 'bird', 'cat', + 'deer', 'dog', 'frog', 'horse', 'ship', 'truck' +) + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = 
self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x + + +net = Net() + +loss_function = nn.CrossEntropyLoss() +optimizer = optim.SGD( + net.parameters(), + lr=0.001 +) + +for epoch in range(2): + running_loss = 0.0 + for i, data in enumerate(trainloader, 0): + # data = (inputs, labels) + inputs, labels = data + optimizer.zero_grad() + + outputs = net(inputs) + loss = loss_function(outputs, labels) + loss.backward() + optimizer.step() + + running_loss = running_loss + loss.item() + if i % 2000 == 1999: + print( + '[%d, %5d] loss: %.3f' % + (epoch + 1, i+1, running_loss/2000) + ) + running_loss = 0.0 +print("vola") diff --git a/resources/feedforward_net.py b/resources/feedforward_net.py new file mode 100644 index 0000000..f2b7e31 --- /dev/null +++ b/resources/feedforward_net.py @@ -0,0 +1,42 @@ +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.tensorboard import SummaryWriter +writer = SummaryWriter('runs/dummy_model') +x = torch.tensor([[0,0,1],[0,1,1],[1,0,1],[1,1,1]]).float() +y = torch.tensor([[0], [1], [1], [0]]).float() +class Net(nn.Module): + def __init__(self, inp, out): + super(Net, self).__init__() + self.input = nn.Linear(inp, 4) + self.sigmoid = nn.Sigmoid() + self.h1 = nn.Linear(4, 8) + self.h2 = nn.Linear(8, 16) + self.output = nn.Linear(16, out) + + def forward(self, x): + x = self.input(x) + x = self.sigmoid(self.h1(x)) + x = self.h2(x) + x = self.output(x) + return x + + +model = Net(inp=3, out=1) +for name, param in model.named_parameters(): + if param.requires_grad: + print(name) +#print(model.forward(x)) +#print(model.forward(torch.tensor([[0, 0, 1]]).float())) +model.zero_grad() +criterion = nn.MSELoss() +optimr = optim.SGD(model.parameters(), lr=0.001) +for i in range(60000): + output = model(x) + target_ = y + loss = criterion(output, target_) + print(loss) + loss.backward() + optimr.step() + +print(model(x))