-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathloopFission.c
More file actions
216 lines (168 loc) · 5.21 KB
/
loopFission.c
File metadata and controls
216 lines (168 loc) · 5.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
/*
This is the baseline implementation of a 1D Stencil operation.
Parameters:
m0 > 0: dimension of the original input and output vector(array) size
k0 > 0: dimension of the original weights vector(array)
float* input_sequential: pointer to original input data
float* input_distributed: pointer to the input data that you have distributed across
the system
float* output_sequential: pointer to original output data
float* output_distributed: pointer to the output data that you have distributed across
the system
float* weights_sequential: pointer to original weights data
float* weights_distributed: pointer to the weights data that you have distributed across
the system
Functions: Modify these however you please.
DISTRIBUTED_ALLOCATE_NAME(...): Allocate the distributed buffers.
DISTRIBUTE_DATA_NAME(...): takes the sequential data and distributes it across the system.
COMPUTE_NAME(...): Performs the stencil computation.
COLLECT_DATA_NAME(...): Collect the distributed output and combine it back to the sequential
one for testing.
DISTRIBUTED_FREE_NAME(...): Free the distributed buffers that were allocated
- richard.m.veras@ou.edu
*/
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef COMPUTE_NAME
#define COMPUTE_NAME baseline
#endif
#ifndef DISTRIBUTE_DATA_NAME
#define DISTRIBUTE_DATA_NAME baseline_distribute
#endif
#ifndef COLLECT_DATA_NAME
#define COLLECT_DATA_NAME baseline_collect
#endif
#ifndef DISTRIBUTED_ALLOCATE_NAME
#define DISTRIBUTED_ALLOCATE_NAME baseline_allocate
#endif
#ifndef DISTRIBUTED_FREE_NAME
#define DISTRIBUTED_FREE_NAME baseline_free
#endif
void COMPUTE_NAME(int m0, int k0,
                  float *input_distributed,
                  float *weights_distributed,
                  float *output_distributed)
{
  /*
    Compute the circular 1D stencil on the root rank only:

      output[i] = sum_{p=0}^{k0-1} input[(i + p) mod m0] * weights[p]

    The loop nest is fissioned: one pass zeroes the output, a second
    pass accumulates, so the init and compute phases are independent.

    m0: length of input/output vectors (m0 > 0)
    k0: length of the weights vector (k0 > 0)
  */
  int rid;
  int num_ranks;
  int root_rid = 0;

  MPI_Comm_rank(MPI_COMM_WORLD, &rid);
  MPI_Comm_size(MPI_COMM_WORLD, &num_ranks);
  (void)num_ranks; /* single-rank baseline: size queried but not used */

  if (rid == root_rid) {
    /* First loop: initialize the output array. */
    for (int i0 = 0; i0 < m0; ++i0) {
      output_distributed[i0] = 0.0f;
    }

    /* Second loop: accumulate the wrap-around stencil. */
    for (int i0 = 0; i0 < m0; ++i0) {
      for (int p0 = 0; p0 < k0; ++p0) {
        output_distributed[i0] +=
            input_distributed[(p0 + i0) % m0] * weights_distributed[p0];
      }
    }
  }
  /* All non-root ranks do no work in this baseline. */
}
// Create the buffers on each node
void DISTRIBUTED_ALLOCATE_NAME(int m0, int k0,
                               float **input_distributed,
                               float **weights_distributed,
                               float **output_distributed) {
  /*
    Allocate the per-node buffers. In this baseline only the root rank
    holds data; all other ranks receive NULL pointers so that the later
    free pass is always well-defined (free(NULL) is a no-op).

    m0: length of input/output buffers
    k0: length of the weights buffer
    Ownership: the caller releases via DISTRIBUTED_FREE_NAME.
  */
  int rid;
  int root_rid = 0;

  MPI_Comm_rank(MPI_COMM_WORLD, &rid);

  if (rid == root_rid) {
    *input_distributed = malloc(sizeof(float) * (size_t)m0);
    *output_distributed = malloc(sizeof(float) * (size_t)m0);
    *weights_distributed = malloc(sizeof(float) * (size_t)k0);

    /* Fail fast on OOM so later passes never dereference NULL. */
    if (*input_distributed == NULL ||
        *output_distributed == NULL ||
        *weights_distributed == NULL) {
      fprintf(stderr, "DISTRIBUTED_ALLOCATE_NAME: allocation failed\n");
      MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }
  } else {
    /* Non-root ranks own no data; NULL keeps free() safe later. */
    *input_distributed = NULL;
    *output_distributed = NULL;
    *weights_distributed = NULL;
  }
}
void DISTRIBUTE_DATA_NAME(int m0, int k0,
                          float * input_sequential,
                          float * weights_sequential,
                          float * input_distributed,
                          float * weights_distributed) {
  /*
    Copy the sequential input and weights into the root rank's
    distributed buffers. Non-root ranks hold no data in this baseline.
  */
  int my_rank;
  int world_size;
  const int root_rank = 0;

  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  if (my_rank != root_rank) {
    /* Nothing to distribute on non-root ranks. */
    return;
  }

  /* Element-wise copy of the m0 input values. */
  int idx = 0;
  while (idx < m0) {
    input_distributed[idx] = input_sequential[idx];
    ++idx;
  }

  /* Element-wise copy of the k0 weight values. */
  idx = 0;
  while (idx < k0) {
    weights_distributed[idx] = weights_sequential[idx];
    ++idx;
  }
}
void COLLECT_DATA_NAME(int m0, int k0,
                       float * output_distributed,
                       float * output_sequential) {
  /*
    Gather the distributed output back into the sequential buffer for
    verification. Only the root rank holds results in this baseline.
  */
  int my_rank;
  int world_size;
  const int root_rank = 0;

  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
  MPI_Comm_size(MPI_COMM_WORLD, &world_size);

  if (my_rank != root_rank) {
    /* Non-root ranks contribute nothing to the collected output. */
    return;
  }

  /* Copy all m0 output values back to the sequential buffer. */
  int idx = 0;
  while (idx < m0) {
    output_sequential[idx] = output_distributed[idx];
    ++idx;
  }
}
void DISTRIBUTED_FREE_NAME(int m0, int k0,
                           float * input_distributed,
                           float * weights_distributed,
                           float * output_distributed) {
  /*
    Release the buffers created by DISTRIBUTED_ALLOCATE_NAME. Only the
    root rank allocated in this baseline, so only it frees.
  */
  int rid;
  int root_rid = 0;

  MPI_Comm_rank(MPI_COMM_WORLD, &rid);

  if (rid == root_rid) {
    free(input_distributed);
    free(weights_distributed);
    free(output_distributed);
  }

  /* Sizes are unused here; kept in the signature for interface symmetry. */
  (void)m0;
  (void)k0;
}