In [ ]:
"""
    ex1(a)

Return a tuple `(m, j)` where `m` is the largest element of `a` and `j`
is the enumeration position (1-based) at which it first occurs.
"""
function ex1(a)
    best_j = 1
    best_m = a[best_j]
    for (idx, val) in enumerate(a)
        if val > best_m
            best_m = val
            best_j = idx
        end
    end
    return (best_m, best_j)
end
Julia Basics: Exercise 2¶
In [ ]:
"""
    ex2(f, g)

Return the pointwise sum of `f` and `g`, i.e. a callable mapping
`x` to `f(x) + g(x)`.
"""
function ex2(f, g)
    h(x) = f(x) + g(x)
    return h
end
Julia Basics: Exercise 3¶
In [ ]:
"""
    compute_values(n, max_iters)

Compute an `n`×`n` matrix of Mandelbrot iteration counts on the grid
x ∈ [-1.7, 0.7], y ∈ [-1.2, 1.2], capping the escape-time iteration at
`max_iters`. Relies on `mandel(x, y, max_iters)` being defined elsewhere.
"""
function compute_values(n, max_iters)
    xs = LinRange(-1.7, 0.7, n)
    ys = LinRange(-1.2, 1.2, n)
    counts = zeros(Int, n, n)
    # Column-major fill: inner loop runs over the first index.
    for (col, yj) in enumerate(ys)
        for (row, xi) in enumerate(xs)
            counts[row, col] = mandel(xi, yj, max_iters)
        end
    end
    return counts
end
# Render the Mandelbrot escape-time grid as a heatmap.
values = compute_values(1000, 10)
# BUG FIX: `x` and `y` were previously undefined at top level (they were
# locals inside compute_values), so heatmap(x, y, values) would throw an
# UndefVarError. Reconstruct the same axes used by compute_values.
x = LinRange(-1.7, 0.7, 1000)
y = LinRange(-1.2, 1.2, 1000)
using GLMakie
heatmap(x, y, values)
Matrix Multiplication : Exercise 1¶
In [ ]:
# Distributed matrix multiply C = A*B: the rows of C are partitioned into
# equal contiguous blocks, one per Distributed.jl worker. Each worker
# multiplies its block of A by the full B via `matmul_seq!` (defined
# elsewhere) and the master gathers the results back into C.
# Requires m (rows of C) to be a multiple of nworkers().
function matmul_dist_3!(C,A,B)
m = size(C,1)
n = size(C,2)
l = size(A,2)
@assert size(A,1) == m
@assert size(B,2) == n
@assert size(B,1) == l
@assert mod(m,nworkers()) == 0
# Implement here
# Number of rows handled by each worker.
nrows_w = div(m,nworkers())
# @sync waits for all the @async fetch-tasks spawned in the loop body.
@sync for (i,w) in enumerate(workers())
# Contiguous row range owned by the i-th worker.
rows_w = (1:nrows_w) .+ (i-1)*nrows_w
# Copy (not view) of this worker's rows of A; the copy is what gets
# serialized and shipped to the remote process by @spawnat.
Aw = A[rows_w,:]
ftr = @spawnat w begin
Cw = similar(Aw,nrows_w,n)
matmul_seq!(Cw,Aw,B)
Cw
end
# Fetch each worker's result block into C concurrently with the
# remaining spawns; rows_w is a fresh binding per iteration, so the
# closure captures the right range.
@async C[rows_w,:] = fetch(ftr)
end
C
end
Jacobi Method : Exercise 1¶
In [ ]:
# Runs on every MPI rank (via MPIClusterManagers' @mpi_do): 1D Jacobi
# iteration parallelized with one ghost cell on each side of every rank's
# local block, overlapping halo exchange with interior computation.
@mpi_do manager begin
using MPI
comm = MPI.Comm_dup(MPI.COMM_WORLD)
nw = MPI.Comm_size(comm)
# 1-based rank id used for neighbor arithmetic below.
iw = MPI.Comm_rank(comm)+1
function jacobi_mpi(n,niters)
if mod(n,nw) != 0
println("n must be a multiple of nw")
MPI.Abort(comm,1)
end
n_own = div(n,nw)
# Local vector: entries 2:n_own+1 are owned; 1 and n_own+2 are ghosts.
u = zeros(n_own+2)
u[1] = -1
u[end] = 1
u_new = copy(u)
for t in 1:niters
reqs_snd = MPI.Request[]
reqs_rcv = MPI.Request[]
if iw != 1
# Left neighbor: send first owned entry, receive into left ghost.
neig_rank = (iw-1)-1
req = MPI.Isend(view(u,2:2),comm,dest=neig_rank,tag=0)
push!(reqs_snd,req)
req = MPI.Irecv!(view(u,1:1),comm,source=neig_rank,tag=0)
push!(reqs_rcv,req)
end
if iw != nw
# Right neighbor: send last owned entry, receive into right ghost.
neig_rank = (iw+1)-1
s = n_own+1
r = n_own+2
req = MPI.Isend(view(u,s:s),comm,dest=neig_rank,tag=0)
push!(reqs_snd,req)
req = MPI.Irecv!(view(u,r:r),comm,source=neig_rank,tag=0)
push!(reqs_rcv,req)
end
# Overlap: update interior points (no ghost dependence) while the
# halo messages are in flight.
for i in 3:n_own
u_new[i] = 0.5*(u[i-1]+u[i+1])
end
MPI.Waitall(reqs_rcv)
# Ghosts have arrived; update the two boundary-adjacent owned points.
for i in (2,n_own+1)
u_new[i] = 0.5*(u[i-1]+u[i+1])
end
MPI.Waitall(reqs_snd)
# Swap buffers for the next iteration.
u, u_new = u_new, u
end
# NOTE(review): the bare `u` below is a no-op — the @show on the next
# line already evaluates and returns u; one of the two could be dropped.
u
@show u
end
niters = 100
load = 4
n = load*nw
jacobi_mpi(n,niters)
end
Exercise: Ring communication - MPI¶
In [ ]:
# Ring reduction with MPI: a token starting at the root with value 1 is
# multiplied by each process's 1-based id as it travels around the ring,
# so the root finally receives factorial(number of ranks).
using MPI
using Test
MPI.Init()
comm = MPI.COMM_WORLD
rank = MPI.Comm_rank(comm)
id = rank + 1
root = 0
# NOTE(review): `size` shadows Base.size for the rest of this script.
size = MPI.Comm_size(comm)
# Ring neighbors (0-based ranks, wrapping around).
dst = mod(rank + 1, size)
src = mod(rank - 1, size)
send_buf = id
recv_buf = 1
if rank == root
# Proc 1: Send id async to destination, then wait for receive
MPI.isend(send_buf, comm; dest=dst, tag=0)
recv_buf = MPI.recv(comm; source=src, tag=0)
@show recv_buf == factorial(size)
@test recv_buf == factorial(size)
else
# Other procs: receive sync and send async to next process
recv_buf = MPI.recv(comm; source=src, tag=0)
send_buf = recv_buf * id
MPI.isend(send_buf, comm; dest=dst, tag=0)
end
# NOTE(review): the requests returned by MPI.isend are never waited on;
# completion before Finalize relies on eager buffering of small messages.
# Consider keeping the request and calling MPI.Wait on it.
MPI.Finalize()
Exercise: Ring communication - Distributed.jl¶
In [ ]:
# Ring reduction with Distributed.jl: workers take turns multiplying a
# shared value by their ring position (2..np), coordinated through a
# token channel, so the final product is factorial(np).
using Distributed
using Test
np = 4
# BUG FIX: nprocs() may already be >= np (e.g. when this cell is re-run),
# in which case the original addprocs(np - nprocs()) received a negative
# count and threw. Clamp at zero and skip the call when no workers are needed.
add_n = max(np - nprocs(), 0)
if add_n > 0
    addprocs(add_n)
end
worker_ids = workers()
@assert nprocs() > nworkers()  # master process plus the workers
# Token channel: holds the ring position of the process that last updated
# the value; position 1 (the master) seeds the ring.
id_chnl = RemoteChannel(()->Channel{Int}(1))
put!(id_chnl, 1)
# Data channel: holds the running product, seeded with 1.
job_chnl = RemoteChannel(()->Channel{Int}(1))
put!(job_chnl, 1)
@sync for w in workers()
    @spawnat w begin
        # Ring position of this worker (2..np); position 1 is the master.
        pos = findfirst(==(w), worker_ids) + 1
        # Predecessor in the ring whose token this worker waits for.
        src = mod(pos-2, np) + 1
        while true
            # fetch blocks until a token is present, then peek at it.
            pred = fetch(id_chnl)
            if pred == src
                take!(id_chnl)
                value = take!(job_chnl)
                put!(job_chnl, value * pos)
                put!(id_chnl, pos)
                break
            end
        end
    end
end
res = take!(job_chnl)
@show res
@test res == factorial(np)
rmprocs(workers())
License¶
TODO: replace link to website
This notebook is part of the course Programming Large Scale Parallel Systems at Vrije Universiteit Amsterdam and may be used under a CC BY 4.0 license.
In [ ]: