Proofread Jacobi method

This commit is contained in:
Gelieza K 2023-08-14 13:04:06 +02:00
parent 1c3062f249
commit d13ea2408a
3 changed files with 114 additions and 117 deletions

View File

@@ -2110,7 +2110,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Julia 1.9.0",
"display_name": "Julia 1.9.1",
"language": "julia",
"name": "julia-1.9"
},
@@ -2118,7 +2118,7 @@
"file_extension": ".jl",
"mimetype": "application/julia",
"name": "julia",
"version": "1.9.0"
"version": "1.9.1"
}
},
"nbformat": 4,

View File

@@ -35,17 +35,17 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 2,
"id": "1dc78750",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"jacobi_2_check (generic function with 1 method)"
"jacobi_4_check (generic function with 1 method)"
]
},
"execution_count": 5,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@@ -62,7 +62,9 @@
"end\n",
"gauss_seidel_1_check(answer) = answer_checker(answer,\"c\")\n",
"jacobi_1_check(answer) = answer_checker(answer, \"d\")\n",
"jacobi_2_check(answer) = answer_checker(answer, \"b\")"
"jacobi_2_check(answer) = answer_checker(answer, \"b\")\n",
"jacobi_3_check(answer) = answer_checker(answer, \"c\")\n",
"jacobi_4_check(anwswer) = answer_checker(answer, \"d\")"
]
},
{
@@ -73,7 +75,7 @@
"## The Jacobi method\n",
"\n",
"\n",
"The [Jacobi method](https://en.wikipedia.org/wiki/Jacobi_method) is a numerical tool to solve systems of linear algebraic equations. One of the main applications of the method is to solve boundary value problems (BVPs). I.e., given the values at the boundary (of a grid), the Jacoby method will find the interior values that fulfill a certain equation.\n",
"The [Jacobi method](https://en.wikipedia.org/wiki/Jacobi_method) is a numerical tool to solve systems of linear algebraic equations. One of the main applications of the method is to solve boundary value problems (BVPs). I.e., given the values at the boundary (of a grid), the Jacobi method will find the interior values that fulfill a certain equation.\n",
"\n"
]
},
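To make this concrete, here is a minimal sequential sketch (not the notebook's exact code) of the 1D Jacobi iteration, using the same update rule and boundary values (-1 and 1) that appear in the MPI code later in the notebook:

```julia
# Minimal sketch: sequential Jacobi iterations for a 1D boundary value problem.
function jacobi_seq(n, niters)
    u = zeros(n+2)
    u[1] = -1             # left boundary value (fixed)
    u[n+2] = 1            # right boundary value (fixed)
    u_new = copy(u)
    for t in 1:niters     # iteration loop
        for i in 2:n+1    # update interior points only
            u_new[i] = 0.5*(u[i-1]+u[i+1])
        end
        u, u_new = u_new, u   # swap buffers for the next iteration
    end
    u
end

jacobi_seq(8, 100)
```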
@@ -147,7 +149,7 @@
"metadata": {},
"source": [
"\n",
"### Where do we can exploit parallelism?\n",
"### Where can we exploit parallelism?\n",
"\n",
"Look at the two nested loops in the sequential implementation:\n",
"\n",
@@ -161,7 +163,7 @@
"```\n",
"\n",
"- The outer loop cannot be parallelized. The value of `u` at step `t+1` depends on the value at the previous step `t`.\n",
"- The inner loop can be parallelized\n",
"- The inner loop can be parallelized.\n",
"\n"
]
},
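As an illustration of the second bullet, here is a hedged sketch that parallelizes only the inner loop with Julia's built-in `Threads.@threads` (the notebook itself uses MPI instead; this is just to show which loop admits parallelism):

```julia
using Base.Threads

# Sketch: the outer loop stays sequential (step t+1 depends on step t),
# while the independent interior updates of one step run on multiple threads.
function jacobi_threaded(n, niters)
    u = zeros(n+2); u[1] = -1; u[n+2] = 1
    u_new = copy(u)
    for t in 1:niters
        @threads for i in 2:n+1
            u_new[i] = 0.5*(u[i-1]+u[i+1])
        end
        u, u_new = u_new, u
    end
    u
end
```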
@@ -239,20 +241,12 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"id": "4edad93f",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"It's not correct. Keep trying! 💪\n"
]
}
],
"outputs": [],
"source": [
"answer = \"x\" # replace x with a, b, c or d\n",
"gauss_seidel_1_check(answer)"
@@ -340,18 +334,10 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"id": "3a03fc4c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"It's not correct. Keep trying! 💪\n"
]
}
],
"outputs": [],
"source": [
"answer = \"x\" # replace x with a, b, c or d\n",
"jacobi_1_check(answer)"
@@ -366,7 +352,7 @@
"\n",
"We consider the implementation using MPI. The programming model of MPI is generally better suited for data-parallel algorithms like this one than the task-based model provided by Distributed.jl. In any case, one can also implement it using Distributed, but it requires some extra effort to setup remote channels right for the communication between neighbor processes.\n",
"\n",
"Take a look at the implementation below and try to understand it. Note that we have used MPIClustermanagers and Distributed just to run the MPI code on the notebook. When running it on a cluster MPIClustermanagers and Distributed are not needed.\n"
"Take a look at the implementation below and try to understand it. Note that we have used MPIClustermanagers and Distributed just to run the MPI code on the notebook. When running it on a cluster, MPIClustermanagers and Distributed are not needed.\n"
]
},
{
@@ -386,7 +372,7 @@
"metadata": {},
"outputs": [],
"source": [
"using MPIClusterManagers\n",
"using MPIClusterManagers \n",
"using Distributed"
]
},
@@ -407,13 +393,26 @@
{
"cell_type": "code",
"execution_count": null,
"id": "68851107",
"id": "a0923606",
"metadata": {},
"outputs": [],
"source": [
"@everywhere workers() begin\n",
"# Test cell, remove me\n",
"u = [-1, 0, 0, 0, 0, 1]\n",
"view(u, 6:6)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "68851107",
"metadata": {
"code_folding": []
},
"outputs": [],
"source": [
"@mpi_do manager begin\n",
" using MPI\n",
" MPI.Initialized() || MPI.Init()\n",
" comm = MPI.Comm_dup(MPI.COMM_WORLD)\n",
" nw = MPI.Comm_size(comm)\n",
" iw = MPI.Comm_rank(comm)+1\n",
@@ -429,6 +428,7 @@
" u_new = copy(u)\n",
" for t in 1:niters\n",
" reqs = MPI.Request[]\n",
" # Exchange cell values with neighbors\n",
" if iw != 1\n",
" neig_rank = (iw-1)-1\n",
" req = MPI.Isend(view(u,2:2),comm,dest=neig_rank,tag=0)\n",
@@ -438,8 +438,8 @@
" end\n",
" if iw != nw\n",
" neig_rank = (iw+1)-1\n",
" s = n_own-1\n",
" r = n_own\n",
" s = n_own+1\n",
" r = n_own+2\n",
" req = MPI.Isend(view(u,s:s),comm,dest=neig_rank,tag=0)\n",
" push!(reqs,req)\n",
" req = MPI.Irecv!(view(u,r:r),comm,source=neig_rank,tag=0)\n",
@@ -453,6 +453,14 @@
" end\n",
" u\n",
" @show u\n",
" # Gather results in root process\n",
" results = zeros(n+2)\n",
" results[1] = -1\n",
" results[n+2] = 1\n",
" MPI.Gather!(view(u,2:n_own+1), view(results, 2:n+1), root=0, comm)\n",
" if iw == 1\n",
" @show results\n",
" end \n",
" end\n",
" niters = 100\n",
" load = 4\n",
@@ -485,8 +493,60 @@
"outputs": [],
"source": [
"answer = \"x\" # replace x with a, b, c or d\n",
"jacobi_2_check(answer)\n",
"# TODO: think of more questions"
"jacobi_2_check(answer)"
]
},
{
"cell_type": "markdown",
"id": "075dd6d8",
"metadata": {},
"source": [
"<div class=\"alert alert-block alert-success\">\n",
"<b>Question:</b> After the end of the for-loop (line 43), ...\n",
"</div>\n",
"\n",
" a) each worker holds the complete solution.\n",
" b) the root process holds the solution. \n",
" c) the ghost cells contain redundant values. \n",
" d) all ghost cells contain the initial values -1 and 1. "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c3b58002",
"metadata": {},
"outputs": [],
"source": [
"answer = \"x\" # replace x with a, b, c or d\n",
"jacobi_3_check(answer)"
]
},
{
"cell_type": "markdown",
"id": "4537661d",
"metadata": {},
"source": [
"<div class=\"alert alert-block alert-success\">\n",
"<b>Question:</b> In line 35 of the code, we wait for all receive and send requests. Is it possible to instead wait for just the receive requests?\n",
"</div>\n",
"\n",
" \n",
" a) No, because the send buffer might be overwritten if we don't wait for send requests.\n",
" b) No, because MPI does not allow an asynchronous send without a Wait().\n",
" c) Yes, because each send has a matching receive, so all requests are done when the receive requests return. \n",
" d) Yes, because there are no writes to the send buffer in this iteration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e16ea5eb",
"metadata": {},
"outputs": [],
"source": [
"answer = \"x\" # replace x with a, b, c or d.\n",
"jacobi_4_check(answer)"
]
},
{
@@ -595,9 +655,8 @@
"metadata": {},
"outputs": [],
"source": [
"@everywhere workers() begin\n",
"@mpi_do manager begin\n",
" using MPI\n",
" MPI.Initialized() || MPI.Init()\n",
" comm = MPI.Comm_dup(MPI.COMM_WORLD)\n",
" nw = MPI.Comm_size(comm)\n",
" iw = MPI.Comm_rank(comm)+1\n",
@@ -613,6 +672,7 @@
" u_new = copy(u)\n",
" for t in 1:niters\n",
" reqs = MPI.Request[]\n",
" # Exchange cell values with neighbors\n",
" if iw != 1\n",
" neig_rank = (iw-1)-1\n",
" req = MPI.Isend(view(u,2:2),comm,dest=neig_rank,tag=0)\n",
@@ -622,8 +682,8 @@
" end\n",
" if iw != nw\n",
" neig_rank = (iw+1)-1\n",
" s = n_own-1\n",
" r = n_own\n",
" s = n_own+1\n",
" r = n_own+2\n",
" req = MPI.Isend(view(u,s:s),comm,dest=neig_rank,tag=0)\n",
" push!(reqs,req)\n",
" req = MPI.Irecv!(view(u,r:r),comm,source=neig_rank,tag=0)\n",
@@ -637,77 +697,14 @@
" end\n",
" u\n",
" @show u\n",
" end\n",
" niters = 100\n",
" load = 4\n",
" n = load*nw\n",
" jacobi_mpi(n,niters)\n",
"end"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f302cce2",
"metadata": {},
"outputs": [],
"source": [
"## TODO move the following solution to its appropiate place:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4fa7fad3",
"metadata": {},
"outputs": [],
"source": [
"@everywhere workers() begin\n",
" using MPI\n",
" MPI.Initialized() || MPI.Init()\n",
" comm = MPI.Comm_dup(MPI.COMM_WORLD)\n",
" nw = MPI.Comm_size(comm)\n",
" iw = MPI.Comm_rank(comm)+1\n",
" function jacobi_mpi(n,niters)\n",
" if mod(n,nw) != 0\n",
" println(\"n must be a multiple of nw\")\n",
" MPI.Abort(comm,1)\n",
" end\n",
" n_own = div(n,nw)\n",
" u = zeros(n_own+2)\n",
" u[1] = -1\n",
" u[end] = 1\n",
" u_new = copy(u)\n",
" for t in 1:niters\n",
" reqs_snd = MPI.Request[]\n",
" reqs_rcv = MPI.Request[]\n",
" if iw != 1\n",
" neig_rank = (iw-1)-1\n",
" req = MPI.Isend(view(u,2:2),comm,dest=neig_rank,tag=0)\n",
" push!(reqs_snd,req)\n",
" req = MPI.Irecv!(view(u,1:1),comm,source=neig_rank,tag=0)\n",
" push!(reqs_rcv,req)\n",
" end\n",
" if iw != nw\n",
" neig_rank = (iw+1)-1\n",
" s = n_own-1\n",
" r = n_own\n",
" req = MPI.Isend(view(u,s:s),comm,dest=neig_rank,tag=0)\n",
" push!(reqs_snd,req)\n",
" req = MPI.Irecv!(view(u,r:r),comm,source=neig_rank,tag=0)\n",
" push!(reqs_rcv,req)\n",
" end\n",
" for i in 3:n_own\n",
" u_new[i] = 0.5*(u[i-1]+u[i+1])\n",
" end\n",
" MPI.Waitall(reqs_rcv)\n",
" for i in (2,n_own+1)\n",
" u_new[i] = 0.5*(u[i-1]+u[i+1])\n",
" end\n",
" MPI.Waitall(reqs_snd)\n",
" u, u_new = u_new, u\n",
" end\n",
" u\n",
" # Gather results in root process\n",
" results = zeros(n+2)\n",
" results[1] = -1\n",
" results[n+2] = 1\n",
" MPI.Gather!(view(u,2:n_own+1), view(results, 2:n+1), root=0, comm)\n",
" if iw == 1\n",
" @show results\n",
" end \n",
" end\n",
" niters = 100\n",
" load = 4\n",

View File

@@ -56,9 +56,8 @@
"metadata": {},
"outputs": [],
"source": [
"@everywhere workers() begin\n",
"@mpi_do manager begin\n",
" using MPI\n",
" MPI.Initialized() || MPI.Init()\n",
" comm = MPI.Comm_dup(MPI.COMM_WORLD)\n",
" nw = MPI.Comm_size(comm)\n",
" iw = MPI.Comm_rank(comm)+1\n",
@@ -84,8 +83,8 @@
" end\n",
" if iw != nw\n",
" neig_rank = (iw+1)-1\n",
" s = n_own-1\n",
" r = n_own\n",
" s = n_own+1\n",
" r = n_own+2\n",
" req = MPI.Isend(view(u,s:s),comm,dest=neig_rank,tag=0)\n",
" push!(reqs_snd,req)\n",
" req = MPI.Irecv!(view(u,r:r),comm,source=neig_rank,tag=0)\n",
@@ -102,6 +101,7 @@
" u, u_new = u_new, u\n",
" end\n",
" u\n",
" @show u\n",
" end\n",
" niters = 100\n",
" load = 4\n",