copied from preCICE tutorials

This commit is contained in:
jakob.schratter 2026-01-26 16:14:46 +01:00
commit 3f1b1a6d0f
68 changed files with 156449 additions and 0 deletions

49
fluid-openfoam/0/T Normal file
View file

@ -0,0 +1,49 @@
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object T;
}
dimensions [ 0 0 0 1 0 0 0 ];
internalField uniform 300;
boundaryField
{
interface
{
type fixedGradient;
gradient uniform 0;
}
inlet
{
type fixedValue;
value $internalField;
}
outlet
{
type zeroGradient;
}
top
{
type zeroGradient;
}
bottom
{
type zeroGradient;
}
slip-bottom
{
type zeroGradient;
}
outerWall
{
type zeroGradient;
}
defaultFaces
{
type empty;
}
}

44
fluid-openfoam/0/U Normal file
View file

@ -0,0 +1,44 @@
FoamFile
{
version 2.0;
format ascii;
class volVectorField;
object U;
}
dimensions [ 0 1 -1 0 0 0 0 ];
internalField uniform ( 0.1 0 0 );
boundaryField
{
interface
{
type noSlip;
}
inlet
{
type fixedValue;
value $internalField;
}
outlet
{
type zeroGradient;
}
top
{
type slip;
}
bottom
{
type noSlip;
}
slip-bottom
{
type slip;
}
defaultFaces
{
type empty;
}
}

49
fluid-openfoam/0/alphat Normal file
View file

@ -0,0 +1,49 @@
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object alphat;
}
dimensions [ 1 -1 -1 0 0 0 0 ];
internalField uniform 0;
boundaryField
{
interface
{
type compressible::alphatWallFunction;
value uniform 0;
}
inlet
{
type compressible::alphatWallFunction;
value uniform 0;
}
outlet
{
type compressible::alphatWallFunction;
value uniform 0;
}
top
{
type compressible::alphatWallFunction;
value uniform 0;
}
bottom
{
type compressible::alphatWallFunction;
value uniform 0;
}
slip-bottom
{
type compressible::alphatWallFunction;
value uniform 0;
}
defaultFaces
{
type empty;
}
}

49
fluid-openfoam/0/epsilon Normal file
View file

@ -0,0 +1,49 @@
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object epsilon;
}
dimensions [ 0 2 -3 0 0 0 0 ];
internalField uniform 0.01;
boundaryField
{
interface
{
type epsilonWallFunction;
value uniform 0.01;
}
inlet
{
type epsilonWallFunction;
value uniform 0.01;
}
outlet
{
type epsilonWallFunction;
value uniform 0.01;
}
top
{
type epsilonWallFunction;
value uniform 0.01;
}
bottom
{
type epsilonWallFunction;
value uniform 0.01;
}
slip-bottom
{
type epsilonWallFunction;
value uniform 0.01;
}
defaultFaces
{
type empty;
}
}

49
fluid-openfoam/0/k Normal file
View file

@ -0,0 +1,49 @@
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object k;
}
dimensions [ 0 2 -2 0 0 0 0 ];
internalField uniform 0.1;
boundaryField
{
interface
{
type kqRWallFunction;
value uniform 0.1;
}
inlet
{
type kqRWallFunction;
value uniform 0.1;
}
outlet
{
type kqRWallFunction;
value uniform 0.1;
}
top
{
type kqRWallFunction;
value uniform 0.1;
}
slip-bottom
{
type kqRWallFunction;
value uniform 0.1;
}
bottom
{
type kqRWallFunction;
value uniform 0.1;
}
defaultFaces
{
type empty;
}
}

49
fluid-openfoam/0/nut Normal file
View file

@ -0,0 +1,49 @@
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object nut;
}
dimensions [ 0 2 -1 0 0 0 0 ];
internalField uniform 0;
boundaryField
{
interface
{
type nutkWallFunction;
value uniform 0;
}
inlet
{
type nutkWallFunction;
value uniform 0;
}
outlet
{
type nutkWallFunction;
value uniform 0;
}
top
{
type nutkWallFunction;
value uniform 0;
}
bottom
{
type nutkWallFunction;
value uniform 0;
}
slip-bottom
{
type nutkWallFunction;
value uniform 0;
}
defaultFaces
{
type empty;
}
}

54
fluid-openfoam/0/p Normal file
View file

@ -0,0 +1,54 @@
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object p;
}
dimensions [ 1 -1 -2 0 0 0 0 ];
internalField uniform 103500;
boundaryField
{
interface
{
type calculated;
value $internalField;
}
inlet
{
type calculated;
value $internalField;
}
outlet
{
type calculated;
value $internalField;
}
outerWall
{
type calculated;
value $internalField;
}
top
{
type calculated;
value $internalField;
}
bottom
{
type calculated;
value $internalField;
}
slip-bottom
{
type calculated;
value $internalField;
}
defaultFaces
{
type empty;
}
}

44
fluid-openfoam/0/p_rgh Normal file
View file

@ -0,0 +1,44 @@
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object p_rgh;
}
dimensions [ 1 -1 -2 0 0 0 0 ];
internalField uniform 103500;
boundaryField
{
interface
{
type zeroGradient;
}
inlet
{
type zeroGradient;
}
outlet
{
type fixedValue;
value $internalField;
}
top
{
type zeroGradient;
}
slip-bottom
{
type zeroGradient;
}
bottom
{
type zeroGradient;
}
defaultFaces
{
type empty;
}
}

6
fluid-openfoam/clean.sh Executable file
View file

@ -0,0 +1,6 @@
#!/bin/sh
set -e -u
# Clean this OpenFOAM case using the shared cleaning helpers
# (clean_openfoam also removes preCICE and case logs).
. ../../tools/cleaning-tools.sh
clean_openfoam .

10
fluid-openfoam/constant/g Normal file
View file

@ -0,0 +1,10 @@
FoamFile
{
version 2.0;
format ascii;
class uniformDimensionedVectorField;
object g;
}
dimensions [0 1 -2 0 0 0 0];
value (0 -9.81 0);

View file

@ -0,0 +1,72 @@
/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2512 |
| \\ / A nd | Website: www.openfoam.com |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
arch "LSB;label=32;scalar=64";
class polyBoundaryMesh;
location "constant/polyMesh";
object boundary;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
7
(
inlet
{
type wall;
inGroups 1(wall);
nFaces 41;
startFace 23692;
}
outlet
{
type wall;
inGroups 1(wall);
nFaces 41;
startFace 23733;
}
top
{
type wall;
inGroups 1(wall);
nFaces 293;
startFace 23774;
}
slip-bottom
{
type wall;
inGroups 1(wall);
nFaces 81;
startFace 24067;
}
bottom
{
type wall;
inGroups 1(wall);
nFaces 51;
startFace 24148;
}
interface
{
type wall;
inGroups 1(wall);
nFaces 161;
startFace 24199;
}
defaultFaces
{
type empty;
inGroups 1(empty);
nFaces 24026;
startFace 24360;
}
)
// ************************************************************************* //

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,37 @@
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "constant";
object thermophysicalProperties;
}
thermoType
{
type heRhoThermo;
mixture pureMixture;
transport const;
thermo hConst;
equationOfState perfectGas;
specie specie;
energy sensibleEnthalpy;
}
mixture
{
specie
{
molWeight 24.0999;
}
thermodynamics
{
Cp 5000.0;
Hf 0;
}
transport
{
mu 0.0002;
Pr 0.01;
}
}

View file

@ -0,0 +1,9 @@
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object turbulenceProperties;
}
simulationType laminar;

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,101 @@
TimeWindow TotalIterations Iterations Convergence
1 4 4 1
2 8 4 1
3 12 4 1
4 16 4 1
5 20 4 1
6 24 4 1
7 27 3 1
8 29 2 1
9 31 2 1
10 33 2 1
11 35 2 1
12 37 2 1
13 39 2 1
14 41 2 1
15 43 2 1
16 45 2 1
17 47 2 1
18 49 2 1
19 51 2 1
20 53 2 1
21 55 2 1
22 57 2 1
23 59 2 1
24 61 2 1
25 63 2 1
26 65 2 1
27 67 2 1
28 69 2 1
29 71 2 1
30 73 2 1
31 75 2 1
32 77 2 1
33 79 2 1
34 81 2 1
35 83 2 1
36 85 2 1
37 87 2 1
38 89 2 1
39 91 2 1
40 93 2 1
41 95 2 1
42 97 2 1
43 99 2 1
44 101 2 1
45 103 2 1
46 105 2 1
47 107 2 1
48 109 2 1
49 111 2 1
50 113 2 1
51 115 2 1
52 117 2 1
53 119 2 1
54 121 2 1
55 123 2 1
56 125 2 1
57 127 2 1
58 129 2 1
59 131 2 1
60 133 2 1
61 135 2 1
62 137 2 1
63 139 2 1
64 141 2 1
65 143 2 1
66 145 2 1
67 147 2 1
68 149 2 1
69 151 2 1
70 153 2 1
71 155 2 1
72 157 2 1
73 159 2 1
74 161 2 1
75 163 2 1
76 165 2 1
77 167 2 1
78 169 2 1
79 171 2 1
80 173 2 1
81 175 2 1
82 177 2 1
83 179 2 1
84 181 2 1
85 183 2 1
86 185 2 1
87 187 2 1
88 189 2 1
89 191 2 1
90 193 2 1
91 195 2 1
92 197 2 1
93 199 2 1
94 201 2 1
95 203 2 1
96 205 2 1
97 207 2 1
98 209 2 1
99 211 2 1
100 213 2 1

File diff suppressed because it is too large Load diff

12
fluid-openfoam/run.sh Executable file
View file

@ -0,0 +1,12 @@
#!/bin/bash
set -e -u

# Set up logging (defines LOGFILE and close_log) and mirror all output into the log file.
. ../../tools/log.sh
exec > >(tee --append "$LOGFILE") 2>&1

# Generate the mesh, then run the OpenFOAM solver ("$@" forwards options
# such as -parallel to the shared run script).
blockMesh
../../tools/run-openfoam.sh "$@"

# Remove stray result-less time directories, then write the end time to the log.
. ../../tools/openfoam-remove-empty-dirs.sh && openfoam_remove_empty_dirs
close_log

View file

@ -0,0 +1,101 @@
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object blockMeshDict;
}
vertices
(
(0 0 0)
(1 0 0)
(1 .5 0)
(0 .5 0)
(0 0 .4)
(1 0 .4)
(1 .5 .4)
(0 .5 .4)
(3 0 0)
(3 .5 0)
(3 0 .4)
(3 .5 .4)
(-.5 0 0)
(-.5 .5 0)
(-.5 .5 .4)
(-.5 0 .4)
);
blocks
(
hex (12 0 3 13 15 4 7 14) (81 41 1) simpleGrading (.2 15 1)
hex (0 1 2 3 4 5 6 7) (161 41 1) simpleGrading (5 15 1)
hex (1 8 9 2 5 10 11 6) (51 41 1) simpleGrading (1 15 1)
);
boundary
(
inlet
{
type wall;
faces
(
(13 12 15 14)
);
}
outlet
{
type wall;
faces
(
(8 9 11 10)
);
}
top
{
type wall;
faces
(
(7 6 2 3)
(9 2 6 11)
(13 3 7 14)
);
}
slip-bottom
{
type wall;
faces
(
(15 12 0 4)
);
}
bottom
{
type wall;
faces
(
(1 8 10 5)
);
}
interface
{
type wall;
faces
(
(4 0 1 5)
);
}
);

View file

@ -0,0 +1,46 @@
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object controlDict;
}
// Transient buoyant solver; the preCICE adapter is loaded below as a
// function object so the case takes part in the coupled simulation.
application buoyantPimpleFoam;
startFrom startTime;
startTime 0;
stopAt endTime;
endTime 1;
// Fixed time step of 0.01 s; results are written every 0.2 s.
deltaT 0.01;
writeControl runTime;
writeInterval 0.2;
purgeWrite 0;
writeFormat ascii;
writePrecision 6;
writeCompression off;
timeFormat general;
timePrecision 6;
runTimeModifiable false;
functions
{
// preCICE OpenFOAM adapter, configured via system/preciceDict
preCICE_Adapter
{
type preciceAdapterFunctionObject;
libs ("libpreciceAdapterFunctionObject.so");
}
}

View file

@ -0,0 +1,16 @@
FoamFile {
version 2.0;
class dictionary;
object decomposeParDict;
format ascii;
}
numberOfSubdomains 2;
method simple;
simpleCoeffs
{
n (2 1 1);
delta 0.001;
}

View file

@ -0,0 +1,48 @@
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "system";
object fvSchemes;
}
ddtSchemes
{
default Euler;
}
gradSchemes
{
default Gauss linear;
}
divSchemes
{
default none;
div(phi,U) Gauss upwind;
div(phi,h) Gauss upwind;
div(phi,e) Gauss upwind;
div(phi,k) Gauss upwind;
div(phi,epsilon) Gauss upwind;
div(phi,R) Gauss upwind;
div(phi,K) Gauss linear;
div(phi,Ekp) Gauss linear;
div(R) Gauss linear;
div(((rho*nuEff)*dev2(T(grad(U))))) Gauss linear;
}
laplacianSchemes
{
default Gauss linear corrected;
}
interpolationSchemes
{
default linear;
}
snGradSchemes
{
default corrected;
}

View file

@ -0,0 +1,55 @@
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "system";
object fvSolution;
}
solvers
{
"rho.*"
{
solver PCG;
preconditioner DIC;
tolerance 0;
relTol 0;
}
p_rgh
{
solver PCG;
preconditioner DIC;
tolerance 1e-8;
relTol 0.01;
}
p_rghFinal
{
$p_rgh;
relTol 0;
}
"(U|h|e|k|epsilon|R)"
{
solver PBiCGStab;
preconditioner DILU;
tolerance 1e-6;
relTol 0.1;
}
"(U|h|e|k|epsilon|R)Final"
{
$U;
relTol 0;
}
}
PIMPLE
{
momentumPredictor yes;
nOuterCorrectors 1;
nCorrectors 2;
nNonOrthogonalCorrectors 0;
}

View file

@ -0,0 +1,33 @@
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "system";
object preciceDict;
}
// Adapter configuration: participant name must match the one declared in
// the preCICE configuration file referenced below.
preciceConfig "../precice-config.xml";
participant Fluid;
// Conjugate heat transfer module
modules (CHT);
interfaces
{
Interface1
{
mesh Fluid-Mesh;
// Coupling happens on the "interface" patch (see constant/polyMesh/boundary)
patches (interface);
// This participant reads heat flux and writes temperature on the interface.
readData
(
Heat-Flux
);
writeData
(
Temperature
);
};
};

View file

@ -0,0 +1,24 @@
#!/usr/bin/env python3
"""Check that tutorial image files carry the expected filename prefix.

Usage: pass the file paths to check as command-line arguments.

Only paths of the form ``<tutorial>/images/<name>`` are inspected; all
other paths are ignored. Image names must start with ``quickstart-`` for
the quickstart tutorial and ``tutorials-<tutorial>-`` otherwise. Exits
with status 1 if any checked file violates this convention.
"""
import os
import sys


def expected_prefix(tutorial):
    """Return the filename prefix required for images of the given tutorial."""
    if tutorial == "quickstart":
        return "quickstart-"
    return f"tutorials-{tutorial}-"


def main(paths):
    """Check each path, report offenders on stdout and return an exit code."""
    problems = False
    for file in paths:
        parts = file.split(os.sep)
        # Ignore non-interesting files: only <tutorial>/images/<name> matters
        if len(parts) != 3 or parts[1] != "images":
            continue
        prefix = expected_prefix(parts[0])
        if not parts[2].startswith(prefix):
            print(f"Incorrect: {file}")
            print(f"Expected prefix: {prefix}")
            print()
            problems = True
    return 1 if problems else 0


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))

View file

@ -0,0 +1,3 @@
#!/bin/bash
# Rewrite http://precice.org links to https:// in-place in every file given
# as an argument.
sed -i "s/http:\/\/precice.org/https:\/\/precice.org/g" "$@"

42
preCICE_tools/check-size.sh Executable file
View file

@ -0,0 +1,42 @@
#!/bin/bash
# Run this script at the root of the repository to check the size of images.
# Regular images and animations (gif/webp/webm) have separate size limits.
CODE=0
MAXIMUMSIZE=750     # limit for regular images (kb)
MAXIMUMGIFSIZE=2200 # limit for animations (kb)
RED='\033[0;31m'
NOCOLOR='\033[0m'

# check_image <file> <limit-kb>: report the file's size and flag it (CODE=1)
# if it is at or above the limit. Runs in the current shell so it can set CODE.
check_image() {
    actualsize=$(du -k "$1" | cut -f 1)
    if [ "${actualsize}" -ge "$2" ]; then
        echo -e "$1:$RED $actualsize kb exceeds the limit of $2 kb. $NOCOLOR"
        CODE=1
    else
        echo -e "$1: $actualsize kb (Ok)."
    fi
}

# Check tutorials (every top-level directory except the ignored ones)
IGNORE="tools"
tutorials=$(find . -maxdepth 1 -type d -not -name ".*" | grep -vE "$IGNORE" | sed "s/^.\///")
echo "Limit for regular images: ${MAXIMUMSIZE} kb"
echo "Limit for gifs: ${MAXIMUMGIFSIZE} kb"
# For all tutorials do
for tutorial in $tutorials; do
    images=$(find ./"${tutorial}"/images -type f 2> /dev/null | sed "s/^.\///")
    for img in $images; do
        # Animated formats get the larger limit
        if [[ "${img}" == *.gif || "${img}" == *.webp || "${img}" == *.webm ]]; then
            check_image "$img" "${MAXIMUMGIFSIZE}"
        else
            check_image "$img" "${MAXIMUMSIZE}"
        fi
    done
done
[ ! "$CODE" -eq "0" ] && echo "There have been errors"
exit $CODE

74
preCICE_tools/check.sh Executable file
View file

@ -0,0 +1,74 @@
#!/bin/bash
# Run this script at the root of the repository to check images and permalinks.
# Every tutorial's *.md permalinks must start with "tutorials-<tutorial>" and
# its image filenames with "tutorials-<tutorial>-"; the quickstart uses the
# prefixes "quickstart"/"quickstart-".
CODE=0

# check_permalinks <dir> <prefix>: verify that every *.md file directly in
# <dir> that declares a permalink uses one starting with <prefix>.
# Runs in the current shell so it can set CODE on failure.
check_permalinks() {
    docs=$(find "./$1" -maxdepth 1 -type f -name "*.md" -print0 | xargs -0 grep -l "permalink:" | sed "s/^.\///")
    prefix="$2"
    for doc in $docs; do
        link=$(grep "permalink:" "$doc" | sed "s/permalink: \+//")
        if ! [[ $link =~ ^$prefix ]]; then
            echo "$doc: error: wrong permalink"
            echo "$doc: note: permalink \"$link\" does not start with \"$prefix\""
            CODE=1
        else
            echo "$doc: info: correct permalink"
            echo "$doc: note: permalink is \"$link\""
        fi
        echo
    done
}

# check_images <dir> <prefix>: verify that every file below <dir>/images/
# is named with <prefix>. Runs in the current shell so it can set CODE.
check_images() {
    images=$(find "./$1/images" -type f 2> /dev/null | sed "s/^.\///")
    prefix="$2"
    for img in $images; do
        if ! [[ $img =~ ^$1/images/$prefix ]]; then
            echo "$img: error: wrong filename"
            echo "$img: note: expected prefix \"$prefix\""
            CODE=1
        else
            echo "$img: info: correct filename"
        fi
        echo
    done
}

# Check tutorials
IGNORE="tools|quickstart"
tutorials=$(find . -maxdepth 1 -type d -not -name ".*" | grep -vE "$IGNORE" | sed "s/^.\///")
for tutorial in $tutorials; do
    check_permalinks "$tutorial" "tutorials-$tutorial"
    check_images "$tutorial" "tutorials-$tutorial-"
done

# Check quickstart
check_permalinks "quickstart" "quickstart"
check_images "quickstart" "quickstart-"

[ ! "$CODE" -eq "0" ] && echo "There have been errors"
exit $CODE

View file

@ -0,0 +1,11 @@
#!/bin/sh
set -e -u
# shellcheck disable=SC1091
. ../tools/cleaning-tools.sh
# Clean every case of this tutorial, then remove preCICE logs/exports
# and any leftover log and VTU files at the tutorial level.
clean_tutorial .
clean_precice_logs .
rm -fv ./*.log
rm -fv ./*.vtu

188
preCICE_tools/cleaning-tools.sh Executable file
View file

@ -0,0 +1,188 @@
#!/bin/sh

# Print an error message to stderr and abort with exit status 1.
error() {
echo "Error: $1" >&2
exit 1
}

# Clean a whole tutorial directory ($1): remove the precice-run directory,
# run the tutorial-level clean.sh (if any), and then each case's own
# clean.sh, skipping images/, reference-results/ and solver* directories.
# Runs in a subshell so the caller's working directory is untouched.
clean_tutorial() {
(
set -e -u
cd "$1"
echo "# Cleaning up all cases in $(pwd)..."
rm -rfv ./precice-run/
# Run clean.sh if it exists in the base tutorial directory
if test -f "clean.sh"; then
./clean.sh
fi
for case in */; do
if [ "${case}" = images/ ] || [ "${case}" = reference-results/ ]; then
continue
fi
case "${case}" in solver*)
continue
esac
(cd "${case}" && ./clean.sh || echo "No cleaning script in ${case} - skipping")
done
)
}
# Remove preCICE-generated artifacts (iteration/convergence/watchpoint logs,
# core dumps, profiling data and exports) from directory $1.
clean_precice_logs() {
(
set -e -u
cd "$1"
echo "- Cleaning up preCICE logs in $(pwd)"
rm -fv ./precice-*-iterations.log \
./precice-*-convergence.log \
./precice-*-watchpoint-*.log \
./precice-*-watchintegral-*.log \
./core
rm -rfv ./precice-profiling/ profiling.json trace.json
rm -rfv ./precice-exports/
)
}
# Remove the "<case>.log" file (as produced by log.sh) from directory $1.
clean_case_logs() {
(
set -e -u
cd "$1"
echo "- Cleaning up general case logs in $(pwd)"
# NOTE(review): since this file is sourced, $0 is the invoking script's path,
# so CASENAME is the directory containing the caller's clean.sh — presumably
# the case directory. Verify if this helper is ever used differently.
CASENAME="$(readlink -f "$0" | xargs dirname | xargs basename)"
rm -fv "./$CASENAME.log"
)
}
clean_calculix() {
(
set -e -u
cd "$1"
echo "- Cleaning up CalculiX case in $(pwd)"
rm -fv ./*.cvg ./*.dat ./*.frd ./*.sta ./*.12d ./*.rout spooles.out dummy
rm -fv WarnNodeMissMultiStage.nam
rm -fv ./*.eig
rm -fv ./*.vtk
clean_precice_logs .
clean_case_logs .
)
}
clean_codeaster() {
(
set -e -u
cd "$1"
echo "- Cleaning up code_aster case in $(pwd)"
rm -fv ./*.mess ./*.resu ./*.rmed
rm -rfv ./REPE_OUT/*
clean_precice_logs .
clean_case_logs .
)
}
clean_dealii() {
(
set -e -u
cd "$1"
echo "- Cleaning up deal.II case in $(pwd)"
rm -rfv ./dealii-output/
clean_precice_logs .
clean_case_logs .
)
}
clean_fenics() {
(
set -e -u
cd "$1"
echo "- Cleaning up FEniCS case in $(pwd)"
rm -rfv ./output/
clean_precice_logs .
clean_case_logs .
)
}
clean_nutils() {
(
set -e -u
cd "$1"
echo "- Cleaning up Nutils case in $(pwd)"
rm -fv ./*.vtk
clean_precice_logs .
clean_case_logs .
)
}
# Clean an OpenFOAM case directory ($1) using OpenFOAM's own CleanFunctions,
# then remove the preCICE and generic case logs. Requires an active OpenFOAM
# environment (WM_PROJECT set); aborts via error() otherwise.
clean_openfoam() {
    (
        set -e -u
        cd "$1"
        echo "- Cleaning up OpenFOAM case in $(pwd)"
        # Guard clause: error() exits the subshell, so nothing below runs
        # without a sourced OpenFOAM environment.
        [ -n "${WM_PROJECT:-}" ] || error "No OpenFOAM environment is active."
        # shellcheck disable=SC1090 # This is an OpenFOAM file which we don't need to check
        . "${WM_PROJECT_DIR}/bin/tools/CleanFunctions"
        cleanCase > /dev/null
        rm -rfv 0/uniform/functionObjects/functionObjectProperties history
        clean_precice_logs .
        clean_case_logs .
    )
}
clean_su2() {
(
set -e -u
cd "$1"
echo "- Cleaning up SU2 case in $(pwd)"
rm -fv ./restart_flow_*.dat ./restart_flow_*.csv forces_breakdown.dat ./surface_flow_*.csv ./flow_*.vtk ./history_*.vtk ./history.vtk ./history_*.csv ./history.csv ./surface_flow_*.vtu ./flow_*.vtu
clean_precice_logs .
clean_case_logs .
)
}
clean_aste() {
(
set -e -u
cd "$1"
echo "- Cleaning up ASTE results in $(pwd)"
rm -fv result.vtk result.stats.json
rm -fvr fine_mesh coarse_mesh mapped
)
}
clean_dune() {
(
set -e -u
cd "$1"
echo "- Cleaning up DUNE case in $(pwd)"
rm -fv ./dgfparser.log
rm -fv ./*.pvd
rm -fv ./*.vtu
rm -rfv ./output/
clean_precice_logs .
clean_case_logs .
)
}
clean_dumux() {
(
set -e -u
cd "$1"
echo "- Cleaning up DuMuX case in $(pwd)"
rm -fv ./*.vtu
rm -fv ./*.pvd
clean_precice_logs .
clean_case_logs .
)
}
clean_fmi() {
(
set -e -u
cd "$1"
echo "- Cleaning up FMI case in $(pwd)"
rm -rfv ./output/
clean_precice_logs .
clean_case_logs .
)
}

18
preCICE_tools/log.sh Normal file
View file

@ -0,0 +1,18 @@
#!/bin/bash
set -e -u

# Logging helper, meant to be sourced by a case's run.sh:
# defines LOGFILE (exported), records the start time, and provides
# close_log() to append the end time and duration.
CASENAME="$(pwd | xargs basename)"
LOGFILE="$CASENAME.log"
export LOGFILE
STARTDATE="$(date --rfc-email)"
STARTTIME="$(date +%s)"
# Truncate any previous log and record the start time
# (written to LOGFILE, which is the same file as "$CASENAME.log").
echo "Started on: $STARTDATE" | tee "$LOGFILE" 2>&1

# Append the end time and the wall-clock duration to the log.
# Call this at the end of the run script.
close_log() {
    ENDDATE="$(date --rfc-email)"
    ENDTIME="$(date +%s)"
    echo "Finished on: $ENDDATE" | tee --append "$LOGFILE" 2>&1
    echo "Duration: $((ENDTIME-STARTTIME)) seconds (wall-clock time, including time waiting for participants)" | tee --append "$LOGFILE" 2>&1
}

View file

@ -0,0 +1,27 @@
#! /bin/sh
# Cleaning up stray functionObjectProperties files, see https://github.com/precice/openfoam-adapter/issues/26

# Return 0 (success) if the time directory $1 contains at least one known
# result field, plain or gzipped; return 1 otherwise.
openfoam_dir_has_results() {
    for _field in U T D pointD DD pointDD U.gz T.gz D.gz pointD.gz DD.gz pointDD.gz; do
        if [ -f "$1/${_field}" ]; then
            return 0
        fi
    done
    return 1
}

# Delete every time directory (e.g. "15", "0.1") that contains no results,
# both in the case root and inside any processor* decomposition directories.
# Runs in a subshell so the caller's working directory is untouched.
openfoam_remove_empty_dirs() {
    (
        set -e -u
        echo "Cleaning up any time directories without results"
        for f in [0-9]* [0-9]*.[0-9]*; do
            if ! openfoam_dir_has_results "${f}"; then
                rm -rf "${f}"
            fi
        done
        if [ -d processor0 ]; then
            for d in processor*; do
                cd "${d}"
                for f in [0-9]* [0-9]*.[0-9]*; do
                    if ! openfoam_dir_has_results "${f}"; then
                        rm -rf "${f}"
                    fi
                done
                cd ..
            done
        fi
        echo "Done."
    )
}

36
preCICE_tools/run-dealii.sh Executable file
View file

@ -0,0 +1,36 @@
#!/bin/bash
set -e -u

# Run a deal.II case. The solver executable can be given explicitly with
# -e=/path/to/exe (or --exec=...); otherwise "elasticity" is looked up on
# the PATH and finally in the current directory.
EXE=""
for i in "$@"; do
    case $i in
    -e=* | --exec=*)
        EXE="${i#*=}"
        shift # past argument=value
        ;;
    *)
        # unknown option
        ;;
    esac
done

# If the executable has been defined
if [ -n "${EXE}" ]; then
    "${EXE}" parameters.prm
    exit 0
fi

EXE="elasticity"

# If it is in the global path
if [ -n "$(command -v "${EXE}")" ]; then
    "${EXE}" parameters.prm
    exit 0
fi

# If it has been copied to the local directory
if test -f "elasticity"; then
    ./"${EXE}" parameters.prm
else
    echo "Unable to find the executable ${EXE}. Either specify the executable explicitly (-e=/path/to/elasticity) or make it discoverable at runtime (e.g. export PATH)"
    # Fail explicitly: nothing was run, so callers must not see success.
    exit 1
fi

19
preCICE_tools/run-openfoam.sh Executable file
View file

@ -0,0 +1,19 @@
#!/bin/sh
set -e # Not setting -u as it gets triggered by the OpenFOAM RunFunctions

# Prepare an (intentionally empty) .foam file for the ParaView OpenFOAM reader
CASENAME="$(pwd | xargs basename)"
touch "$CASENAME.foam"

# OpenFOAM run functions: getApplication, getNumberOfProcessors
# shellcheck disable=SC1090 # This is an OpenFOAM file which we don't need to check
. "${WM_PROJECT_DIR}/bin/tools/RunFunctions"

# Solver name as reported by getApplication (the controlDict "application" entry)
solver=$(getApplication)
if [ "${1:-}" = "-parallel" ]; then
    # Parallel run: decompose the case, run under MPI, reconstruct the results
    procs=$(getNumberOfProcessors)
    decomposePar -force
    mpirun -np "${procs}" "${solver}" -parallel
    reconstructPar
else
    # Serial run
    ${solver}
fi

2
preCICE_tools/tests/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
*.pyc
*.pyo

View file

@ -0,0 +1,348 @@
---
title: preCICE system tests
permalink: dev-docs-system-tests.html
sidebar: docs_sidebar
keywords: pages, development, tests
summary: "Test complete simulations combining preCICE components of specific versions."
---
The tutorials repository hosts cases that need multiple components from the preCICE ecosystem to run. This directory provides tools that can automatically run complete simulations, using different versions of each component, and compare the results to references. While the main purpose is to run complete tests in the continuous integration workflows of preCICE, you can also run these tests on your laptop.
## Running the system tests
The main workflow for the user is executing the `systemtests.py` script. Depending on the options given to the script, it reads in the respective metadata files and generates `docker-compose.yaml` files that can start a fully-defined coupled simulation.
### Running the tests for a preCICE release
Workflow for the preCICE v3 release testing:
1. Collect the Git commits/tags of all components you want to test. The caching mechanism cannot detect changes based on branch names. The same effect might be encountered when rebasing and force-pushing the release branch.
2. In your terminal, navigate to the tutorials repository
3. Trigger the GitHub Actions Workflow. Until we merge the workflow to develop, this can only happen via the [GitHub CLI](https://cli.github.com/):
```bash
gh workflow run run_testsuite_manual.yml -f suites=release_test -f build_args="PRECICE_REF:v3.1.1,OPENFOAM_ADAPTER_REF:v1.3.0,PYTHON_BINDINGS_REF:v3.1.0,FENICS_ADAPTER_REF:v2.1.0,SU2_VERSION:7.5.1,SU2_ADAPTER_REF:64d4aff,TUTORIALS_REF:340b447" --ref=develop
```
4. Go to the tutorials [Actions](https://github.com/precice/tutorials/actions) page and find the running workflow
5. Check the status and the runtimes of each tutorial:
- Very small build times mean that the test is using cached container layers
- Most commonly, you will see tests failing with `Fieldcompare returned non zero exit code`. You will need to check the logs, but if the fieldcompare time is significant, this typically means that the numerical results differ above the tolerance (the test works!).
6. Download the build artifacts from Summary > runs.
- In there, you may want to check the `stdout.log` and `stderr.log` files.
- The produced results are in `precice-exports/`, the reference results in `reference-results-unpacked`.
- Compare using, e.g., ParaView or [fieldcompare](https://gitlab.com/dglaeser/fieldcompare): `fieldcompare dir precice-exports/ reference/`. The `--diff` option will give you `precice-exports/diff_*.vtu` files, while you can also try different tolerances with `-rtol` and `-atol`.
### Running specific test suites
To test a certain test-suite defined in `tests.yaml`, use:
```bash
python3 systemtests.py --suites=fenics_test,<someothersuite>
```
To discover all tests, use `python print_test_suites.py`.
To be able to fill in the right case tuple into the `tests.yaml`, you can use the `python3 print_case_combinations.py` script.
## Running the system tests on GitHub Actions
Go to Actions > [Run Testsuite (manual)](https://github.com/precice/tutorials/actions/workflows/run_testsuite_manual.yml) to see this workflow.
After bringing these changes to `master`, the manual triggering option should be visible on the top right. Until that happens, we can only trigger this workflow manually from the [GitHub CLI](https://github.blog/changelog/2021-04-15-github-cli-1-9-enables-you-to-work-with-github-actions-from-your-terminal/):
```shell
gh workflow run run_testsuite_manual.yml -f suites=fenics_test --ref=develop
```
Another example, to use the latest releases and enable debug information of the tests:
```shell
gh workflow run run_testsuite_manual.yml -f suites=fenics_test -f build_args="PRECICE_REF:v3.1.1,OPENFOAM_ADAPTER_REF:v1.3.0,PYTHON_BINDINGS_REF:v3.1.0,FENICS_ADAPTER_REF:v2.1.0,SU2_VERSION:7.5.1,SU2_ADAPTER_REF:64d4aff,TUTORIALS_REF:340b447" -f loglevel=DEBUG --ref=develop
```
where the `*_REF` should be a specific [commit-ish](https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefcommit-ishacommit-ishalsocommittish).
Example output:
```text
Run cd tools/tests
cd tools/tests
python systemtests.py --build_args=PRECICE_REF:v3.1.1,OPENFOAM_ADAPTER_REF:v1.3.0,PYTHON_BINDINGS_REF:v3.1.0,FENICS_ADAPTER_REF:v2.1.0 --suites=fenics_test --log-level=DEBUG
cd ../../
shell: /usr/bin/bash -e {0}
INFO: About to run the following systemtest in the directory /home/precice/runners_root/actions-runner-tutorial/_work/tutorials/tutorials/runs:
[Flow over heated plate (fluid-openfoam, solid-fenics)]
INFO: Started running Flow over heated plate (fluid-openfoam, solid-fenics), 0/1
DEBUG: Checking out tutorials master before copying
From https://github.com/precice/tutorials
* [new branch] master -> master
DEBUG: Building docker image for Flow over heated plate (fluid-openfoam, solid-fenics)
DEBUG: Running tutorial Flow over heated plate (fluid-openfoam, solid-fenics)
DEBUG: Running fieldcompare for Flow over heated plate (fluid-openfoam, solid-fenics)
DEBUG: extracting /home/precice/runners_root/actions-runner-tutorial/_work/tutorials/tutorials/flow-over-heated-plate/reference-results/fluid-openfoam_solid-fenics.tar.gz into /home/precice/runners_root/actions-runner-tutorial/_work/tutorials/tutorials/runs/flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723/reference_results
Using log-level: DEBUG
+---------------------------------------------------------+---------+-------------------+-----------------+-----------------------+
| systemtest | success | building time [s] | solver time [s] | fieldcompare time [s] |
CRITICAL: Fieldcompare returned non zero exit code, therefore Flow over heated plate (fluid-openfoam, solid-fenics) failed
INFO: Running Flow over heated plate (fluid-openfoam, solid-fenics) took 280.5861554039875 seconds
ERROR: Failed to run Flow over heated plate (fluid-openfoam, solid-fenics)
+---------------------------------------------------------+---------+-------------------+-----------------+-----------------------+
| Flow over heated plate (fluid-openfoam, solid-fenics) | 0 | 271.80 | 5.60 | 2.42 |
+---------------------------------------------------------+---------+-------------------+-----------------+-----------------------+
```
In this case, building and running seems to work out, but the tests fail because the results differ from the reference results. This may be incorrect, as the previous step may have silently failed.
## Understanding what went wrong
The easiest way to debug a systemtest run is first to have a look at the output written into the action on GitHub.
If this does not provide enough hints, the next step is to download the generated `runs` artifact. Note that by default this will only be generated if the systemtests fail.
Inside the archive, a test-specific subfolder like `flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723` contains two log files: a `stderr.log` and `stdout.log`. This can be a starting point for a further investigation.
## Adding new tests
### Adding tutorials
In order for the systemtests to pick up a tutorial, we need to define a `metadata.yaml` in the folder of the tutorial. There are a few `metadata.yaml` files already present to get inspiration from. You can also have a look at the implementation details, but normally the currently available ones should be easy to adapt. You can check your metadata parsing by running `python print_metadata.py` and `python print_case_combinations.py`.
### Adding Testsuites
To add a testsuite just open the `tests.yaml` file and use the output of `python print_case_combinations.py` to add the right case combinations you want to test. Note that you can specify a `reference_result` which is not yet present. The `generate_reference_data.py` will pick that up and create it for you.
Note that it's important to carefully check the paths of the `reference_result` entries in order to avoid typos. Also note that the same cases in different testsuites should use the same `reference_result`.
### Generate reference results
Since we need data to compare against, you need to run `python generate_reference_data.py`. This process might take a while.
Please include the generated reference results in the pull request as they are strongly connected to the new testsuites.
## Implementation details
Each tutorial contains automation scripts (mainly `run.sh` and `clean.sh`), as well as metadata (`metadata.yaml`). The metadata file describes the available cases, how to run them, as well as their dependencies. A central `tests.yaml` file in this directory defines test suites, which execute different combinations of cases. The Python script `systemtests.py` executes the tests, allowing to filter for specific components or test suites.
Let's dive deeper into some of these aspects.
### General architecture
Each tutorial directory contains a metadata file, describing which participants each case directory implements, and how to run it.
A list of tests describes all tests to be executed, grouped by test suites. Each test is a combination of tutorial cases.
Test steps include modifying the tutorial configuration files for the test system, building the Docker containers used by the respective Docker Compose service of each component, and comparing results to reference results using fieldcompare.
Tests are executed by the `systemtests.py` script, which starts the Docker Compose. This can be executed locally, and it is the same script that GitHub Actions also execute.
The multi-stage Docker build allows building each component separately from the same Dockerfile, while Docker reuses cached layers. The Docker Compose services consider GitHub Actions Cache when building the services, although the cache is currently only updated, not hit (see https://github.com/precice/tutorials/pull/372#issuecomment-1748335750).
### File structure
Metadata and workflow/script files:
- `.github/workflows/`
- `run_testsuite_workflow.yml`: workflow for running the tests, triggered by other workflows (e.g., other repositories)
- `run_testsuite_manual.yml`: manual triggering front-end for `run_testsuite_workflow.yml`
- `flow-over-a-heated-plate/`
- `fluid-openfoam/`
- `run.sh`: describes how to execute the respective case
- `solid-fenics/`
- `solid-openfoam/`
- ...
- `metadata.yml`: describes each case directory (which participant, which component, which script to run, ...)
- `tools/tests/`
- `component-templates/`: jinja2 templates for Docker Compose services for the components
- `calculix-adapter.yaml`
- `fenics-adapter.yaml`
- `openfoam-adapter.yaml`
- ...
- `dockerfiles/ubuntu_2204/`
- Dockerfile: a multi-stage build Dockerfile that defines how to build each component, in a layered approach
- `docker-compose.template.yaml`: Describes how to prepare each test (Docker Compose service template)
- `docker-compose.field_compare.template.yaml`: Describes how to compare results with fieldcompare (Docker Compose service template)
- `components.yaml`: Declares the available components and their parameters/options
- `reference_results.metadata.template`: Template for reporting the versions used to generate the reference results
- `reference_versions.yaml`: List of arguments to use for generating the reference results
- `tests.yaml`: Declares the available tests, grouped in test suites
User-facing tools:
- `tools/tests/`
- `systemtests.py`: Executes the system tests, starting Docker Compose services of each required component (after building them), running each test, and comparing the results to reference results.
- `print_test_suites.py`: Prints the available tests.
- `print_metadata.py`: Prints the metadata of each tutorial that contains a `metadata.yaml` file.
- `print_case_combinations.py`: Prints all possible combinations of tutorial cases, using the `metadata.yaml` files.
- `build_docker_images.py`: Build the Docker images for each test
- `generate_reference_results.py`: Executes the system tests with the versions defined in `reference_versions.yaml` and generates the reference data archives, with the names described in `tests.yaml`. (should only be used by the CI Pipeline)
Implementation scripts:
- `tools/tests/`
- `systemtests.py`: Main entry point
- `requirements.txt`: Dependencies (jinja2, pyyaml)
- `metadata_parser/`: Reads the YAML files into Python objects (defines the schema)
- `systemtests/`: Main implementation classes
- `Systemtest.py`
- `SystemtestArguments.py`
- `TestSuite.py`
### Metadata
Every tutorial contains a file called `metadata.yaml` describing some important properties of the tutorial. For example:
```yaml
name: Elastic tube 3D
path: elastic-tube-3d
url: https://precice.org/tutorials-elastic-tube-3d.html
participants:
- Fluid
- Solid
cases:
fluid-openfoam:
participant: Fluid
directory: ./fluid-openfoam
run: ./run.sh
component: openfoam-adapter
solid-calculix:
participant: Solid
directory: ./solid-calculix
run: ./run.sh
component: calculix-adapter
solid-fenics:
participant: Solid
directory: ./solid-fenics
run: ./run.sh
component: fenics-adapter
```
Description:
- `name`: A human-readable, descriptive name
- `path`: Where the tutorial is located, relative to the tutorials repository
- `url`: A web page with more information on the tutorial
- `participants`: A list of preCICE participants, typically corresponding to different domains of the simulation
- `cases`: A list of solver configuration directories. Each element of the list includes:
- `participant`: Which participant this solver case can serve as
- `directory`: Where the case directory is located, relative to the tutorial directory
- `run`: Command that executes the tutorial
- `component`: Component or list of components that this case depends upon (typically an adapter)
### Components
The components mentioned in the Metadata are defined in the central `components.yaml` file. This file also specifies some arguments and their default values. These arguments can be anything, but most often they are version-related information. For example, the version of the OpenFOAM library used by the openfoam-adapter component, or the openfoam-adapter component version itself. For example:
```yaml
openfoam-adapter:
repository: https://github.com/precice/openfoam-adapter
template: component-templates/openfoam-adapter.yaml
build_arguments:
PRECICE_REF:
description: Version of preCICE to use
default: "main"
PLATFORM:
description: Dockerfile platform used
default: "ubuntu_2204"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
OPENFOAM_EXECUTABLE:
options: ["openfoam2306","openfoam2212","openfoam2112"]
description: exectuable of openfoam to use
default: "openfoam2306"
OPENFOAM_ADAPTER_REF:
description: Reference/tag of the actual OpenFOAM adapter
default: "master"
```
This `openfoam-adapter` component has the following attributes:
- `repository`: URL to the Git projects
- `template`: A template for a Docker Compose service of this component
- `build_arguments`: Arguments passed to the Docker Compose service (arbitrary)
#### Naming schema for build_arguments
Since the docker containers are still a bit mixed in terms of capabilities and support for different build_argument combinations the following rules apply:
- A build_argument ending in **_REF** means that it refers to a git commit-ish (like a tag or commit) being used to build the image. It is important not to use branch names here, as we heavily rely on Docker's build cache to speed things up: since the input variable to the Docker builder would not change, we might get wrong cache hits.
- All other build_arguments are free of rules and up to the container maintainer.
### Component templates
Templates for defining a Docker Compose service for each component are available in `component-templates/`. For example:
```yaml
image: precice/fenics-adapter:{{ build_arguments["FENICS_ADAPTER_REF"] }}
depends_on:
prepare:
condition: service_completed_successfully
volumes:
- {{ run_directory }}:/runs
command: >
/bin/bash -c "id &&
cd '/runs/{{ tutorial_folder }}/{{ case_folder }}' &&
{{ run }} | tee system-tests_{{ case_folder }}.log 2>&1"
```
This template defines:
- `image`: The base Docker image for this component, including a Git reference (tag), provided to the template as argument (e.g., by the `systemtests.py` script).
- `depends_on`: Other services this service depends upon, typically a preparation service that fetches all components and tutorials.
- `volumes`: Directories mapped between the host and the container. Apart from directories relating to the users and groups, this also defines where to run the cases.
- `command`: How to run a case depending on this component, including how and where to redirect any screen output.
### Tests
Concrete tests are specified centrally in the file `tests.yaml`. For example:
```yaml
test_suites:
openfoam_adapter_pr:
tutorials:
- path: flow-over-heated-plate
case_combination:
- fluid-openfoam
- solid-openfoam
reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-openfoam.tar.gz
openfoam_adapter_release:
tutorials:
- path: flow-over-heated-plate
case_combination:
- fluid-openfoam
- solid-openfoam
reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-openfoam.tar.gz
- path: flow-over-heated-plate
case_combination:
- fluid-openfoam
- solid-fenics
reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-fenics.tar.gz
```
This defines two test suites, namely `openfoam_adapter_pr` and `openfoam_adapter_release`. Each of them defines which case combinations of which tutorials to run.
### Generate Reference Results
#### via GitHub workflow (recommended)
The preferred way of adding reference results is via the manual triggerable `Generate reference results (manual)` workflow. This takes two inputs:
- `from_ref`: branch where the new test configuration (e.g added tests, new reference_versions.yaml) is
- `commit_msg`: commit message for adding the reference results into the branch
The workflow will checkout the `from_ref`, take the status of the systemtests of that branch and execute `python generate_reference_results.py`, upload the LFS objects into the self-hosted LFS server and add a commit with `commit_msg` onto the `from_ref` branch.
#### manually
In order to generate the reference results edit the `reference_versions.yaml` to match the required `build_arguments` otherwise passed via the cli.
Executing `generate_reference_results.py` will then generate the following files:
- all distinct `.tar.gz` defined in the `tests.yaml`
- a `reference_results.md` in the tutorial folder describing the arguments used and a SHA-256 hash of each `tar.gz` archive.
The reference result archive will later be unpacked again during the systemtest and compared using `fieldcompare`
Please note that these files should always be kept in the git lfs.

View file

@ -0,0 +1,96 @@
import argparse
from pathlib import Path
from systemtests.SystemtestArguments import SystemtestArguments
from systemtests.Systemtest import Systemtest, display_systemtestresults_as_table
from systemtests.TestSuite import TestSuites
from metadata_parser.metdata import Tutorials
import logging
import time
from paths import PRECICE_TUTORIAL_DIR, PRECICE_TESTS_RUN_DIR, PRECICE_TESTS_DIR
def main():
    """Build the Docker images for every system test of the requested test suites.

    Exits with status 0 when all images built successfully, 1 otherwise.
    """
    parser = argparse.ArgumentParser(description='build docker images')

    # Add an argument for the components
    parser.add_argument('--suites', type=str,
                        help='Comma-separated test-suites to execute')
    parser.add_argument(
        '--build_args',
        type=str,
        help='Comma-separated list of arguments provided to the components like openfoam:2102,pythonbindings:latest')
    parser.add_argument('--rundir', type=str, help='Directory to run the systemtests in.',
                        nargs='?', const=PRECICE_TESTS_RUN_DIR, default=PRECICE_TESTS_RUN_DIR)
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO', help='Set the logging level')

    # Parse the command-line arguments
    args = parser.parse_args()

    # Configure logging based on the provided log level
    logging.basicConfig(level=args.log_level, format='%(levelname)s: %(message)s')
    print(f"Using log-level: {args.log_level}")

    systemtests_to_run = []
    available_tutorials = Tutorials.from_path(PRECICE_TUTORIAL_DIR)
    build_args = SystemtestArguments.from_args(args.build_args)
    run_directory = Path(args.rundir)

    if args.suites:
        test_suites_requested = args.suites.split(',')
        available_testsuites = TestSuites.from_yaml(
            PRECICE_TESTS_DIR / "tests.yaml", available_tutorials)
        test_suites_to_execute = []
        for test_suite_requested in test_suites_requested:
            test_suite_found = available_testsuites.get_by_name(
                test_suite_requested)
            if not test_suite_found:
                logging.error(f"Did not find the testsuite with name {test_suite_requested}")
            else:
                test_suites_to_execute.append(test_suite_found)
        if not test_suites_to_execute:
            raise RuntimeError(
                f"No matching test suites with names {test_suites_requested} found. Use print_test_suites.py to get an overview")
        # now convert the test_suites into systemtests
        for test_suite in test_suites_to_execute:
            tutorials = test_suite.cases_of_tutorial.keys()
            for tutorial in tutorials:
                for case, reference_result in zip(
                        test_suite.cases_of_tutorial[tutorial], test_suite.reference_results[tutorial]):
                    systemtests_to_run.append(
                        Systemtest(tutorial, build_args, case, reference_result))

    if not systemtests_to_run:
        raise RuntimeError("Did not find any Systemtests to execute.")

    logging.info(f"About to build the images for the following systemtests:\n {systemtests_to_run}")

    results = []
    # start=1 so the progress log reads "1/N" .. "N/N" instead of starting at 0
    for number, systemtest in enumerate(systemtests_to_run, start=1):
        logging.info(f"Started building {systemtest}, {number}/{len(systemtests_to_run)}")
        t = time.perf_counter()
        result = systemtest.run_only_build(run_directory)
        elapsed_time = time.perf_counter() - t
        logging.info(f"Building image for {systemtest} took {elapsed_time} seconds")
        results.append(result)

    build_docker_success = True
    for result in results:
        if not result.success:
            logging.error(f"Failed to run {result.systemtest}")
            build_docker_success = False
        else:
            logging.info(f"Success running {result.systemtest}")

    display_systemtestresults_as_table(results)

    # Non-zero exit code signals failure to CI
    raise SystemExit(0 if build_docker_success else 1)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,16 @@
build:
  context: {{ dockerfile_context }}
  args:
    {% for key, value in build_arguments.items() %}
    - {{key}}={{value}}
    {% endfor %}
  target: precice
depends_on:
  prepare:
    condition: service_completed_successfully
volumes:
  - {{ run_directory }}:/runs
# Redirect stderr into stdout *before* the pipe, so solver errors end up in the log too
# (with "| tee file 2>&1" only tee's own stderr would be redirected).
command: >
  /bin/bash -c "id &&
  cd '/runs/{{ tutorial_folder }}/{{ case_folder }}' &&
  {{ run }} 2>&1 | tee system-tests_{{ case_folder }}.log"

View file

@ -0,0 +1,16 @@
build:
  context: {{ dockerfile_context }}
  args:
    {% for key, value in build_arguments.items() %}
    - {{key}}={{value}}
    {% endfor %}
  target: calculix_adapter
depends_on:
  prepare:
    condition: service_completed_successfully
volumes:
  - {{ run_directory }}:/runs
# Redirect stderr into stdout *before* the pipe, so solver errors end up in the log too
# (with "| tee file 2>&1" only tee's own stderr would be redirected).
command: >
  /bin/bash -c "id &&
  cd '/runs/{{ tutorial_folder }}/{{ case_folder }}' &&
  {{ run }} 2>&1 | tee system-tests_{{ case_folder }}.log"

View file

@ -0,0 +1,16 @@
build:
  context: {{ dockerfile_context }}
  args:
    {% for key, value in build_arguments.items() %}
    - {{key}}={{value}}
    {% endfor %}
  target: fenics_adapter
depends_on:
  prepare:
    condition: service_completed_successfully
volumes:
  - {{ run_directory }}:/runs
# Redirect stderr into stdout *before* the pipe, so solver errors end up in the log too
# (with "| tee file 2>&1" only tee's own stderr would be redirected).
command: >
  /bin/bash -c "id &&
  cd '/runs/{{ tutorial_folder }}/{{ case_folder }}' &&
  {{ run }} 2>&1 | tee system-tests_{{ case_folder }}.log"

View file

@ -0,0 +1,16 @@
build:
  context: {{ dockerfile_context }}
  args:
    {% for key, value in build_arguments.items() %}
    - {{key}}={{value}}
    {% endfor %}
  target: nutils_adapter
depends_on:
  prepare:
    condition: service_completed_successfully
volumes:
  - {{ run_directory }}:/runs
# Redirect stderr into stdout *before* the pipe, so solver errors end up in the log too
# (with "| tee file 2>&1" only tee's own stderr would be redirected).
command: >
  /bin/bash -c "id &&
  cd '/runs/{{ tutorial_folder }}/{{ case_folder }}' &&
  {{ run }} 2>&1 | tee system-tests_{{ case_folder }}.log"

View file

@ -0,0 +1,16 @@
build:
  context: {{ dockerfile_context }}
  args:
    {% for key, value in build_arguments.items() %}
    - {{key}}={{value}}
    {% endfor %}
  target: openfoam_adapter
depends_on:
  prepare:
    condition: service_completed_successfully
volumes:
  - {{ run_directory }}:/runs
# The run script is executed inside the "openfoam" wrapper so the OpenFOAM environment is loaded.
# Redirect stderr into stdout *before* the pipe, so solver errors end up in the log too
# (with "| tee file 2>&1" only tee's own stderr would be redirected).
command: >
  /bin/bash -c "id &&
  cd '/runs/{{ tutorial_folder }}/{{ case_folder }}' &&
  openfoam {{ run }} 2>&1 | tee system-tests_{{ case_folder }}.log"

View file

@ -0,0 +1,16 @@
build:
  context: {{ dockerfile_context }}
  args:
    {% for key, value in build_arguments.items() %}
    - {{key}}={{value}}
    {% endfor %}
  target: python_bindings
depends_on:
  prepare:
    condition: service_completed_successfully
volumes:
  - {{ run_directory }}:/runs
# Redirect stderr into stdout *before* the pipe, so solver errors end up in the log too
# (with "| tee file 2>&1" only tee's own stderr would be redirected).
command: >
  /bin/bash -c "id &&
  cd '/runs/{{ tutorial_folder }}/{{ case_folder }}' &&
  {{ run }} 2>&1 | tee system-tests_{{ case_folder }}.log"

View file

@ -0,0 +1,16 @@
build:
  context: {{ dockerfile_context }}
  args:
    {% for key, value in build_arguments.items() %}
    - {{key}}={{value}}
    {% endfor %}
  target: su2_adapter
depends_on:
  prepare:
    condition: service_completed_successfully
volumes:
  - {{ run_directory }}:/runs
# SU2_RUN/PYTHONPATH point at the SU2 installation inside the image (see the Dockerfile su2_adapter stage).
# Redirect stderr into stdout *before* the pipe, so solver errors end up in the log too
# (with "| tee file 2>&1" only tee's own stderr would be redirected).
command: >
  /bin/bash -c "id &&
  cd '/runs/{{ tutorial_folder }}/{{ case_folder }}' &&
  SU2_RUN="/home/precice/SU2_RUN/bin" PYTHONPATH="/home/precice/SU2_RUN/bin:$PYTHONPATH" {{ run }} 2>&1 | tee system-tests_{{ case_folder }}.log"

View file

@ -0,0 +1,129 @@
# Components available to the system tests, and the build arguments each one accepts.
# Schema per component: repository, template, build_arguments.
# Schema per build argument: description, default, value_options (optional list of accepted values).
bare: # A default component used when the solver does not have any dependencies apart from preCICE itself
  repository: https://github.com/precice/precice
  template: component-templates/bare.yaml
  build_arguments: # these things mean something to the docker-service
    PRECICE_REF:
      description: Version of preCICE to use
      default: "main"
    PLATFORM:
      description: Dockerfile platform used
      default: "ubuntu_2204"
    TUTORIALS_REF:
      description: Tutorial git reference to use
      default: "master"
python-bindings:
  repository: https://github.com/precice/python-bindings
  template: component-templates/python-bindings.yaml
  build_arguments:
    PRECICE_REF:
      description: Version of preCICE to use
      default: "main"
    PLATFORM:
      description: Dockerfile platform used
      default: "ubuntu_2204"
    TUTORIALS_REF:
      description: Tutorial git reference to use
      default: "master"
    PYTHON_BINDINGS_REF:
      description: Git ref of the pythonbindings to use
      default: "master"
openfoam-adapter:
  repository: https://github.com/precice/openfoam-adapter
  template: component-templates/openfoam-adapter.yaml
  build_arguments:
    PRECICE_REF:
      description: Version of preCICE to use
      default: "main"
    PLATFORM:
      description: Dockerfile platform used
      default: "ubuntu_2204"
    TUTORIALS_REF:
      description: Tutorial git reference to use
      default: "master"
    OPENFOAM_EXECUTABLE:
      value_options: ["openfoam2306", "openfoam2212", "openfoam2112"]
      description: executable of OpenFOAM to use
      default: "openfoam2306"
    OPENFOAM_ADAPTER_REF:
      description: Reference/tag of the actual OpenFOAM adapter
      default: "master"
fenics-adapter:
  repository: https://github.com/precice/fenics-adapter
  template: component-templates/fenics-adapter.yaml
  build_arguments:
    PRECICE_REF:
      description: Version of preCICE to use
      default: "main"
    PLATFORM:
      description: Dockerfile platform used
      default: "ubuntu_2204"
    TUTORIALS_REF:
      description: Tutorial git reference to use
      default: "master"
    PYTHON_BINDINGS_REF:
      description: Git ref of the pythonbindings to use
      default: "master"
    FENICS_ADAPTER_REF:
      description: Git ref of the fenics adapter to use
      default: "master"
nutils-adapter:
  repository: https://github.com/precice/nutils-adapter
  template: component-templates/nutils-adapter.yaml
  build_arguments:
    PRECICE_REF:
      description: Version of preCICE to use
      default: "main"
    PLATFORM:
      description: Dockerfile platform used
      default: "ubuntu_2204"
    TUTORIALS_REF:
      description: Tutorial git reference to use
      default: "master"
    PYTHON_BINDINGS_REF:
      description: Git ref of the pythonbindings to use
      default: "master"
calculix-adapter:
  repository: https://github.com/precice/calculix-adapter
  template: component-templates/calculix-adapter.yaml
  build_arguments:
    PRECICE_REF:
      description: Version of preCICE to use
      default: "main"
    PLATFORM:
      description: Dockerfile platform used
      default: "ubuntu_2204"
    TUTORIALS_REF:
      description: Tutorial git reference to use
      default: "master"
    # NOTE(review): "CALULIX" (sic) matches the ARG names in the Dockerfile;
    # renaming would have to happen in both places at once.
    CALULIX_VERSION:
      description: Version of Calculix to use
      default: "2.20"
    CALULIX_ADAPTER_REF:
      description: Version of Calculix-Adapter to use
      default: "master"
su2-adapter:
  repository: https://github.com/precice/su2-adapter
  template: component-templates/su2-adapter.yaml
  build_arguments:
    PRECICE_REF:
      description: Version of preCICE to use
      default: "main"
    PLATFORM:
      description: Dockerfile platform used
      default: "ubuntu_2204"
    TUTORIALS_REF:
      description: Tutorial git reference to use
      default: "master"
    SU2_VERSION:
      description: Version of SU2 to use
      default: "7.5.1"
    SU2_ADAPTER_REF:
      description: Version of SU2-Adapter to use
      default: "master"

View file

@ -0,0 +1,13 @@
version: "3.9"
services:
  # Compares the preCICE output of the test run against the unpacked reference results.
  field-compare:
    build: https://github.com/dglaeser/fieldcompare-action.git # use the docker container provided by fieldcompare
    volumes:
      - {{ run_directory }}:/runs
    command:
      - /runs/{{ tutorial_folder }}/{{ precice_output_folder }}
      - /runs/{{ tutorial_folder }}/{{ reference_output_folder }}
      # NOTE(review): this is passed as a single argv entry "-rtol 3e-7" — confirm the
      # fieldcompare action splits it; otherwise flag and value may need separate list items.
      - "-rtol 3e-7"
# Currently it is really hard to estimate the impact of compiling and executing on a different platform (like GitHub Actions).
# 3e-7 might not be the tightest tolerance we can afford and want to have, but it is an okay-ish guesstimate for now.

View file

@ -0,0 +1,24 @@
version: "3.9"
services:
  # The prepare service patches the tutorial's preCICE configuration for the
  # containerized test setup before any solver service is started.
  prepare:
    build:
      context: {{ dockerfile_context }}
      target: base_image
      args:
        {% for key, value in build_arguments.items() %}
        - {{key}}={{value}}
        {% endfor %}
    volumes:
      - {{ run_directory }}:/runs
    # First sed: inject a VTK export into every participant so results can be compared afterwards.
    # Second sed: pin the m2n socket communication to the container network interface eth0.
    command: >
      /bin/bash -c "id &&
      cd '/runs/{{ tutorial_folder }}' &&
      sed -i 's%</participant>%<export:vtk directory=\"../{{precice_output_folder}}\" /> </participant>%g' precice-config.xml &&
      sed -i 's|m2n:sockets |m2n:sockets network=\"eth0\" |g' precice-config.xml &&
      cat precice-config.xml"
  # One service per solver case, rendered from the component templates.
  {% for service in services %}
  {{ service }}:
    {{ services[service] |indent(4) }}
  {% endfor %}

View file

@ -0,0 +1,156 @@
# Base image with a non-root "precice" user whose UID/GID can be aligned with the
# host user at build time, so files written to mounted volumes keep sane ownership.
FROM ubuntu:22.04 as base_image
USER root
SHELL ["/bin/bash", "-c"]
ENV DEBIAN_FRONTEND=noninteractive
# We set a sensible value, but still have the possibility to influence this via the build time arguments.
# When the Dockerfile is built using systemtests.py, we set PRECICE_UID and PRECICE_GID to the user executing the systemtests.
# This ensures no file ownership problems down the line and is the easiest fix, as we normally build the containers locally.
# If not built via systemtests.py, it is also possible to specify them manually, but 1000 would be the default anyway.
ARG PRECICE_UID=1000
ARG PRECICE_GID=1000
RUN groupadd -g ${PRECICE_GID} precice && useradd -u ${PRECICE_UID} -g ${PRECICE_GID} -ms /bin/bash precice
# Make user-local installs under /home/precice/.local visible to the shell,
# the dynamic linker and the compiler.
ENV PATH="${PATH}:/home/precice/.local/bin"
ENV LD_LIBRARY_PATH="/home/precice/.local/lib:${LD_LIBRARY_PATH}"
ENV CPATH="/home/precice/.local/include:$CPATH"
# Enable detection with pkg-config and CMake
ENV PKG_CONFIG_PATH="/home/precice/.local/lib/pkgconfig:$PKG_CONFIG_PATH"
ENV CMAKE_PREFIX_PATH="/home/precice/.local:$CMAKE_PREFIX_PATH"
USER precice

# Build dependencies shared by preCICE and all adapter stages.
# NOTE: the stage name "precice_dependecies" (sic) is referenced by every later stage; keep the spelling.
FROM base_image as precice_dependecies
USER root
# Installing necessary dependencies for preCICE
RUN apt-get -qq update && \
    apt-get -qq -y install \
    build-essential \
    software-properties-common \
    cmake \
    curl \
    g++ \
    gfortran \
    git \
    libbenchmark-dev \
    libboost-all-dev \
    libeigen3-dev \
    libxml2-dev \
    lsb-release \
    petsc-dev \
    python3-dev \
    python3-numpy \
    python3-pip \
    python3-venv \
    pkg-config \
    wget
USER precice
RUN python3 -m pip install --user --upgrade pip

FROM precice_dependecies as precice
# Install & build precice into /home/precice/precice
ARG PRECICE_REF
USER precice
WORKDIR /home/precice
RUN git clone https://github.com/precice/precice.git precice && \
    cd precice && \
    git checkout ${PRECICE_REF} && \
    mkdir build && cd build &&\
    cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/home/precice/.local/ -DPRECICE_PETScMapping=OFF -DBUILD_TESTING=OFF && \
    make all install -j $(nproc)
# OpenFOAM adapter stage: installs an OpenFOAM distribution and builds the
# preCICE OpenFOAM adapter against it.
FROM precice_dependecies as openfoam_adapter
ARG OPENFOAM_EXECUTABLE
USER root
# Install OpenFOAM from the official repository and expose the selected
# version under the generic path /usr/bin/openfoam.
RUN apt-get update &&\
    wget -q -O - https://dl.openfoam.com/add-debian-repo.sh | bash &&\
    apt-get -qq install ${OPENFOAM_EXECUTABLE}-dev &&\
    ln -s $(which ${OPENFOAM_EXECUTABLE} ) /usr/bin/openfoam
USER precice
# Reuse the preCICE installation built in the "precice" stage
COPY --from=precice /home/precice/.local/ /home/precice/.local/
ARG OPENFOAM_ADAPTER_REF
# Build the OpenFOAM adapter
USER precice
WORKDIR /home/precice
RUN git clone https://github.com/precice/openfoam-adapter.git &&\
    cd openfoam-adapter && \
    git checkout ${OPENFOAM_ADAPTER_REF} && \
    /usr/bin/${OPENFOAM_EXECUTABLE} ./Allwmake -j $(nproc)

# Python bindings stage: preCICE python bindings on top of the "precice" stage install.
FROM precice_dependecies as python_bindings
COPY --from=precice /home/precice/.local/ /home/precice/.local/
ARG PYTHON_BINDINGS_REF
USER precice
WORKDIR /home/precice
# Builds the preCICE python bindings for python3.
# Also installs matplotlib as it is needed for the elastic-tube 1d fluid-python participant.
RUN pip3 install --user git+https://github.com/precice/python-bindings.git@${PYTHON_BINDINGS_REF} && \
    pip3 install --user matplotlib

# FEniCS adapter stage: FEniCS from its PPA plus the preCICE FEniCS adapter.
FROM precice_dependecies as fenics_adapter
COPY --from=python_bindings /home/precice/.local /home/precice/.local
USER root
RUN add-apt-repository -y ppa:fenics-packages/fenics && \
    apt-get -qq update && \
    apt-get -qq install --no-install-recommends fenics
USER precice
RUN pip3 install --user fenics-ufl
ARG FENICS_ADAPTER_REF
# Building fenics-adapter
RUN pip3 install --user git+https://github.com/precice/fenics-adapter.git@${FENICS_ADAPTER_REF}

# Nutils stage
FROM precice_dependecies as nutils_adapter
COPY --from=python_bindings /home/precice/.local /home/precice/.local
USER precice
# Installing nutils - There is no adapter
RUN pip3 install --user nutils
# CalculiX adapter stage: downloads the CalculiX sources and builds the preCICE adapter.
FROM precice_dependecies as calculix_adapter
COPY --from=precice /home/precice/.local /home/precice/.local
USER root
RUN apt-get -qq update && \
    apt-get -qq install libarpack2-dev libspooles-dev libyaml-cpp-dev
# NOTE(review): "CALULIX" (sic) is the established ARG spelling, also used in
# components.yaml; renaming would have to happen in both places at once.
ARG CALULIX_VERSION
USER precice
#Download Calculix
WORKDIR /home/precice
RUN wget http://www.dhondt.de/ccx_${CALULIX_VERSION}.src.tar.bz2 && \
    tar xvjf ccx_${CALULIX_VERSION}.src.tar.bz2 && \
    rm -fv ccx_${CALULIX_VERSION}.src.tar.bz2
ARG CALULIX_ADAPTER_REF
WORKDIR /home/precice
# NOTE(review): the make variable is spelled CXX_VERSION here — confirm against the
# calculix-adapter Makefile, which commonly names the solver version CCX_VERSION.
RUN git clone https://github.com/precice/calculix-adapter.git && \
    cd calculix-adapter && \
    git checkout ${CALULIX_ADAPTER_REF} &&\
    make CXX_VERSION=${CALULIX_VERSION} ADDITIONAL_FFLAGS="-fallow-argument-mismatch" -j $(nproc) && \
    ln -s /home/precice/calculix-adapter/bin/ccx_preCICE /home/precice/.local/bin/ccx_preCICE

# SU2 adapter stage: based on the python_bindings stage, adds SU2 and its preCICE adapter.
FROM python_bindings as su2_adapter
COPY --from=precice /home/precice/.local /home/precice/.local
USER root
RUN apt-get -qq update && \
    apt-get -qq install swig
ARG SU2_VERSION
USER precice
# Download and build SU2 (We could also use pre-built binaries from the SU2 releases)
WORKDIR /home/precice
RUN wget https://github.com/su2code/SU2/archive/refs/tags/v${SU2_VERSION}.tar.gz && \
    tar xvzf v${SU2_VERSION}.tar.gz && \
    rm -fv v${SU2_VERSION}.tar.gz
RUN pip3 install --user mpi4py
ARG SU2_ADAPTER_REF
WORKDIR /home/precice
ENV SU2_RUN="/home/precice/SU2_RUN"
ENV SU2_HOME="/home/precice/SU2-${SU2_VERSION}"
ENV PATH="/home/precice/su2-adapter/run:$SU2_RUN:$PATH"
ENV PYTHONPATH="$SU2_RUN:$PYTHONPATH"
# NOTE(review): the adapter install script runs before SU2 itself is configured
# and built below — confirm this ordering is intended.
RUN git clone https://github.com/precice/su2-adapter.git && \
    cd su2-adapter &&\
    git checkout ${SU2_ADAPTER_REF} &&\
    ./su2AdapterInstall
RUN cd "${SU2_HOME}" &&\
    ./meson.py build -Denable-pywrapper=true --prefix=$SU2_RUN &&\
    ./ninja -C build install

View file

@ -0,0 +1,148 @@
import argparse
from metadata_parser.metdata import Tutorials, ReferenceResult
from systemtests.TestSuite import TestSuites
from systemtests.SystemtestArguments import SystemtestArguments
from systemtests.Systemtest import Systemtest
from pathlib import Path
from typing import List
from paths import PRECICE_TESTS_DIR, PRECICE_TUTORIAL_DIR
import hashlib
from jinja2 import Environment, FileSystemLoader
import tarfile
import subprocess
from datetime import datetime
import logging
from paths import PRECICE_TUTORIAL_DIR, PRECICE_TESTS_RUN_DIR, PRECICE_TESTS_DIR, PRECICE_REL_OUTPUT_DIR
import time
def create_tar_gz(source_folder: Path, output_filename: Path):
    """Pack `source_folder` into a gzipped tarball at `output_filename`.

    The top-level entry inside the archive is named after the output file
    with its ".tar.gz" suffix stripped.
    """
    archive_root = output_filename.name.replace(".tar.gz", "")
    with tarfile.open(output_filename, "w:gz") as archive:
        archive.add(source_folder, arcname=archive_root)
def get_machine_informations():
    """Collect `uname -a` and a vulnerability-filtered `lscpu` output of this machine.

    Returns:
        Tuple (uname output, lscpu output); each entry falls back to an
        explanatory placeholder string when the respective tool is unavailable.
    """
    def _available(tool: str) -> bool:
        # `which` exits with 0 iff the tool is on PATH
        try:
            return subprocess.call(['which', tool], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
        except FileNotFoundError:
            return False

    uname_info = "uname not available on the machine the systemtests were executed."
    lscpu_info = "lscpu not available on the machine the systemtests were executed."

    if _available("uname"):
        uname_result = subprocess.run(["uname", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        if uname_result.returncode == 0:
            uname_info = uname_result.stdout

    if _available("lscpu") and _available("grep"):
        lscpu_raw = subprocess.run(["lscpu"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        # Drop the CPU-vulnerability lines; they are irrelevant noise here
        filtered = subprocess.run(["grep", "-v", "Vulner"], input=lscpu_raw.stdout,
                                  stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        if filtered.returncode == 0:
            lscpu_info = filtered.stdout

    return (uname_info, lscpu_info)
def render_reference_results_info(
        reference_results: List[ReferenceResult],
        arguments_used: SystemtestArguments,
        time: str) -> str:
    """Render the content of a tutorial's reference_results.metadata file.

    Args:
        reference_results: The reference result archives to describe.
        arguments_used: The build arguments the results were generated with.
        time: Human-readable timestamp stamped onto every file entry.

    Returns:
        The rendered reference_results.metadata.template as a string.
    """
    def sha256sum(filename):
        # Hash in fixed-size chunks instead of using hashlib.file_digest(),
        # which is only available on Python >= 3.11.
        digest = hashlib.sha256()
        with open(filename, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                digest.update(chunk)
        return digest.hexdigest()

    files = []
    for reference_result in reference_results:
        files.append({
            'sha256': sha256sum(reference_result.path),
            'time': time,
            'name': reference_result.path.name,
        })
    uname, lscpu = get_machine_informations()
    render_dict = {
        'arguments': arguments_used.arguments,
        'files': files,
        'uname': uname,
        'lscpu': lscpu,
    }
    jinja_env = Environment(loader=FileSystemLoader(PRECICE_TESTS_DIR))
    template = jinja_env.get_template("reference_results.metadata.template")
    return template.render(render_dict)
def main():
    """Run every system test with the pinned reference versions and package the results.

    For each test defined in tests.yaml this runs the test with the build
    arguments from reference_versions.yaml, archives the preCICE output as the
    reference result (.tar.gz), and writes one reference_results.metadata file
    per tutorial.
    """
    parser = argparse.ArgumentParser(description='Generate reference data for systemtests')
    parser.add_argument('--rundir', type=str, help='Directory to run the systemtests in.',
                        nargs='?', const=PRECICE_TESTS_RUN_DIR, default=PRECICE_TESTS_RUN_DIR)
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO', help='Set the logging level')
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level, format='%(levelname)s: %(message)s')
    print(f"Using log-level: {args.log_level}")
    run_directory = Path(args.rundir)

    available_tutorials = Tutorials.from_path(PRECICE_TUTORIAL_DIR)
    test_suites = TestSuites.from_yaml(PRECICE_TESTS_DIR / "tests.yaml", available_tutorials)
    # Read in parameters
    build_args = SystemtestArguments.from_yaml(PRECICE_TESTS_DIR / "reference_versions.yaml")

    # A set avoids running identical tests that appear in multiple suites
    systemtests_to_run = set()
    for test_suite in test_suites:
        tutorials = test_suite.cases_of_tutorial.keys()
        for tutorial in tutorials:
            for case, reference_result in zip(
                    test_suite.cases_of_tutorial[tutorial], test_suite.reference_results[tutorial]):
                systemtests_to_run.add(
                    Systemtest(tutorial, build_args, case, reference_result))

    reference_result_per_tutorial = {}
    current_time_string = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f"About to run the following tests {systemtests_to_run}")
    # start=1 so the progress log reads "1/N" .. "N/N" instead of starting at 0
    for number, systemtest in enumerate(systemtests_to_run, start=1):
        logging.info(f"Started running {systemtest}, {number}/{len(systemtests_to_run)}")
        t = time.perf_counter()
        result = systemtest.run_for_reference_results(run_directory)
        elapsed_time = time.perf_counter() - t
        logging.info(f"Running {systemtest} took {elapsed_time} seconds")
        if not result.success:
            raise RuntimeError(f"Failed to execute {systemtest}")
        reference_result_per_tutorial[systemtest.tutorial] = []

    # Put the tar.gz in there
    for systemtest in systemtests_to_run:
        reference_result_folder = systemtest.get_system_test_dir() / PRECICE_REL_OUTPUT_DIR
        reference_result_per_tutorial[systemtest.tutorial].append(systemtest.reference_result)
        # create folder if needed
        systemtest.reference_result.path.parent.mkdir(parents=True, exist_ok=True)
        if reference_result_folder.exists():
            create_tar_gz(reference_result_folder, systemtest.reference_result.path)
        else:
            raise RuntimeError(
                f"Error executing: \n {systemtest} \n Could not find result folder {reference_result_folder}\n Probably the tutorial did not run through properly. Please check corresponding logs")

    # write readme
    for tutorial in reference_result_per_tutorial.keys():
        with open(tutorial.path / "reference_results.metadata", 'w') as file:
            ref_results_info = render_reference_results_info(
                reference_result_per_tutorial[tutorial], build_args, current_time_string)
            logging.info(f"Writing results for {tutorial.name}")
            file.write(ref_results_info)

    logging.info("Done. Please make sure to manually have a look into the reference results before making a PR.")


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,443 @@
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Tuple, Optional, Dict
import glob
import yaml
import itertools
from paths import PRECICE_TESTS_DIR, PRECICE_TUTORIAL_DIR
@dataclass
class BuildArgument:
    """Represents a build argument needed to run the docker container"""
    description: str
    """The description of the parameter."""
    key: str
    """The name of the parameter."""
    value_options: Optional[list] = None
    """The optional list of value options for the parameter. If none is supplied all values are accepted"""
    default: Optional[str] = None
    """The default value for the parameter."""

    @property
    def required(self) -> bool:
        """
        Check if the BuildArgument needs to be supplied via command-line arguments.

        Returns:
            bool: True if no default value exists, False otherwise.
        """
        # Compare against None explicitly so an intentionally empty default ("")
        # still counts as a provided default.
        return self.default is None

    def __eq__(self, other) -> bool:
        # Build arguments are identified by their key only
        if isinstance(other, BuildArgument):
            return self.key == other.key
        return False

    def __hash__(self) -> int:
        return hash(self.key)

    def __repr__(self) -> str:
        return f"{self.key}"
class BuildArguments:
    """Represents a collection of build_arguments used to build the docker images."""

    def __init__(self, arguments: List[BuildArgument]):
        self.arguments = arguments

    @classmethod
    def from_components_yaml(cls, data):
        """
        Create a BuildArguments collection from the components YAML data.

        Args:
            data: The parsed YAML dict of a single component (must contain a
                `build_arguments` mapping).

        Returns:
            A BuildArguments instance with one BuildArgument per entry.
        """
        arguments = []
        for argument_name, argument_dict in data['build_arguments'].items():
            # TODO maybe **params
            description = argument_dict.get(
                'description', f"No description provided for {argument_name}")
            key = argument_name
            default = argument_dict.get('default', None)
            # components.yaml uses both "options" and "value_options" for the
            # list of accepted values; accept either spelling.
            value_options = argument_dict.get(
                'value_options', argument_dict.get('options', None))
            arguments.append(BuildArgument(
                description, key, value_options, default))
        return cls(arguments)

    def __iter__(self):
        return iter(self.arguments)

    def __getitem__(self, index):
        return self.arguments[index]

    def __setitem__(self, index, value):
        self.arguments[index] = value

    def __len__(self):
        return len(self.arguments)

    def __repr__(self) -> str:
        return f"{self.arguments}"
@dataclass
class Component:
    """
    Represents a component like e.g the openfoam-adapter
    """
    name: str
    template: str
    repository: str
    parameters: BuildArguments

    def __eq__(self, other):
        # Components are identified by their name alone.
        return isinstance(other, Component) and self.name == other.name

    def __repr__(self) -> str:
        return f"{self.name}"
class Components(list):
    """
    Represents the collection of components read in from the components.yaml
    """

    def __init__(self, components: List[Component]):
        self.components = components

    @classmethod
    def from_yaml(cls, path):
        """
        Creates a Components instance from a YAML file.

        Args:
            path: The path to the YAML file.

        Returns:
            An instance of Components.
        """
        with open(path, 'r') as f:
            data = yaml.safe_load(f)
        parsed = [
            Component(
                component_name,
                data[component_name]["template"],
                data[component_name]["repository"],
                BuildArguments.from_components_yaml(data[component_name]),
            )
            for component_name in data
        ]
        return cls(parsed)

    def __iter__(self):
        return iter(self.components)

    def __getitem__(self, index):
        return self.components[index]

    def __setitem__(self, index, value):
        self.components[index] = value

    def __len__(self):
        return len(self.components)

    def get_by_name(self, name_to_search):
        """
        Retrieves a component by its name.

        Args:
            name_to_search: The name of the component to search for.

        Returns:
            The component with the specified name, or None if not found.
        """
        return next(
            (component for component in self.components
             if component.name == name_to_search),
            None)
@dataclass
class Participant:
    """Represents a participant in a coupled simulation"""
    name: str
    # The name of the participant.

    def __eq__(self, other) -> bool:
        # Participants are identified by their name alone.
        return isinstance(other, Participant) and self.name == other.name

    def __repr__(self) -> str:
        return f"{self.name}"
# Forward declaration of tutorial
class Tutorial:
    """Forward declaration so classes above/below can reference Tutorial in annotations; the real dataclass is defined further down in this file."""
    pass
@dataclass
class Case:
    """
    Represents a case inside of a tutorial.
    """
    name: str
    participant: str
    path: Path
    run_cmd: str
    # Back-reference to the owning Tutorial. Excluded from __init__ and has
    # no default: it is assigned later in Tutorial.__post_init__, so the
    # attribute does not exist until then.
    tutorial: Tutorial = field(init=False)
    component: Component

    def __post_init__(self):
        """
        Performs sanity checks after initializing the Case instance.

        Raises:
            Exception: If the component is falsy, i.e. get_by_name() in
                from_dict() could not resolve it.
        """
        if not self.component:
            raise Exception(
                f'Tried to instantiate the case {self.name} but failed. Reason: Could not find the component it uses in the components.yaml file.')

    @classmethod
    def from_dict(cls, name, dict, available_components):
        """
        Creates a Case instance from a the tutorial yaml dict.

        Args:
            name: The name of the case.
            dict: The dictionary containing the case data.
            available_components: Components read from the components.yaml file

        Returns:
            An instance of the Case but without the tutorial set, this needs to be done later
        """
        participant = dict["participant"]
        path = Path(dict["directory"])
        run_cmd = dict["run"]
        # May be None for unknown component names; __post_init__ rejects that.
        component = available_components.get_by_name(dict["component"])
        return cls(name, participant, path, run_cmd, component)

    def __repr__(self) -> str:
        return f"{self.name}"

    def __hash__(self) -> int:
        # Hash of the stringified identity tuple. NOTE(review): this reads
        # self.tutorial, so hashing a Case before Tutorial.__post_init__ has
        # assigned it raises AttributeError.
        return hash(f"{self.name,self.participant,self.component,self.tutorial}")

    def __eq__(self, other) -> bool:
        # Identity is the tuple (name, participant, component, tutorial).
        if isinstance(other, Case):
            return (
                self.name == other.name) and (
                self.participant == other.participant) and (
                self.component == other.component) and (
                self.tutorial == other.tutorial)
        return False
@dataclass
class CaseCombination:
    """Represents a case combination able to run the tutorial"""
    cases: Tuple[Case]
    tutorial: Tutorial

    def __eq__(self, other) -> bool:
        # Order-insensitive comparison of the contained cases.
        return isinstance(other, CaseCombination) and set(self.cases) == set(other.cases)

    def __repr__(self) -> str:
        return f"{self.cases}"

    @classmethod
    def from_string_list(cls, case_names: List[str], tutorial: Tutorial):
        # Resolve each case name via the tutorial, preserving order.
        resolved = tuple(tutorial.get_case_by_string(name) for name in case_names)
        return cls(resolved, tutorial)

    @classmethod
    def from_cases_tuple(cls, cases: Tuple[Case], tutorial: Tutorial):
        return cls(cases, tutorial)
@dataclass
class ReferenceResult:
    """A reference-result archive belonging to one case combination."""
    path: Path
    case_combination: CaseCombination

    def __post_init__(self):
        # Expand the relative path from the metadata into the full path
        # below the tutorials directory.
        self.path = PRECICE_TUTORIAL_DIR / self.path

    def __repr__(self) -> str:
        return f"{self.path.as_posix()}"
@dataclass
class Tutorial:
    """
    Represents a tutorial with various attributes and methods.
    """
    name: str
    path: Path
    url: str
    participants: List[str]
    cases: List[Case]
    # All possible case combinations (one case per participant); computed
    # in __post_init__.
    case_combinations: List[CaseCombination] = field(init=False)

    def __post_init__(self):
        # Give every case a back-reference to its owning tutorial.
        for case in self.cases:
            case.tutorial = self

        # get all case combinations
        def get_all_possible_case_combinations(tutorial: Tutorial):
            """Cartesian product of the cases over all participants."""
            case_combinations = []
            cases_dict = {}
            for participant in tutorial.participants:
                cases_dict[participant] = []
            for case in tutorial.cases:
                cases_dict[case.participant].append(case)
            for combination in itertools.product(*[cases_dict[participant] for participant in tutorial.participants]):
                # Consistency fix: use the `tutorial` parameter instead of the
                # closed-over `self` the helper previously mixed in.
                case_combinations.append(CaseCombination.from_cases_tuple(combination, tutorial))
            return case_combinations

        self.case_combinations = get_all_possible_case_combinations(self)

    def __eq__(self, other) -> bool:
        # Tutorials are identified by name and path.
        if isinstance(other, Tutorial):
            return (self.name == other.name) and (self.path == other.path)
        return False

    def __hash__(self) -> int:
        return hash(self.path)

    def __repr__(self) -> str:
        """
        Returns a string representation of the Tutorial.
        """
        return f"""\n{self.name}:
        Path: {self.path}
        URL: {self.url}
        Participants: {self.participants}
        Cases: {self.cases}
        """

    def get_case_by_string(self, case_name: str) -> Optional[Case]:
        """
        Retrieves Optional case based on the case_name

        Args:
            case_name: the name of the case in search

        Returns:
            Either None or a Case matching the case name
        """
        for case in self.cases:
            if case.name == case_name:
                return case
        return None

    @classmethod
    def from_yaml(cls, path, available_components):
        """
        Creates a Tutorial instance from a YAML file.

        Args:
            path: The path to the YAML file.
            available_components: The Components instance containing available components.

        Returns:
            An instance of Tutorial.
        """
        with open(path, 'r') as f:
            data = yaml.safe_load(f)
        name = data['name']
        # Note: `path` is rebound from the metadata file location to the
        # tutorial folder inside the tutorials repository.
        path = PRECICE_TUTORIAL_DIR / data['path']
        url = data['url']
        participants = data.get('participants', [])
        cases_raw = data.get('cases', {})
        cases = []
        for case_name in cases_raw.keys():
            cases.append(Case.from_dict(
                case_name, cases_raw[case_name], available_components))
        return cls(name, path, url, participants, cases)
class Tutorials(list):
    """
    Represents a collection of tutorials.
    """

    def __init__(self, tutorials: List[Tutorial]):
        """
        Initializes the Tutorials instance with a list of tutorials.

        Args:
            tutorials: The list of tutorials.
        """
        self.tutorials = tutorials

    def __iter__(self):
        return iter(self.tutorials)

    def __getitem__(self, index):
        return self.tutorials[index]

    def __setitem__(self, index, value):
        self.tutorials[index] = value

    def __len__(self):
        return len(self.tutorials)

    def get_by_path(self, relative_path: str) -> Optional[Tutorial]:
        """
        Retrieves a Tutorial by the name of its folder.

        Args:
            relative_path: The folder name of the Tutorial to search for.

        Returns:
            The Tutorial with the specified folder name, or None if not found.
        """
        # Only the last path component (folder name) is compared.
        return next(
            (tutorial for tutorial in self.tutorials
             if tutorial.path.name == relative_path),
            None)

    @classmethod
    def from_path(cls, path):
        """
        Read ins all the metadata.yaml files available in path/*/metadata.yaml

        Args:
            path: The path containing the tutorial folders
        """
        available_components = Components.from_yaml(
            PRECICE_TESTS_DIR / "components.yaml")
        found = []
        for yaml_path in glob.glob(f'{path}/*/metadata.yaml'):
            found.append(Tutorial.from_yaml(yaml_path, available_components))
        return cls(found)

View file

@ -0,0 +1,7 @@
from pathlib import Path
# Root of the tutorials repository (three levels above this file).
PRECICE_TUTORIAL_DIR = Path(__file__).parent.parent.parent
# Default directory the system tests are executed in.
PRECICE_TESTS_RUN_DIR = Path(__file__).parent.parent.parent / "runs"
# Directory containing the tools (two levels above this file).
PRECICE_TOOLS_DIR = Path(__file__).parent.parent
# Directory containing this file and the system-test assets
# (templates, components.yaml, tests.yaml, dockerfiles, ...).
PRECICE_TESTS_DIR = Path(__file__).parent
# Folder name (relative to a tutorial run) holding the precice exports.
PRECICE_REL_OUTPUT_DIR = "precice-exports"
# Folder name (relative to a tutorial run) the reference results are unpacked into.
PRECICE_REL_REFERENCE_DIR = "reference-results-unpacked"

View file

@ -0,0 +1,32 @@
import yaml
from metadata_parser.metdata import Tutorials
from paths import PRECICE_TUTORIAL_DIR
import argparse
import logging
def main():
    """Print, as YAML, every possible case combination of every tutorial."""
    parser = argparse.ArgumentParser(description='Prints available Metadata for tutorials')
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO', help='Set the logging level')
    args = parser.parse_args()

    # Configure logging based on the provided log level
    logging.basicConfig(level=args.log_level, format='%(levelname)s: %(message)s')
    print(f"Using log-level: {args.log_level}")

    # Map each tutorial folder name to the string form of its combinations.
    tutorials_found = {
        tutorial.path.name: [f"{combination}" for combination in tutorial.case_combinations]
        for tutorial in Tutorials.from_path(PRECICE_TUTORIAL_DIR)
    }
    print(yaml.dump(tutorials_found))


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,26 @@
from metadata_parser.metdata import Tutorials
from paths import PRECICE_TUTORIAL_DIR
import argparse
import logging
def main():
    """Print the metadata of every tutorial found below the tutorials directory."""
    parser = argparse.ArgumentParser(description='Prints available Metadata for tutorials')
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO', help='Set the logging level')
    args = parser.parse_args()
    # Configure logging based on the provided log level
    logging.basicConfig(level=args.log_level, format='%(levelname)s: %(message)s')
    print(f"Using log-level: {args.log_level}")
    available_tutorials = Tutorials.from_path(PRECICE_TUTORIAL_DIR)
    # Fixed typo in the user-facing message ("Fount" -> "Found").
    print("Found the following tutorials read from the metadata.yaml")
    for tutorial in available_tutorials:
        print(tutorial)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,29 @@
from metadata_parser.metdata import Tutorials
from systemtests.TestSuite import TestSuites
from paths import PRECICE_TESTS_DIR, PRECICE_TUTORIAL_DIR
import argparse
import logging
def main():
    """Print all test suites defined in tests.yaml."""
    parser = argparse.ArgumentParser(description='Prints available Test Suites')
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO', help='Set the logging level')
    args = parser.parse_args()

    # Configure logging based on the provided log level
    logging.basicConfig(level=args.log_level, format='%(levelname)s: %(message)s')
    print(f"Using log-level: {args.log_level}")

    tutorials = Tutorials.from_path(PRECICE_TUTORIAL_DIR)
    print(TestSuites.from_yaml(PRECICE_TESTS_DIR / "tests.yaml", tutorials))


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,34 @@
<!---
This file has been generated by generate_reference_results.py and should not be modified manually.
-->
# Reference Results
This file contains an overview of the reference results as well as the arguments used to generate them.
We also include some information about the machine used to generate them.
## List of files
| name | time | sha256 |
|------|------|-------|
{% for file in files -%}
| {{ file.name }} | {{ file.time }} | {{ file.sha256 }} |
{% endfor %}
## List of arguments used to generate the files
| name | value |
|------|------|
{% for name,value in arguments.items() -%}
| {{ name }} | {{ value }} |
{% endfor -%}
## Information about the machine
### uname -a
{{ uname }}
### lscpu
{{ lscpu }}

View file

@ -0,0 +1,11 @@
PRECICE_REF: "v3.1.1"
OPENFOAM_EXECUTABLE: "openfoam2312"
OPENFOAM_ADAPTER_REF: "v1.3.0"
PYTHON_BINDINGS_REF: "v3.1.0"
FENICS_ADAPTER_REF: "v2.1.0"
TUTORIALS_REF: "340b447" # April 12, 2024, just before v202404.0
PLATFORM: "ubuntu_2204"
CALULIX_VERSION: "2.20"
CALULIX_ADAPTER_REF: "v2.20.1"
SU2_VERSION: "7.5.1"
SU2_ADAPTER_REF: "64d4aff" # Distribution v2404

View file

@ -0,0 +1,2 @@
jinja2
pyyaml

View file

@ -0,0 +1,96 @@
import argparse
from pathlib import Path
from systemtests.SystemtestArguments import SystemtestArguments
from systemtests.Systemtest import Systemtest, display_systemtestresults_as_table
from systemtests.TestSuite import TestSuites
from metadata_parser.metdata import Tutorials, Case
import logging
import time
from paths import PRECICE_TUTORIAL_DIR, PRECICE_TESTS_RUN_DIR, PRECICE_TESTS_DIR
def main():
    """CLI entry point: resolve the requested test suites into individual
    systemtests, run them sequentially, print a summary table and exit
    non-zero if any systemtest failed."""
    parser = argparse.ArgumentParser(description='systemtest')

    # Add an argument for the components
    parser.add_argument('--suites', type=str,
                        help='Comma-separated test-suites to execute')
    parser.add_argument(
        '--build_args',
        type=str,
        help='Comma-separated list of arguments provided to the components like openfoam:2102,pythonbindings:latest')
    parser.add_argument('--rundir', type=str, help='Directory to run the systemstests in.',
                        nargs='?', const=PRECICE_TESTS_RUN_DIR, default=PRECICE_TESTS_RUN_DIR)
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO', help='Set the logging level')

    # Parse the command-line arguments
    args = parser.parse_args()

    # Configure logging based on the provided log level
    logging.basicConfig(level=args.log_level, format='%(levelname)s: %(message)s')
    print(f"Using log-level: {args.log_level}")

    systemtests_to_run = []
    available_tutorials = Tutorials.from_path(PRECICE_TUTORIAL_DIR)
    build_args = SystemtestArguments.from_args(args.build_args)
    run_directory = Path(args.rundir)
    if args.suites:
        # Resolve the requested suite names against tests.yaml; unknown
        # names are logged and skipped, but at least one must match.
        test_suites_requested = args.suites.split(',')
        available_testsuites = TestSuites.from_yaml(
            PRECICE_TESTS_DIR / "tests.yaml", available_tutorials)
        test_suites_to_execute = []
        for test_suite_requested in test_suites_requested:
            test_suite_found = available_testsuites.get_by_name(
                test_suite_requested)
            if not test_suite_found:
                logging.error(f"Did not find the testsuite with name {test_suite_requested}")
            else:
                test_suites_to_execute.append(test_suite_found)
        if not test_suites_to_execute:
            raise RuntimeError(
                f"No matching test suites with names {test_suites_requested} found. Use print_test_suites.py to get an overview")
        # now convert the test_suites into systemtests
        for test_suite in test_suites_to_execute:
            tutorials = test_suite.cases_of_tutorial.keys()
            for tutorial in tutorials:
                # Each case combination is paired with its reference result.
                for case, reference_result in zip(
                        test_suite.cases_of_tutorial[tutorial], test_suite.reference_results[tutorial]):
                    systemtests_to_run.append(
                        Systemtest(tutorial, build_args, case, reference_result))

    if not systemtests_to_run:
        raise RuntimeError("Did not find any Systemtests to execute.")
    logging.info(f"About to run the following systemtest in the directory {run_directory}:\n {systemtests_to_run}")

    # Run all systemtests sequentially, timing each one.
    results = []
    for number, systemtest in enumerate(systemtests_to_run):
        logging.info(f"Started running {systemtest}, {number}/{len(systemtests_to_run)}")
        t = time.perf_counter()
        result = systemtest.run(run_directory)
        elapsed_time = time.perf_counter() - t
        logging.info(f"Running {systemtest} took {elapsed_time} seconds")
        results.append(result)

    # Summarize and pick the process exit code: 0 only if all succeeded.
    system_test_success = True
    for result in results:
        if not result.success:
            logging.error(f"Failed to run {result.systemtest}")
            system_test_success = False
        else:
            logging.info(f"Success running {result.systemtest}")

    display_systemtestresults_as_table(results)

    if system_test_success:
        exit(0)
    else:
        exit(1)


if __name__ == '__main__':
    main()

View file

@ -0,0 +1,629 @@
import subprocess
from typing import List, Dict, Optional
from jinja2 import Environment, FileSystemLoader
from dataclasses import dataclass, field
import shutil
from pathlib import Path
from paths import PRECICE_REL_OUTPUT_DIR, PRECICE_TOOLS_DIR, PRECICE_REL_REFERENCE_DIR, PRECICE_TESTS_DIR, PRECICE_TUTORIAL_DIR
from metadata_parser.metdata import Tutorial, CaseCombination, Case, ReferenceResult
from .SystemtestArguments import SystemtestArguments
from datetime import datetime
import tarfile
import time
import unicodedata
import re
import logging
import os
# Maximum seconds granted to one docker compose build/up/fieldcompare run.
GLOBAL_TIMEOUT = 600
# Seconds granted to a killed process to shut down and deliver its output.
SHORT_TIMEOUT = 10
def slugify(value, allow_unicode=False):
    """
    Taken from https://github.com/django/django/blob/master/django/utils/text.py
    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
    dashes to single dashes. Remove characters that aren't alphanumerics,
    underscores, or hyphens. Convert to lowercase. Also strip leading and
    trailing whitespace, dashes, and underscores.
    """
    text = str(value)
    if allow_unicode:
        text = unicodedata.normalize('NFKC', text)
    else:
        # Decompose accents, then drop everything outside ASCII.
        normalized = unicodedata.normalize('NFKD', text)
        text = normalized.encode('ascii', 'ignore').decode('ascii')
    cleaned = re.sub(r'[^\w\s-]', '', text.lower())
    collapsed = re.sub(r'[-\s]+', '-', cleaned)
    return collapsed.strip('-_')
class Systemtest:
    """Forward declaration so the result dataclasses below can reference Systemtest; the real dataclass is defined further down in this file."""
    pass
@dataclass
class DockerComposeResult:
    """Outcome of one `docker compose` invocation (build or up)."""
    exit_code: int
    stdout_data: List[str]
    stderr_data: List[str]
    systemtest: Systemtest
    runtime: float  # in seconds
@dataclass
class FieldCompareResult:
    """Outcome of the fieldcompare run against the reference results."""
    exit_code: int
    stdout_data: List[str]
    stderr_data: List[str]
    systemtest: Systemtest
    runtime: float  # in seconds
@dataclass
class SystemtestResult:
    """Aggregated outcome of a full systemtest run (build, solve, compare)."""
    success: bool
    stdout_data: List[str]
    stderr_data: List[str]
    systemtest: Systemtest
    build_time: float  # in seconds
    solver_time: float  # in seconds
    fieldcompare_time: float  # in seconds
def display_systemtestresults_as_table(results: List[SystemtestResult]):
    """
    Print an overview table of all system test results.
    """
    # Column width is driven by the longest systemtest name.
    name_width = max(len(str(result.systemtest)) for result in results)

    header = f"| {'systemtest':<{name_width + 2}} | {'success':^7} | {'building time [s]':^17} | {'solver time [s]':^15} | {'fieldcompare time [s]':^21} |"
    separator = "+-" + "-" * (name_width + 2) + \
        "-+---------+-------------------+-----------------+-----------------------+"

    print(separator)
    print(header)
    print(separator)
    for result in results:
        print(
            f"| {str(result.systemtest):<{name_width + 2}} | {result.success:^7} | {result.build_time:^17.2f} | {result.solver_time:^15.2f} | {result.fieldcompare_time:^21.2f} |")
    print(separator)
@dataclass
class Systemtest:
"""
Represents a system test by specifing the cases and the corresponding Tutorial
"""
tutorial: Tutorial
arguments: SystemtestArguments
case_combination: CaseCombination
reference_result: ReferenceResult
params_to_use: Dict[str, str] = field(init=False)
env: Dict[str, str] = field(init=False)
def __eq__(self, other) -> bool:
if isinstance(other, Systemtest):
return (
self.tutorial == other.tutorial) and (
self.arguments == other.arguments) and (
self.case_combination == other.case_combination)
return False
def __hash__(self) -> int:
return hash(f"{self.tutorial,self.arguments,self.case_combination}")
def __post_init__(self):
    # Resolve which build arguments to use (validates required ones) and
    # start with an empty environment for the docker compose runs.
    self.__init_args_to_use()
    self.env = {}
def __init_args_to_use(self):
    """
    Checks if all required parameters for the realisation of the cases are supplied in the cmdline arguments.
    If a parameter is missing and it's required, an exception is raised.
    Otherwise, the default value is used if available.
    In the end it populates the args_to_use dict

    Raises:
        Exception: If a required parameter is missing.
    """
    self.params_to_use = {}
    # Union of build arguments over all cases; BuildArgument hashes by key,
    # so the same argument required by several components collapses to one.
    needed_parameters = set()
    for case in self.case_combination.cases:
        needed_parameters.update(case.component.parameters)

    for needed_param in needed_parameters:
        if self.arguments.contains(needed_param.key):
            self.params_to_use[needed_param.key] = self.arguments.get(
                needed_param.key)
        else:
            if needed_param.required:
                raise Exception(
                    f"{needed_param} is needed to be given via --params to instantiate the systemtest for {self.tutorial.name}")
            else:
                # Fall back to the default declared in components.yaml.
                self.params_to_use[needed_param.key] = needed_param.default
def __get_docker_services(self) -> Dict[str, str]:
    """
    Renders the service templates for each case using the parameters to use.

    Returns:
        A dictionary of rendered services per case name.

    Raises:
        ValueError: If the dockerfile folder for the requested PLATFORM
            does not exist.
    """
    # NOTE(review): dict.get() returns None instead of raising, so this
    # except branch is effectively dead; a missing PLATFORM surfaces via
    # the exists() check below instead.
    try:
        plaform_requested = self.params_to_use.get("PLATFORM")
    except Exception as exc:
        raise KeyError("Please specify a PLATFORM argument") from exc
    # Side effect: remembered for later use in __get_docker_compose_file.
    self.dockerfile_context = PRECICE_TESTS_DIR / "dockerfiles" / Path(plaform_requested)
    if not self.dockerfile_context.exists():
        raise ValueError(
            f"The path {self.dockerfile_context.resolve()} resulting from argument PLATFORM={plaform_requested} could not be found in the system")

    def render_service_template_per_case(case: Case, params_to_use: Dict[str, str]) -> str:
        # Render the component's docker-compose service snippet for one case.
        render_dict = {
            'run_directory': self.run_directory.resolve(),
            'tutorial_folder': self.tutorial_folder,
            'build_arguments': params_to_use,
            'params': params_to_use,
            'case_folder': case.path,
            'run': case.run_cmd,
            'dockerfile_context': self.dockerfile_context,
        }
        jinja_env = Environment(loader=FileSystemLoader(PRECICE_TESTS_DIR))
        template = jinja_env.get_template(case.component.template)
        return template.render(render_dict)

    rendered_services = {}
    for case in self.case_combination.cases:
        rendered_services[case.name] = render_service_template_per_case(
            case, self.params_to_use)
    return rendered_services
def __get_docker_compose_file(self):
    # Render the top-level docker-compose file embedding all case services.
    # The __get_docker_services() call also sets self.dockerfile_context,
    # which is used in the render dict below.
    rendered_services = self.__get_docker_services()
    render_dict = {
        'run_directory': self.run_directory.resolve(),
        'tutorial_folder': self.tutorial_folder,
        'tutorial': self.tutorial.path.name,
        'services': rendered_services,
        'build_arguments': self.params_to_use,
        'dockerfile_context': self.dockerfile_context,
        'precice_output_folder': PRECICE_REL_OUTPUT_DIR,
    }
    jinja_env = Environment(loader=FileSystemLoader(PRECICE_TESTS_DIR))
    template = jinja_env.get_template("docker-compose.template.yaml")
    return template.render(render_dict)
def __get_field_compare_compose_file(self):
    # Render the docker-compose file for the fieldcompare run, pointing it
    # at the precice exports and the unpacked reference results (folder
    # name derived from the archive name without its ".tar.gz" suffix).
    render_dict = {
        'run_directory': self.run_directory.resolve(),
        'tutorial_folder': self.tutorial_folder,
        'precice_output_folder': PRECICE_REL_OUTPUT_DIR,
        'reference_output_folder': PRECICE_REL_REFERENCE_DIR + "/" + self.reference_result.path.name.replace(".tar.gz", ""),
    }
    jinja_env = Environment(loader=FileSystemLoader(PRECICE_TESTS_DIR))
    template = jinja_env.get_template(
        "docker-compose.field_compare.template.yaml")
    return template.render(render_dict)
def _get_git_ref(self, repository: Path, abbrev_ref=False) -> Optional[str]:
    """
    Return the current Git ref of the given repository.

    Args:
        repository: Path to the Git repository to query.
        abbrev_ref: If True, return the abbreviated ref (e.g. the branch
            name) instead of the full commit hash.

    Returns:
        The stripped output of `git rev-parse [--abbrev-ref] HEAD`.

    Raises:
        RuntimeError: If the git invocation fails or times out.
    """
    # Bug fix: previously "--abbrev-ref" *replaced* "HEAD" instead of being
    # passed in addition to it, so abbrev_ref=True ran
    # `git rev-parse --abbrev-ref` without a revision and failed.
    cmd = ["git", "-C", os.fspath(repository.resolve()), "rev-parse"]
    if abbrev_ref:
        cmd.append("--abbrev-ref")
    cmd.append("HEAD")
    try:
        result = subprocess.run(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, text=True, check=True, timeout=60)
        return result.stdout.strip()
    except Exception as e:
        raise RuntimeError(f"An error occurred while getting the current Git ref: {e}") from e
def _fetch_ref(self, repository: Path, ref: str):
    """
    Fetch the latest refs from the remote for the given repository.

    Args:
        repository: Path to the Git repository.
        ref: The ref this fetch is performed for (used only in the error
            message; `git fetch` itself fetches everything).

    Raises:
        RuntimeError: If the git invocation fails or times out.
    """
    try:
        # check=True already raises CalledProcessError on a non-zero exit
        # code, so the previous manual returncode check was dead code.
        subprocess.run([
            "git",
            "-C", os.fspath(repository.resolve()),
            "fetch"
        ], check=True, timeout=60)
    except Exception as e:
        # Chain the original exception, consistent with _get_git_ref.
        raise RuntimeError(f"An error occurred while fetching origin '{ref}': {e}") from e
def _checkout_ref_in_subfolder(self, repository: Path, subfolder: Path, ref: str):
    """
    Check out `ref` for a single subfolder of the repository, leaving the
    rest of the working tree untouched.

    Args:
        repository: Path to the Git repository.
        subfolder: The folder whose content is checked out at `ref`.
        ref: The Git ref to check out.

    Raises:
        RuntimeError: If the git invocation fails or times out.
    """
    try:
        # check=True already raises CalledProcessError on a non-zero exit
        # code, so the previous manual returncode check was dead code.
        subprocess.run([
            "git",
            "-C", os.fspath(repository.resolve()),
            "checkout", ref,
            "--", os.fspath(subfolder.resolve())
        ], check=True, timeout=60)
    except Exception as e:
        # Chain the original exception, consistent with _get_git_ref.
        raise RuntimeError(f"An error occurred while checking out '{ref}' for folder '{repository}': {e}") from e
def __copy_tutorial_into_directory(self, run_directory: Path):
    """
    Checks out the requested tutorial ref and copies the entire tutorial into a folder to prepare for running.

    Side effects: sets self.run_directory, self.tutorial_folder and
    self.system_test_dir.
    """
    current_time_string = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    self.run_directory = run_directory
    # Remember the currently checked-out ref so it can be restored below.
    current_ref = self._get_git_ref(PRECICE_TUTORIAL_DIR)
    ref_requested = self.params_to_use.get("TUTORIALS_REF")
    if ref_requested:
        logging.debug(f"Checking out tutorials {ref_requested} before copying")
        self._fetch_ref(PRECICE_TUTORIAL_DIR, ref_requested)
        self._checkout_ref_in_subfolder(PRECICE_TUTORIAL_DIR, self.tutorial.path, ref_requested)
    # Unique folder name: tutorial + case combination + timestamp.
    self.tutorial_folder = slugify(f'{self.tutorial.path.name}_{self.case_combination.cases}_{current_time_string}')
    destination = run_directory / self.tutorial_folder
    src = self.tutorial.path
    self.system_test_dir = destination
    shutil.copytree(src, destination)
    if ref_requested:
        # Record which ref was used, then restore the previous checkout.
        with open(destination / "tutorials_ref", 'w') as file:
            file.write(ref_requested)
        self._checkout_ref_in_subfolder(PRECICE_TUTORIAL_DIR, self.tutorial.path, current_ref)
def __copy_tools(self, run_directory: Path):
    # Copy the tools folder next to the tutorial; if a previous systemtest
    # in the same run directory already copied it, keep the existing copy.
    try:
        shutil.copytree(PRECICE_TOOLS_DIR, run_directory / "tools")
    except Exception as e:
        logging.debug(f"tools are already copied: {e} ")
def __put_gitignore(self, run_directory: Path):
# Create the .gitignore file with a single asterisk
gitignore_file = run_directory / ".gitignore"
with gitignore_file.open("w") as file:
file.write("*")
def __cleanup(self):
    # Remove the whole run directory. Currently unused: the call in run()
    # is commented out so artifacts stay available for inspection.
    shutil.rmtree(self.run_directory)
def __get_uid_gid(self):
try:
uid = int(subprocess.check_output(["id", "-u"]).strip())
gid = int(subprocess.check_output(["id", "-g"]).strip())
return uid, gid
except Exception as e:
logging.error("Error getting group and user id: ", e)
def __write_env_file(self):
with open(self.system_test_dir / ".env", "w") as env_file:
for key, value in self.env.items():
env_file.write(f"{key}={value}\n")
def __unpack_reference_results(self):
    # Unpack the reference-results tarball into the run folder so the
    # fieldcompare container can diff against it.
    # NOTE(review): extractall() is used without a member filter; fine for
    # trusted, self-generated archives, but unsafe for untrusted tarballs.
    with tarfile.open(self.reference_result.path) as reference_results_tared:
        # specify which folder to extract to
        reference_results_tared.extractall(self.system_test_dir / PRECICE_REL_REFERENCE_DIR)
        logging.debug(
            f"extracting {self.reference_result.path} into {self.system_test_dir / PRECICE_REL_REFERENCE_DIR}")
def _run_field_compare(self):
    """
    Compare the produced precice exports against the unpacked reference
    results by running the field-compare service via docker compose.

    Returns:
        A FieldCompareResult object containing the state.
    """
    logging.debug(f"Running fieldcompare for {self}")
    time_start = time.perf_counter()
    self.__unpack_reference_results()
    docker_compose_content = self.__get_field_compare_compose_file()
    stdout_data = []
    stderr_data = []
    with open(self.system_test_dir / "docker-compose.field_compare.yaml", 'w') as file:
        file.write(docker_compose_content)
    try:
        # Execute docker-compose command
        process = subprocess.Popen(['docker',
                                    'compose',
                                    '--file',
                                    'docker-compose.field_compare.yaml',
                                    'up',
                                    '--exit-code-from',
                                    'field-compare'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   start_new_session=True,
                                   cwd=self.system_test_dir)
        try:
            stdout, stderr = process.communicate(timeout=GLOBAL_TIMEOUT)
        except KeyboardInterrupt as k:
            process.kill()
            raise KeyboardInterrupt from k
        except Exception as e:
            logging.critical(
                f"Systemtest {self} had serious issues executing the docker compose command about to kill the docker compose command. Please check the logs! {e}")
            process.kill()
            # Bug fix: capture the output of the killed process; it was
            # previously discarded, leaving stdout/stderr unbound below.
            stdout, stderr = process.communicate(timeout=SHORT_TIMEOUT)
        stdout_data.extend(stdout.decode().splitlines())
        stderr_data.extend(stderr.decode().splitlines())
        process.poll()
        elapsed_time = time.perf_counter() - time_start
        return FieldCompareResult(process.returncode, stdout_data, stderr_data, self, elapsed_time)
    except Exception as e:
        # Bug fix: this previously called logging.CRITICAL (an int constant),
        # which raised TypeError and masked the original error.
        logging.critical(f"Error executing docker compose command: {e}")
        elapsed_time = time.perf_counter() - time_start
        return FieldCompareResult(1, stdout_data, stderr_data, self, elapsed_time)
def _build_docker(self):
    """
    Builds the docker images for all services of this system test.

    Returns:
        A DockerComposeResult object containing the state.
    """
    logging.debug(f"Building docker image for {self}")
    time_start = time.perf_counter()
    docker_compose_content = self.__get_docker_compose_file()
    with open(self.system_test_dir / "docker-compose.tutorial.yaml", 'w') as file:
        file.write(docker_compose_content)
    stdout_data = []
    stderr_data = []
    try:
        # Execute docker-compose command
        process = subprocess.Popen(['docker',
                                    'compose',
                                    '--file',
                                    'docker-compose.tutorial.yaml',
                                    'build'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   start_new_session=True,
                                   cwd=self.system_test_dir)
        try:
            stdout, stderr = process.communicate(timeout=GLOBAL_TIMEOUT)
        except KeyboardInterrupt as k:
            process.kill()
            raise KeyboardInterrupt from k
        except Exception as e:
            logging.critical(
                f"systemtest {self} had serious issues building the docker images via the `docker compose build` command. About to kill the docker compose command. Please check the logs! {e}")
            # Bug fix: kill first, then collect output. Previously
            # communicate() ran before kill() with its result discarded,
            # leaving stdout/stderr unbound below.
            process.kill()
            stdout, stderr = process.communicate(timeout=SHORT_TIMEOUT)
        stdout_data.extend(stdout.decode().splitlines())
        stderr_data.extend(stderr.decode().splitlines())
        elapsed_time = time.perf_counter() - time_start
        return DockerComposeResult(process.returncode, stdout_data, stderr_data, self, elapsed_time)
    except Exception as e:
        logging.critical(f"Error executing docker compose build command: {e}")
        elapsed_time = time.perf_counter() - time_start
        return DockerComposeResult(1, stdout_data, stderr_data, self, elapsed_time)
def _run_tutorial(self):
    """
    Runs the coupled tutorial via `docker compose up`.

    Returns:
        A DockerComposeResult object containing the state.
    """
    logging.debug(f"Running tutorial {self}")
    time_start = time.perf_counter()
    stdout_data = []
    stderr_data = []
    try:
        # Execute docker-compose command
        process = subprocess.Popen(['docker',
                                    'compose',
                                    '--file',
                                    'docker-compose.tutorial.yaml',
                                    'up'],
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   start_new_session=True,
                                   cwd=self.system_test_dir)
        try:
            stdout, stderr = process.communicate(timeout=GLOBAL_TIMEOUT)
        except KeyboardInterrupt as k:
            process.kill()
            raise KeyboardInterrupt from k
        except Exception as e:
            logging.critical(
                f"Systemtest {self} had serious issues executing the docker compose command about to kill the docker compose command. Please check the logs! {e}")
            process.kill()
            # Collect whatever output the killed process produced.
            # (Removed a redundant second kill() that followed here.)
            stdout, stderr = process.communicate(timeout=SHORT_TIMEOUT)
        stdout_data.extend(stdout.decode().splitlines())
        stderr_data.extend(stderr.decode().splitlines())
        elapsed_time = time.perf_counter() - time_start
        return DockerComposeResult(process.returncode, stdout_data, stderr_data, self, elapsed_time)
    except Exception as e:
        logging.critical(f"Error executing docker compose up command: {e}")
        elapsed_time = time.perf_counter() - time_start
        return DockerComposeResult(1, stdout_data, stderr_data, self, elapsed_time)
def __repr__(self):
return f"{self.tutorial.name} {self.case_combination}"
def __write_logs(self, stdout_data: List[str], stderr_data: List[str]):
with open(self.system_test_dir / "stdout.log", 'w') as stdout_file:
stdout_file.write("\n".join(stdout_data))
with open(self.system_test_dir / "stderr.log", 'w') as stderr_file:
stderr_file.write("\n".join(stderr_data))
def __prepare_for_run(self, run_directory: Path):
    """
    Prepares the run_directory with folders and datastructures needed for every systemtest execution
    """
    self.__copy_tutorial_into_directory(run_directory)
    self.__copy_tools(run_directory)
    self.__put_gitignore(run_directory)
    # Pass the host user/group into the containers so generated files are
    # not owned by root.
    host_uid, host_gid = self.__get_uid_gid()
    self.params_to_use['PRECICE_UID'] = host_uid
    self.params_to_use['PRECICE_GID'] = host_gid
def run(self, run_directory: Path):
    """
    Runs the system test by generating the Docker Compose file, copying everything into a run folder, and executing docker-compose up.

    Executes the stages build -> solver run -> fieldcompare in order; the first
    failing stage short-circuits the remaining ones. Logs are written to the
    systemtest directory before every return.

    Args:
        run_directory: Directory in which the systemtest is staged and executed.

    Returns:
        A SystemtestResult carrying success flag, collected output and per-stage runtimes.
    """
    self.__prepare_for_run(run_directory)
    std_out: List[str] = []
    std_err: List[str] = []

    def finish(success: bool, build_time, solver_time=0, fieldcompare_time=0):
        # Single exit point: persist logs and assemble the result
        self.__write_logs(std_out, std_err)
        return SystemtestResult(
            success,
            std_out,
            std_err,
            self,
            build_time=build_time,
            solver_time=solver_time,
            fieldcompare_time=fieldcompare_time)

    docker_build_result = self._build_docker()
    std_out.extend(docker_build_result.stdout_data)
    std_err.extend(docker_build_result.stderr_data)
    if docker_build_result.exit_code != 0:
        logging.critical(f"Could not build the docker images, {self} failed")
        return finish(False, build_time=docker_build_result.runtime)

    docker_run_result = self._run_tutorial()
    std_out.extend(docker_run_result.stdout_data)
    std_err.extend(docker_run_result.stderr_data)
    if docker_run_result.exit_code != 0:
        logging.critical(f"Could not run the tutorial, {self} failed")
        return finish(False,
                      build_time=docker_build_result.runtime,
                      solver_time=docker_run_result.runtime)

    fieldcompare_result = self._run_field_compare()
    std_out.extend(fieldcompare_result.stdout_data)
    std_err.extend(fieldcompare_result.stderr_data)
    if fieldcompare_result.exit_code != 0:
        logging.critical(f"Fieldcompare returned non zero exit code, therefore {self} failed")
        return finish(False,
                      build_time=docker_build_result.runtime,
                      solver_time=docker_run_result.runtime,
                      fieldcompare_time=fieldcompare_result.runtime)

    return finish(True,
                  build_time=docker_build_result.runtime,
                  solver_time=docker_run_result.runtime,
                  fieldcompare_time=fieldcompare_result.runtime)
def run_for_reference_results(self, run_directory: Path):
    """
    Runs the system test by generating the Docker Compose files to generate the reference results
    """
    self.__prepare_for_run(run_directory)
    collected_out: List[str] = []
    collected_err: List[str] = []

    # Stage 1: build the docker images
    build_result = self._build_docker()
    collected_out.extend(build_result.stdout_data)
    collected_err.extend(build_result.stderr_data)
    if build_result.exit_code != 0:
        self.__write_logs(collected_out, collected_err)
        logging.critical(f"Could not build the docker images, {self} failed")
        return SystemtestResult(False, collected_out, collected_err, self,
                                build_time=build_result.runtime,
                                solver_time=0,
                                fieldcompare_time=0)

    # Stage 2: run the tutorial itself; no fieldcompare when generating references
    tutorial_result = self._run_tutorial()
    collected_out.extend(tutorial_result.stdout_data)
    collected_err.extend(tutorial_result.stderr_data)
    if tutorial_result.exit_code != 0:
        self.__write_logs(collected_out, collected_err)
        logging.critical(f"Could not run the tutorial, {self} failed")
        return SystemtestResult(False, collected_out, collected_err, self,
                                build_time=build_result.runtime,
                                solver_time=tutorial_result.runtime,
                                fieldcompare_time=0)

    self.__write_logs(collected_out, collected_err)
    return SystemtestResult(True, collected_out, collected_err, self,
                            build_time=build_result.runtime,
                            solver_time=tutorial_result.runtime,
                            fieldcompare_time=0)
def run_only_build(self, run_directory: Path):
    """
    Runs only the docker build command, for example to preheat the caches of the docker builder.
    """
    self.__prepare_for_run(run_directory)
    out_lines: List[str] = []
    err_lines: List[str] = []

    build_result = self._build_docker()
    out_lines.extend(build_result.stdout_data)
    err_lines.extend(build_result.stderr_data)

    if build_result.exit_code != 0:
        self.__write_logs(out_lines, err_lines)
        logging.critical(f"Could not build the docker images, {self} failed")
        return SystemtestResult(False, out_lines, err_lines, self,
                                build_time=build_result.runtime,
                                solver_time=0,
                                fieldcompare_time=0)

    self.__write_logs(out_lines, err_lines)
    return SystemtestResult(True, out_lines, err_lines, self,
                            build_time=build_result.runtime,
                            solver_time=0,
                            fieldcompare_time=0)
def get_system_test_dir(self) -> Path:
    """Return the directory in which this systemtest is staged and executed."""
    return self.system_test_dir

View file

@ -0,0 +1,39 @@
from dataclasses import dataclass
import yaml
from typing import Optional
@dataclass
class SystemtestArguments:
    """Key/value arguments handed to a systemtest run (e.g. image tags, versions)."""
    # Mapping of argument name to its (string) value
    arguments: dict[str, str]

    @classmethod
    def from_args(cls, cmd_args):
        """
        Parse a command-line string of the form "key:value,key:value".

        Values may themselves contain ':' (e.g. docker image tags like
        "image:ubuntu:22.04") — only the first ':' separates key from value.

        Args:
            cmd_args: Raw comma-separated string, or a falsy value for no arguments.

        Returns:
            A SystemtestArguments instance (empty when cmd_args is falsy).
        """
        if not cmd_args:
            return cls({})
        arguments = {}
        for param in cmd_args.split(","):
            # maxsplit=1 keeps any further ':' inside the value intact
            key, value = param.split(":", 1)
            arguments[key] = value
        return cls(arguments)

    @classmethod
    def from_yaml(cls, yml_file):
        """
        Load arguments from a YAML file mapping keys to values.

        Args:
            yml_file: Path to the YAML file, or a falsy value for no arguments.

        Returns:
            A SystemtestArguments instance (empty when the file is falsy or empty).
        """
        if not yml_file:
            return cls({})
        with open(yml_file, 'r') as f:
            # safe_load returns None for an empty file; normalize to {}
            arguments = yaml.safe_load(f) or {}
        return cls(arguments)

    def __repr__(self):
        return f"{self.arguments}"

    def contains(self, argument_key):
        """Return True if an argument with the given key was provided."""
        return argument_key in self.arguments

    def get(self, argument_key) -> Optional[str]:
        """Return the value for argument_key, or None when absent (matches Optional)."""
        # dict.get instead of [] so a missing key yields None rather than KeyError
        return self.arguments.get(argument_key)

View file

@ -0,0 +1,109 @@
from dataclasses import dataclass, field
from typing import Optional, List, Dict
from metadata_parser.metdata import Tutorials, Tutorial, Case, CaseCombination, ReferenceResult
import yaml
@dataclass
class TestSuite:
    """A named set of tutorial case combinations and their reference results, read from tests.yaml."""
    # Name of the test suite as given in tests.yaml
    name: str
    # Case combinations to execute, grouped per tutorial
    cases_of_tutorial: Dict[Tutorial, List[CaseCombination]]
    # Reference-result archives per tutorial, parallel to cases_of_tutorial
    reference_results: Dict[Tutorial, List[ReferenceResult]]

    def __repr__(self) -> str:
        # Summarize every tutorial with its cases and reference results
        return_string = f"Test suite: {self.name} contains:"
        for tutorial, cases in self.cases_of_tutorial.items():
            return_string += f"""
{tutorial.name}
cases: {cases}
reference_results: {self.reference_results[tutorial]}"""
        return return_string
class TestSuites(list):
    """
    Represents the collection of testsuites read in from the tests.yaml
    """

    def __init__(self, testsuites: List[TestSuite]):
        self.testsuites = testsuites

    @classmethod
    def from_yaml(cls, path, parsed_tutorials: Tutorials):
        """
        Creates a TestSuites instance from a YAML file.

        Args:
            path: The path to the YAML file.
            parsed_tutorials: Tutorials metadata used to resolve tutorial paths and
                validate the requested case combinations.

        Returns:
            An instance of TestSuites.

        Raises:
            Exception: If a tutorial path is unknown, or a requested case
                combination is not part of the tutorial's metadata.
        """
        testsuites = []
        with open(path, 'r') as f:
            data = yaml.safe_load(f)
        test_suites_raw = data['test_suites']
        for test_suite_name in test_suites_raw:
            case_combinations_of_tutorial = {}
            reference_results_of_tutorial = {}
            # iterate over tutorials:
            for tutorial_case in test_suites_raw[test_suite_name]['tutorials']:
                tutorial = parsed_tutorials.get_by_path(tutorial_case['path'])
                if not tutorial:
                    raise Exception(f"No tutorial with path {tutorial_case['path']} found.")
                # initialize the datastructure for the new Testsuite
                if tutorial not in case_combinations_of_tutorial:
                    case_combinations_of_tutorial[tutorial] = []
                    reference_results_of_tutorial[tutorial] = []
                all_case_combinations = tutorial.case_combinations
                case_combination_requested = CaseCombination.from_string_list(
                    tutorial_case['case_combination'], tutorial)
                if case_combination_requested in all_case_combinations:
                    case_combinations_of_tutorial[tutorial].append(case_combination_requested)
                    reference_results_of_tutorial[tutorial].append(ReferenceResult(
                        tutorial_case['reference_result'], case_combination_requested))
                else:
                    # Fixed: the YAML key is 'case_combination'; the previous
                    # 'case-combination' lookup raised a KeyError here instead of
                    # this informative exception.
                    raise Exception(
                        f"Could not find the following cases {tutorial_case['case_combination']} in the current metadata of tutorial {tutorial.name}")
            testsuites.append(TestSuite(test_suite_name, case_combinations_of_tutorial,
                                        reference_results_of_tutorial))
        return cls(testsuites)

    def __iter__(self):
        return iter(self.testsuites)

    def __getitem__(self, index):
        return self.testsuites[index]

    def __setitem__(self, index, value):
        self.testsuites[index] = value

    def __len__(self):
        return len(self.testsuites)

    def get_by_name(self, name_to_search) -> Optional[TestSuite]:
        """
        Retrieves a testsuite by its name.

        Args:
            name_to_search: The name of the testsuite to search for.

        Returns:
            The testsuite with the specified name, or None if not found.
        """
        for testsuite in self.testsuites:
            if testsuite.name == name_to_search:
                return testsuite
        return None

    def __repr__(self) -> str:
        return_str = ""
        for tests_suite in self.testsuites:
            return_str += f"{tests_suite}\n\n"
        return return_str

View file

@ -0,0 +1,97 @@
# Test suites consumed by the systemtests runner (parsed by TestSuites.from_yaml).
# Each suite lists tutorials by path, the participant case combination to run,
# and the reference-result archive to compare the run against.
test_suites:
  openfoam_adapter_pr:
    tutorials:
      - path: flow-over-heated-plate
        case_combination:
          - fluid-openfoam
          - solid-openfoam
        reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-openfoam.tar.gz
      - path: perpendicular-flap
        case_combination:
          - fluid-openfoam
          - solid-calculix
        reference_result: ./perpendicular-flap/reference-results/fluid-openfoam_solid-calculix.tar.gz
  openfoam_adapter_release:
    tutorials:
      - path: flow-over-heated-plate
        case_combination:
          - fluid-openfoam
          - solid-openfoam
        reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-openfoam.tar.gz
  fenics_test:
    tutorials:
      - path: flow-over-heated-plate
        case_combination:
          - fluid-openfoam
          - solid-fenics
        reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-fenics.tar.gz
  nutils_test:
    tutorials:
      - path: flow-over-heated-plate
        case_combination:
          - fluid-openfoam
          - solid-nutils
        reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-nutils.tar.gz
  calculix_test:
    tutorials:
      - path: perpendicular-flap
        case_combination:
          - fluid-openfoam
          - solid-calculix
        reference_result: ./perpendicular-flap/reference-results/fluid-openfoam_solid-calculix.tar.gz
  su2_test:
    tutorials:
      - path: perpendicular-flap
        case_combination:
          - fluid-su2
          - solid-fenics
        reference_result: ./perpendicular-flap/reference-results/fluid-su2_solid-fenics.tar.gz
  elastic_tube_1d_test:
    tutorials:
      - path: elastic-tube-1d
        case_combination:
          - fluid-cpp
          - solid-cpp
        reference_result: ./elastic-tube-1d/reference-results/fluid-cpp_solid-cpp.tar.gz
      - path: elastic-tube-1d
        case_combination:
          - fluid-python
          - solid-python
        reference_result: ./elastic-tube-1d/reference-results/fluid-python_solid-python.tar.gz
      - path: elastic-tube-1d
        case_combination:
          - fluid-cpp
          - solid-python
        reference_result: ./elastic-tube-1d/reference-results/fluid-cpp_solid-python.tar.gz
  # Broad suite combining cases from several tutorials
  release_test:
    tutorials:
      - path: elastic-tube-1d
        case_combination:
          - fluid-cpp
          - solid-python
        reference_result: ./elastic-tube-1d/reference-results/fluid-cpp_solid-python.tar.gz
      - path: flow-over-heated-plate
        case_combination:
          - fluid-openfoam
          - solid-nutils
        reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-nutils.tar.gz
      - path: flow-over-heated-plate
        case_combination:
          - fluid-openfoam
          - solid-fenics
        reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-fenics.tar.gz
      - path: flow-over-heated-plate
        case_combination:
          - fluid-openfoam
          - solid-openfoam
        reference_result: ./flow-over-heated-plate/reference-results/fluid-openfoam_solid-openfoam.tar.gz
      - path: perpendicular-flap
        case_combination:
          - fluid-openfoam
          - solid-calculix
        reference_result: ./perpendicular-flap/reference-results/fluid-openfoam_solid-calculix.tar.gz
      - path: perpendicular-flap
        case_combination:
          - fluid-su2
          - solid-fenics
        reference_result: ./perpendicular-flap/reference-results/fluid-su2_solid-fenics.tar.gz

View file

@ -0,0 +1,43 @@
#!/bin/bash
# Run this script at the root of the repository to generate PNG files from each precice-config.xml
set -e -u

# Render $1/precice-config.xml (if present) to a PNG under $1/images/ using
# precice-config-visualizer and graphviz 'dot'.
#   $1: tutorial directory name relative to the repository root
# Runs in a subshell so the 'cd' does not leak into the caller.
visualize_config(){
    (
    # The quickstart case uses a different output naming scheme than tutorials
    if [[ "$1" == quickstart ]]; then
        outfile="images/quickstart-precice-config"
    else
        outfile="images/tutorials-$1-precice-config"
    fi
    cd "$1"
    # Only tutorials that actually ship a precice-config.xml are visualized
    if [ -f precice-config.xml ]; then
        echo "Visualizing the configuration in $1"
        mkdir -p images
        precice-config-visualizer -o "$outfile.dot" precice-config.xml
        # Special case, to be removed once bug https://github.com/precice/config-visualizer/issues/22
        if [[ "$1" == partitioned-heat-conduction-direct ]]; then
            sed 's/compound=True;//' --in-place "$outfile.dot"
        fi
        dot -Tpng "$outfile.dot" > "$outfile.png"
        # The intermediate graphviz source is not kept
        rm "$outfile.dot"
    fi
    )
}
# Export so GNU parallel's subshells can see the function
export -f visualize_config

# Isolated virtualenv for the visualizer tool
python3 -m venv .venv
. .venv/bin/activate
pip install precice-config-visualizer

# One top-level directory name per line (none of the tutorial names contain spaces)
tutorials=$(find . -maxdepth 1 -type d -not -name ".*" | sed "s/^.\///")

if command -v parallel &> /dev/null; then
    # Unquoted on purpose: word-splitting turns each directory into its own job.
    # (Quoting "$tutorials" would hand parallel a single newline-joined argument.)
    parallel visualize_config ::: $tutorials
else
    # Fallback without GNU parallel: process the tutorials sequentially.
    # The old code called `visualize_config ::: "$tutorials"`, passing the
    # literal ':::' separator as the directory argument, which made the
    # function try to `cd :::` and abort under `set -e`.
    for tutorial in $tutorials; do
        visualize_config "$tutorial"
    done
fi