Initial Overleaf Import
BIN
catch22/images/CPU1Bus.png
Normal file
|
After Width: | Height: | Size: 299 KiB |
BIN
catch22/images/D-FlipFlop-2.jpeg
Normal file
|
After Width: | Height: | Size: 172 KiB |
BIN
catch22/images/Festverdrahtet.png
Normal file
|
After Width: | Height: | Size: 291 KiB |
BIN
catch22/images/Hauptspeichertypen.png
Normal file
|
After Width: | Height: | Size: 442 KiB |
BIN
catch22/images/MultHW.png
Normal file
|
After Width: | Height: | Size: 306 KiB |
BIN
catch22/images/OBFA.jpeg
Normal file
|
After Width: | Height: | Size: 194 KiB |
BIN
catch22/images/Screenshot 2022-12-08 at 11.30.56.png
Normal file
|
After Width: | Height: | Size: 51 KiB |
BIN
catch22/images/Screenshot 2022-12-08 at 11.31.05.png
Normal file
|
After Width: | Height: | Size: 36 KiB |
BIN
catch22/images/Screenshot 2022-12-13 at 12.00.12.png
Normal file
|
After Width: | Height: | Size: 299 KiB |
BIN
catch22/images/Screenshot 2022-12-13 at 12.00.45.png
Normal file
|
After Width: | Height: | Size: 572 KiB |
BIN
catch22/images/Timediagramm.pdf
Normal file
BIN
catch22/images/TwoBitAdder.jpeg
Normal file
|
After Width: | Height: | Size: 204 KiB |
BIN
catch22/images/XOR.jpeg
Normal file
|
After Width: | Height: | Size: 82 KiB |
BIN
catch22/images/Zeilenzugriff.png
Normal file
|
After Width: | Height: | Size: 198 KiB |
BIN
catch22/images/waveform.png
Normal file
|
After Width: | Height: | Size: 8.2 KiB |
BIN
catch22/images/zugriff.png
Normal file
|
After Width: | Height: | Size: 280 KiB |
151
catch22/main.tex
Normal file
@ -0,0 +1,151 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% writeLaTeX Example: A quick guide to LaTeX
|
||||
%
|
||||
% Source: Dave Richeson (divisbyzero.com), Dickinson College
|
||||
%
|
||||
% A one-size-fits-all LaTeX cheat sheet. Kept to two pages, so it
|
||||
% can be printed (double-sided) on one piece of paper
|
||||
%
|
||||
% Feel free to distribute this example, but please keep the referral
|
||||
% to divisbyzero.com
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% How to use writeLaTeX:
|
||||
%
|
||||
% You edit the source code here on the left, and the preview on the
|
||||
% right shows you the result within a few seconds.
|
||||
%
|
||||
% Bookmark this page and share the URL with your co-authors. They can
|
||||
% edit at the same time!
|
||||
%
|
||||
% You can upload figures, bibliographies, custom classes and
|
||||
% styles using the files menu.
|
||||
%
|
||||
% If you're new to LaTeX, the wikibook is a great place to start:
|
||||
% http://en.wikibooks.org/wiki/LaTeX
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
\documentclass[10pt,landscape]{article}
|
||||
\usepackage{amssymb,amsmath,amsthm,amsfonts}
|
||||
\usepackage{multicol,multirow}
|
||||
\usepackage{calc}
|
||||
\usepackage{ifthen}
|
||||
\usepackage[landscape]{geometry}
|
||||
\usepackage[colorlinks=true,citecolor=blue,linkcolor=blue]{hyperref}
|
||||
|
||||
%%Packages added by Sebastian Lenzlinger:
|
||||
\usepackage{enumerate} %% Used to change the style of enumerations (see below).
|
||||
|
||||
\newtheorem{definition}{Definition}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
\newtheorem{axiom}{Axiom}
|
||||
\newtheorem{lem}{Lemma}
|
||||
\newtheorem{corr}{Corollary}
|
||||
|
||||
\usepackage{tikz} %% Package to create graphics (graphs, automata, etc.)
|
||||
\usetikzlibrary{automata} %% Tikz library to draw automata
|
||||
\usetikzlibrary{arrows} %% Tikz library for nicer arrow heads
|
||||
%%End
|
||||
|
||||
\ifthenelse{\lengthtest { \paperwidth = 11in}}
|
||||
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
|
||||
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
}
|
||||
\pagestyle{empty}
|
||||
\makeatletter
|
||||
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{0.5ex plus .2ex}%x
|
||||
{\normalfont\large\bfseries}}
|
||||
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
|
||||
{-1explus -.5ex minus -.2ex}%
|
||||
{0.5ex plus .2ex}%
|
||||
{\normalfont\normalsize\bfseries}}
|
||||
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{1ex plus .2ex}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\makeatother
|
||||
\setcounter{secnumdepth}{0}
|
||||
\setlength{\parindent}{0pt}
|
||||
\setlength{\parskip}{0pt plus 0.5ex}
|
||||
% -----------------------------------------------------------------------
|
||||
|
||||
\title{CATCH22 UniBas}
|
||||
|
||||
|
||||
\begin{document}
|
||||
\newcommand{\mcv}{\mathcal{V}}
|
||||
\newcommand{\mcf}{\mathcal{F}}
|
||||
\newcommand{\mcp}{\mathcal{P}}
|
||||
\newcommand{\mcc}{\mathcal{C}}
|
||||
\newcommand{\mcs}{\mathcal{S}}
|
||||
\newcommand{\sig}{\mathcal{S} = \langle \mathcal{V},\mathcal{C},\mathcal{F},\mathcal{P}\rangle}
|
||||
\newcommand{\natzero}{$\mathbb{N}_0$}
|
||||
\newcommand{\natone}{$\mathbb{N}_1$}
|
||||
\newcommand{\mci}{\mathcal{I}}
|
||||
\newcommand{\intp}{\mathcal{I}=\langle U, \cdot^\mathcal{I}\rangle}
|
||||
\newcommand{\toia}[1]{{#1}^{\mathcal{I},\alpha}}
|
||||
\newcommand{\iam}[1]{\mathcal{I},\alpha\models #1}
|
||||
\newcommand{\niam}[1]{\mathcal{I},\alpha\not\models #1}
|
||||
\begin{tiny}
|
||||
|
||||
|
||||
|
||||
\raggedright
|
||||
\footnotesize
|
||||
|
||||
\begin{center}
|
||||
\Large{\textbf{Spik CATCH22}} \\
|
||||
\end{center}
|
||||
\begin{multicols*}{3}
|
||||
\setlength{\premulticols}{1pt}
|
||||
\setlength{\postmulticols}{1pt}
|
||||
\setlength{\multicolsep}{1pt}
|
||||
\setlength{\columnsep}{1pt}
|
||||
|
||||
\section{Gatter,Flip-Flop}
|
||||
\subsection{Von Neumann Architecture}
|
||||
|
||||
\includegraphics[scale=0.15]{images/Screenshot 2022-12-08 at 11.30.56.png}\\
|
||||
\textbf{Modern Diagram}\\
|
||||
\includegraphics[scale=0.15]{images/Screenshot 2022-12-08 at 11.31.05.png}
|
||||
\textbf{Logische Schaltungen mit Transistoren}\\
|
||||
Generationen: 1. RTL = resistor-transistor logic; 2. TTL = transistor-transistor logic; 3. MOS = metal-oxide semiconductor logic; 4. CMOS = complementary metal-oxide semiconductor logic.
|
||||
\textbf{Gatter}
|
||||
Ein Gatter Netz bezeichnet eine logische Funktion.\\
|
||||
An XOR logic implemented in NAND gates:\\
|
||||
\includegraphics[scale=0.05]{images/XOR.jpeg}\\
|
||||
First I built a One-Bit Full-Adder:\\
|
||||
\includegraphics[scale=0.05]{images/OBFA.jpeg}\\
|
||||
Then combined two OBFAs to make a Two-Bit Full-Adder:\\
|
||||
\includegraphics[scale=0.05]{images/TwoBitAdder.jpeg}
|
||||
\emph{Taktflanken- vs. taktpegelgesteuertes Flip-Flop:}
|
||||
Ein taktpegelgesteuertes Flip-Flop kann seinen Zustand solange ändern, wie der Taktgeber einen bestimmten Pegel hält. Also wenn der Taktinput von 0 zu 1 geht bis er wieder von 1 zu 0 geht. Während dieser Zeit kann der Zustand geändert werden und die Inputs werden weiter bis zum Output propagiert. \\
|
||||
Hingegen propagiert ein taktflankengesteueretes Flip-Flop die Inputs nur bei der einen oder bei beiden Taktflanken des Taktgebers: also nur im Moment wo 0 zu 1 oder 1 zu 0 geht (einer oder beide Flanken).
|
||||
|
||||
\noindent\emph{D vs. RS Flip-Flop:}
|
||||
Das RS Flip-Flop folgt einem Set-Reset Prinzip. (Set) S=1 setzt den Flip-Flop sofort auf 1. Wenn S wieder auf 0 gesetzt wird wird der Zustand beim output gehalten. Mit (Reset)R = 1 kann der Output wieder auf 0 gsetzt werden. S=R=1 führt bei der Implementation mit NOR Gatter zum 0 Zustand(auch bei notQ). Beim D Flip-Flop wird der Input weitergegeben solange der Taktgeber hoch ist. Was auch immer auf der D Leitung liegt wenn C hoch ist wird weiterpropagiert. Wenn C tief ist wird der Output gehalten. Anders als das RS Flip Flop eignet sich ein D Flip Flop also zum synchronisieren mit einem Oszillator.
|
||||
\noindent\emph{Schaltplan und Zeitdiagram:}\\
|
||||
|
||||
\noindent\includegraphics[scale=0.05]{images/D-FlipFlop-2.jpeg}\\
|
||||
|
||||
\noindent\includegraphics[scale=0.15]{images/Timediagramm.pdf}\\
|
||||
\noindent\includegraphics[scale=0.1]{images/CPU1Bus.png}\\
|
||||
\noindent\includegraphics[scale=0.09]{images/Screenshot 2022-12-13 at 12.00.12.png}\\
|
||||
Instruktionszyklus (durch Kontrolleinheit dirigiert): 1. Hole nächste Instr. mittels PC. 2. Hole Operanden (abhängig v. d. Instr.). 3. Führe Operation durch. 4. Speichere Resultat. 5. Berichtige PC, falls nicht schon Teil der Operation. \\
|
||||
\noindent\includegraphics[scale=0.1]{images/MultHW.png}\\
|
||||
\noindent\includegraphics[scale=0.1]{images/Festverdrahtet.png}\\
|
||||
\noindent\includegraphics[scale=0.1]{images/Screenshot 2022-12-13 at 12.00.45.png}
|
||||
\textbf{Hauptspeicher:}
|
||||
Um Programmdaten abzuspeichern--> CPU Register zu klein.
|
||||
RAM: Random Access Memory ist volatil, schnell und direkt via Systembus les- und schreibbar;
|
||||
ROM: Read Only Memory, nur zum Lesen, nicht Ändern. Gut für Microcode-Speicher, HW Code (z.B. BIOS);
|
||||
PROM: Programmable read only memory, änderbar aber langsam. EPROM (erasable PROM): Inhalt kann als Ganzes gelöscht werden. EEPROM (electrically erasable PROM): wie RAM aber persistent, sehr langsam.
|
||||
|
||||
\end{multicols*}
|
||||
\end{tiny}
|
||||
\end{document}
|
||||
293
diskmath/main.tex
Normal file
@ -0,0 +1,293 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% writeLaTeX Example: A quick guide to LaTeX
|
||||
%
|
||||
% Source: Dave Richeson (divisbyzero.com), Dickinson College
|
||||
%
|
||||
% A one-size-fits-all LaTeX cheat sheet. Kept to two pages, so it
|
||||
% can be printed (double-sided) on one piece of paper
|
||||
%
|
||||
% Feel free to distribute this example, but please keep the referral
|
||||
% to divisbyzero.com
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% How to use writeLaTeX:
|
||||
%
|
||||
% You edit the source code here on the left, and the preview on the
|
||||
% right shows you the result within a few seconds.
|
||||
%
|
||||
% Bookmark this page and share the URL with your co-authors. They can
|
||||
% edit at the same time!
|
||||
%
|
||||
% You can upload figures, bibliographies, custom classes and
|
||||
% styles using the files menu.
|
||||
%
|
||||
% If you're new to LaTeX, the wikibook is a great place to start:
|
||||
% http://en.wikibooks.org/wiki/LaTeX
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
\documentclass[10pt,landscape]{article}
|
||||
\usepackage{amssymb,amsmath,amsthm,amsfonts}
|
||||
\usepackage{multicol,multirow}
|
||||
\usepackage{calc}
|
||||
\usepackage{ifthen}
|
||||
\usepackage[landscape]{geometry}
|
||||
\geometry{a4paper, landscape, margin=0.5in}
|
||||
\usepackage[colorlinks=true,citecolor=blue,linkcolor=blue]{hyperref}
|
||||
\usepackage{helvet}
|
||||
\renewcommand{\familydefault}{\sfdefault}
|
||||
|
||||
%%Packages added by Sebastian Lenzlinger:
|
||||
\usepackage{enumerate} %% Used to change the style of enumerations (see below).
|
||||
|
||||
\newtheorem{definition}{Definition}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
\newtheorem{ax}{Axiom}
|
||||
\newtheorem{lem}{Lemma}
|
||||
\newtheorem{corr}{Corollary}
|
||||
|
||||
\usepackage{tikz} %% Package to create graphics (graphs, automata, etc.)
|
||||
\usetikzlibrary{automata} %% Tikz library to draw automata
|
||||
\usetikzlibrary{arrows} %% Tikz library for nicer arrow heads
|
||||
%%End
|
||||
|
||||
\ifthenelse{\lengthtest { \paperwidth = 11in}}
|
||||
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
|
||||
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
}
|
||||
\pagestyle{empty}
|
||||
\makeatletter
|
||||
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{0.5ex plus .2ex}%x
|
||||
{\normalfont\large\bfseries}}
|
||||
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
|
||||
{-1explus -.5ex minus -.2ex}%
|
||||
{0.5ex plus .2ex}%
|
||||
{\normalfont\normalsize\bfseries}}
|
||||
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{1ex plus .2ex}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\makeatother
|
||||
\setcounter{secnumdepth}{0}
|
||||
\setlength{\parindent}{0pt}
|
||||
\setlength{\parskip}{0pt plus 0.5ex}
|
||||
% -----------------------------------------------------------------------
|
||||
|
||||
\title{Discrete Mathematics HS22 UniBas}
|
||||
|
||||
\begin{document}
|
||||
\newcommand{\mcv}{\mathcal{V}}
|
||||
\newcommand{\mcf}{\mathcal{F}}
|
||||
\newcommand{\mcp}{\mathcal{P}}
|
||||
\newcommand{\mcc}{\mathcal{C}}
|
||||
\newcommand{\mcs}{\mathcal{S}}
|
||||
\newcommand{\sig}{\mathcal{S} = \langle \mathcal{V},\mathcal{C},\mathcal{F},\mathcal{P}\rangle}
|
||||
\newcommand{\natzero}{\mathbb{N}_0}
|
||||
\newcommand{\natone}{\mathbb{N}_1}
|
||||
\newcommand{\mci}{\mathcal{I}}
|
||||
\newcommand{\intp}{\mathcal{I}=\langle U, \cdot^\mathcal{I}\rangle}
|
||||
\newcommand{\toia}[1]{{#1}^{\mathcal{I},\alpha}}
|
||||
\newcommand{\iam}[1]{\mathcal{I},\alpha\models #1}
|
||||
\newcommand{\niam}[1]{\mathcal{I},\alpha\not\models #1}
|
||||
|
||||
|
||||
|
||||
\raggedright
|
||||
\footnotesize
|
||||
\begin{multicols*}{3}
|
||||
\setlength{\premulticols}{1pt}
|
||||
\setlength{\postmulticols}{1pt}
|
||||
\setlength{\multicolsep}{1pt}
|
||||
\setlength{\columnsep}{1pt}
|
||||
|
||||
\textbf{Induction}
|
||||
\textbf{Weak: }I.H. only supposes $P(k)$ is true for $k=n$; \textbf{Strong(/Complete): }for all $ k \in \natzero$.\\
|
||||
\emph{Are there statements that only are provable w/ strong ind. but not w/ weak?} We can always use a stronger statement.
|
||||
|
||||
\textbf{Sets}\\
|
||||
\textbf{Def. Set. }A \emph{set} is an \emph{unordered collection} of \emph{distinct} objects.\\
|
||||
\textbf{Axiom of Extensionality.} Two sets are equal if they contain the same elements.\\
|
||||
\textbf{Set Ops.}\\
|
||||
\emph{Intersection: }$A\cap B = \{x|x\in A \text{ and } x\in B\}$\\
|
||||
\emph{Union: }$A\cup B = \{x|x\in A \text{ or } x\in B\}$\\
|
||||
\emph{Set Diff: }$A\setminus B = \{x|x\in A \text{ and } x\not\in B\}$\\
|
||||
\emph{Complement: }$\overline{A}=B\setminus A$, where $A\subseteq B$ and $B$ is the set of \emph{all} considered objects (in a given context).\\
|
||||
\textbf{Theorems. }\\
|
||||
\emph{Commutativity: }
|
||||
$ A\cup B = B\cup A$;
|
||||
$ A\cap B = B\cap A$
|
||||
\emph{Associativity: }$(A\cup B) \cup C = A \cup (B \cup C)$; same for $\cap$.
|
||||
\emph{Distrubution: }$A\cup(B\cap C) = (A\cup B) \cap (A\cup C)$; $A\cap(B\cup C) = (A\cap B) \cup (A\cap C)$
|
||||
\emph{De Morgan: }$\overline{A\cup B}=\overline{A}\cap\overline{B}$;$\overline{A\cap B}= \overline{A}\cup\overline{B}$.
|
||||
\textbf{Theorems for finite sets.}
|
||||
$\lvert A\cup B\rvert = \lvert A\rvert + \lvert B\rvert - \lvert A \cap B\rvert$;\emph{Disjoint sets: }$\lvert A\cup B\rvert = \lvert A\rvert + \lvert B\rvert$\\
|
||||
$\lvert\mathcal{P}(S)\rvert=2^{\lvert S \rvert}$;\\
|
||||
\emph{Card. of a finite Set.} Finite set $S$: $\vert \mathcal{P}(S)\rvert= 2^{\lvert S\rvert}$;
|
||||
$|A|=|B|$ for any sets if there is a bijection from $A$ to $B$;
|
||||
$|A|\leq |B|$ if there is an injection from $A$ to $B$;
|
||||
$|A|<|B|$ if $|A|\leq |B|$ and $|A|\not = |B|$;
|
||||
A set $A$ is countable if $|A|\leq |\mathbb N_0|$;
|
||||
If $A$ is a countable set, then every $B\subseteq A$ is also countable.;
|
||||
If $A$ and $B$ are countable, then so is $A\cup B$.;
|
||||
A countable union of countable sets is countable.;
|
||||
\textbf{Theorem. }The set $B=\{b | b\text{ is a binary tree}\}$ is countable. \emph{Proof.}
|
||||
For $n\in\natzero$ the set $B_n$ of all binary trees with $n$ leaves is finite. With $M = \{B_i \mid i \in \natzero\}$ the set of all binary trees is $\bigcup_{B'\in M}B'$. Since $M$ is a countable set of countable sets, $B$ is countable. q.e.d.\\
|
||||
\textbf{Cantors Theorem. } For every set $S$ it holds that $|S|<|\mathcal{P}(S)|$\\
|
||||
\textbf{Tuples: }
|
||||
Two $n$-tuples $x=(x_1,x_2,...,x_n)$ and $y=(y_1,y_2,...,y_n)$ are equal$(x=y)$, if $x_i=y_i$ for all $i\in\{1,2,3,...,n\}$
|
||||
|
||||
\underline{\textbf{Binary Relations}}\\
|
||||
\textbf{Properties of homogeneous bin. Relations}
|
||||
homogen. means $R\subseteq A\times A$
|
||||
\textbf{Def. }\emph{reflexive:} Bin.rel. $R$ over set $A$ where for all $a\in A$ it holds that $(a,a)\in R$. \emph{irreflexive:} Bin.rel. $R$ over set $A$ where for all $a\in A$ it holds that $(a,a)\not\in R$. \emph{symmetric:} Bin.rel. $R$ over set $A$ where for all $a,b\in A$ $(a,b)\in R$ iff. $(b,a)\in R$. \emph{asymmetric:} Bin.rel. $R$ over set $A$ where for all $a,b\in A$, if $(a,b)\in R$ then $(b,a)\not\in R$. \emph{antisymmetric:} Bin.rel. $R$ over set $A$ where for all $a,b\in A$, where $a\not=b$, if $(a,b)\in R$ then $(b,a)\not\in R$. \emph{transitivity:} Bin.rel. $R$ over set $A$ where for all $a,b,c\in A$ it holds that if $(a,b)\in R$ and $(b,c)\in R$ then $(a,c)\in R$. \\
|
||||
Bin.rel. $R$ over set $A$:
|
||||
$(i)$ irreflexive and antisymmetric iff. asymmetric.
|
||||
|
||||
\underline{\textbf{Classes of relations}}\\
|
||||
\textbf{Def. Equivalence Relation:} reflexiv, symmetric and transitive\\
|
||||
\underline{Def. Partition:}
|
||||
A \emph{partition} of a set $S$ is a set $P\subseteq\mathcal{P}(S)$ where (i) $X\not=\emptyset$, for all $X\in P$ (ii) $\bigcup_{X\in P} X = S$ (iii) $X \cap Y = \emptyset$ for all $X,Y\in P$ with $X\not=Y$. Members of $P$ are called blocks.
|
||||
\emph{Examples: }(i) $\emptyset$ has exactly one partition, namely $\emptyset$. Note that this \emph{is} the partition, not the member of the partition. (ii) For any \emph{non-empty} set $X, P=\{X\}$ is the \emph{trivial partition}.(iii) For any \emph{non-empty proper subset} $A$ of a set $S$. $\{A,S\setminus A\}$ is a partition. Every $s\in S$ is in exactly one block $X\in P $.\\
|
||||
\underline{Def. Equivalence Class:} For an equ.rel. $R$ over $S$ and any $x\in S$ the \emph{equivalence class of $x$} is the set $[x]_R=\{y\in S\mid xRy\}$\\
|
||||
\textbf{Def. Partial order($\preccurlyeq$):} reflexiv, antisymmetric and transitive\\
|
||||
\underline{Theorem }A partition $P$ of a set $S$ induces a relation $\sim_P$ which is an eq.rel. and for an eq.rel.$R$ the set $P=\{[x]_R|x\in S\}$ of equivalence classes is a partition.\\
|
||||
\underline{Def. Least/Greatest Element: } Let $\preccurlyeq$ over $S$ be a partial order. $x\in S$ is a \emph{least (greatest)} element of $S$ if \emph{for all $y\in S, x\preccurlyeq y \ (y \preccurlyeq x)$}\\
|
||||
\underline{Theorem: } The least/greatest element is unique.\\
|
||||
\underline{Def. minimal/maximal Element: } $\preccurlyeq$ P.O. over $S$. $x\in S$ is \emph{maximal element} if \emph{there is no $y \in S$ with $x\preccurlyeq y$ and $x\not=y$}. $x\in S$ is \emph{minimal element} if \emph{there is no $y \in S$ with $y\preccurlyeq x$ and $x\not=y$}. \\
|
||||
\textbf{Def. Total Relation: } A bin.rel. $R$ is \emph{total (or connex)} if for all $x,y\in S$ either $xRy$ or $yRx$ is true. I.E. there are no elements with $x\not\preccurlyeq x$ and $y\not\preccurlyeq x$.\\
|
||||
\textbf{Def. Total Order: } total and a partial order.\\
|
||||
\textbf{Def. Strict order($\prec$): }irreflexive, asymmetric and transitive\\
|
||||
\underline{Trichotomy: }A bin.rel. $R$ over $S$ is called \emph{trichotomous } if for all $x,y\in S$ either $xRy$ or $yRx$ or $x=y$(exactly one) is true.\\
|
||||
|
||||
\textbf{Def. Strict Total Order(also $\prec$): }trichotomous and strict order\\
|
||||
\underline{Least/greatest/minimal/maximal for S.T.O.s: }almost like for P.O.s but $x\not=y$ for least and greatest.
|
||||
|
||||
\textbf{Set ops on Relations}
|
||||
If $R$ is a relation over sets $S_1,...,S_n$, and $R'$ is a rel. over $S'_1,...,S'_n$, then $R\cup R'$ is a relation over $S_1\cup S'_1,...,S_n\cup S'_n$ \\
|
||||
\textbf{Def. Inverse Relation: }$R\subseteq A \times B$:
|
||||
$R^{-1} =\{(b,a)\mid (a,b)\in R\}$\\
|
||||
\textbf{Def. Rel. Composition: }$R_1$ bin.rel. over $A,B$ and $R_2$ bin.rel over $B,C$. $R_2\circ R_1 = \{(a,c)\mid \ there \ is \ a \ b\in B\text{ with }(a,b)\in R_1 \ and \ (b,c)\in R_2\}$\\
|
||||
\underline{Theorem (Associativity of Composition):} For relations $R_1,R_2,R_3$ over sets $S_1,S_2,S_3,S_4$ where $R_i\subseteq S_{i+1} \times S_i$ then $R_1\circ(R_2\circ R_3)=(R_1\circ R_2)\circ R_3$\\
|
||||
\textbf{Def. Transitive closure: }The transitive closure $R^+$ of a rel. $R$ is the smallest relation over $S$ that is transitive and has $R$ as a subset.\\
|
||||
\underline{Theorem: } Let the i-th power of a homogeneous relation $R$ be defined as $R^1=R,\ i=1$ and $R^i=R\circ R^{i-1}, i>1$. Then $R^+=\bigcup_{i=1}^{\infty}R^i$.
|
||||
|
||||
\textbf{Functions}
|
||||
\textbf{Def.} A bin.rel. $R$ over sets $A$ and $B$ is \textit{functional} if for every $a\in A$ there is \textit{at most one} $b \in B$ with $(a,b)\in R$.\\
|
||||
\textbf{Def. Partial Function:} A \textit{partial function} $f$ from a set $A$ to set $B$ ($f:A \nrightarrow B$) is given by \textit{a functional relation} $G$ over $A$ and $B$. $G$ is called the \textit{graph} of $f$.
|
||||
We write $f(x)=y$ for $(x,y)\in G$ and say $y$ is the \textit{image} of $x$ under $f$. If there is no $y\in B$ with $(x,y)\in G$, then $f(x)$ is \emph{undefined}.
|
||||
\textbf{Total Func.} A \emph{(total) function} $f:A \rightarrow B$ is a partial function such that $f(x)$ is defined for all $x\in A$. I.E. the domain = domain of definition.\\
|
||||
The restriction of $f$ to $X$ is the partial function $f |_X : X \nrightarrow B$ with $f |_X (x ) = f (x )$ for all $x \in X $.
|
||||
A function $f' :A' \nrightarrow B$ is called an extension of $f$ if $A \subseteq A'$ and $f '|_A = f $. Let $g:B \nrightarrow C$ be a p.f. too, then the \emph{composition of f and g} is $g \circ f: A \nrightarrow C$ with $(g\circ f)(x) = g(f(x))$ if $f$ is def. for $x$ and $g$ is defined for $f(X)$; undefined otherwise. If (total) functions then always defined. Function composition is 1. not commutative and 2. associative (analog relations).\\
|
||||
\textbf{Properties of funcs.}Let $f: A \rightarrow B$ and $g: B\rightarrow C$ be functions, then: $f$ is \emph{injective} if for all $x,y\in A$ with $x\not= y$ it holds that $f(x)\not=f(y)$. If $f$ and $g$ are injective(surjective) then so is $g\circ f$. A function is \emph{surjective} if image = codomain: for all $y\in B, \exists x\in A : f(x) = y$. A function is bijective if it is surj. and inj. If $f$ is bijective, the \emph{inverse function} of $f$ is the function $f^{-1}:B \rightarrow A: f^{-1}(y)=x\ iff. \ f(x) = y$. Let $f, g$ be bij., then $(g\circ f)^{-1} = f^{-1} \circ g^{-1}$\\
|
||||
\textbf{Permutations.} Let $S$ be a set. A \emph{bijection} $\pi : S \rightarrow S$ is called a \emph{permutation} of $S$. If $\pi$ and $\pi'$ are \emph{disjoint} perms over set $S$, then $\pi\pi'=\pi'\pi$. Every cycle can be expressed as a product of transpositions (2-cycles).
|
||||
|
||||
\textbf{Groups}
|
||||
A\emph{ binary operation} on set $S$ is a function $f: S\times S \rightarrow S$\\
|
||||
\textbf{Def. Group} A group $G= (S,\cdot)$ is given by a set $S$ and bin.op. $\cdot$ on $S$ that satisfy the \emph{group axioms:} 1. \emph{Associativity} ($x\cdot y) \cdot z = x \cdot (y \cdot z ) \ \forall x,y,z\in S$; 2. \emph{Identity elem. }$\exists{e}\in S\ s.t.\ \forall x\in S \ x\circ e = e \cdot x = x$ holds; 3. \emph{Inverse elem. }$\forall x\in S \exists y\in S \ s.t. \ x\cdot y = y \cdot x = e$, where $e$ is the identity element. ABELIAN: if $\cdot$ is also \emph{commutative}. Cardinality $|S|$ is called \emph{order}of the group. \textbf{Uniqueness} $e \land \forall x \ x^{-1}$ are unique in a group. \textbf{Right (Left) Quotient. } $\forall a,b\in S \ x\cdot b = a\ (b\cdot x = a)$ has exectly one solution $x\in S,\ x = a \cdot b^{-1}\ (x = b^{-1}\cdot a)$. ABELIAN: $a/b=b/a$.\textbf{Group Homomorphism} $G=(S,\cdot), G'=(S',\circ)$ a homomorphism from $G$ to $G'$ is a function $f:S\rightarrow S', \ \forall x,y,\in S: f(x\cdot y)=f(x)\circ f(y)$ \textbf{G. Isom.} Homomorphism that is bijectiv. \textbf{Subgroup} $H=(S',\circ)$ of $G=(S, \cdot)$, with $S'\subseteq G$ and $\cdot$ the restriction of $\circ$ to $S'$. Always contains identity element and is closed under group up and inverse.
|
||||
\textbf{Th. Symmetric Gr.} Set $M$, then $Sym(M)=(S,\cdot)$, where $S$ is set of all permutations of $M$ and $\cdot$ is function composition, is a group. $M$ finite write $S_n$. \emph{Order} $Sym(M)=n!$ \emph{Abelian?} not for $n\ge 3$. \textbf{Generating set} of a group $G$ is a set $S'\subseteq S$ s.t. every $e\in S$ can be expressed as a combo of finitely many elems of $S'$ and their inverses.\textbf{Permutation Group} is a group $G=(S,\cdot)|\forall x \in S$ is a permutation of some set $M$ op is func comp. \textbf{NOTICE} Every perm gr is a subgr of a symmetric gr and every such subgr is a perm gr.
|
||||
|
||||
|
||||
\textbf{Divisibiolity and Modular Arithmetic}
|
||||
If $\exists k \in \mathbb Z \text{ s.t. } mk = n \Rightarrow m|n$. $a,b,c,d\in \mathbb Z:d|a \land d|b \Rightarrow \forall x,y\in \mathbb Z$ $d|xa + yb$ and for $n\in \natone: a|b \Rightarrow ac |bc \land a^n|b^n$. \textbf{Th.} Divisibility over $\natzero$ is a partial order. \textbf{Euclid} $\forall a,b \in \mathbb Z, \ b\not=0,\ \exists \ unique\ q,r\in \mathbb Z: a=qb +r\ and\ 0\leq r <|b|, r:= a \ mod \ b$. \textbf{Def. Congruent Modulo} $n>1, a,b\in \mathbb Z: n|a-b \Rightarrow \ a \equiv b \ (mod\ n)$ is an equivalence relation. \textbf{Th.} $a,b\in \mathbb Z,\ n>1: a\equiv b\ (mod\ n) \Leftrightarrow \exists q,q'\in \mathbb Z: a= qn+r \land b=q'n +r$. \textbf{Th. Compatibility} If $a\equiv b(\mod n) \land a\equiv b'(\mod n) \Rightarrow(( a+a' \equiv b+b'; a-a'\equiv b-b; aa'\equiv bb';\forall k\in\mathbb Z: a+k\equiv b+k ; ak \equiv bk;\forall k\in\natzero \ a^k\equiv b^k )\mod n)$. \textbf{Th. Fermat} $a\in \mathbb Z \ and\ p\in\text{Prime }: \not\exists k\in\mathbb Z\ $ s.t. $\ a=kp \Rightarrow a^{p-1} \equiv 1(\mod p)$
|
||||
|
||||
|
||||
\textbf{Graphs}
|
||||
Digraph $G=(N,A):\ |N|=n,\ |A|=m$. The graph induced by $G$ has $E=\{\{u,v\}|(u,v)\in A, u\not=v\}.$ \textbf{Th.} $0\leq |E| \leq m$ no selfloops: $\lceil \frac{m}{2}\rceil \leq |E| \leq m$ and $|V|=n$ always. \textbf{Deg. Lemma} 1. Digraph: $\sum_{v\in N} indeg(v)=\sum_{v\in N} outdeg(v)=|A|$. 2. Graph: $\sum_{v\in V} deg(v) = 2|E|$. \emph{COROLLARY:} Every graph has an even no. of vert. with odd degree.\textbf{Walk length n} digraph(graph): $(v_0,v_1,\ldots ,v_n)\in N(V)^{n+1}|(v_i,v_{i+1})\in A(\{v_i,v_{i+1}\}\in E)\ \forall 0\leq i< n$
|
||||
ALLOWED: $n=0$
|
||||
\textbf{Defs. } $\pi = (v_0,\ldots ,v_n)$ be walk in graph/digraph: \emph{Path} $\forall 0\leq i < j \leq n| v_i\not=v_j$; \emph{Tour} $v_0=v_n$; \emph{Cycle} Tour with $n\geq 3(1)$ (di)graph and $\forall 0\leq i < j < n| v_i\not=v_j$.
|
||||
\textbf{Def. }Succ. rel $S_G$ and Reachabilty rel. $R_G$ on graph $G=(V,E)$: $(u,v)\in S_G \Leftrightarrow \{u,v\}\in E$; $(u,v)\in R_G \Leftrightarrow \exists$ walk from $u$ to $v$. Digraphs analog.
|
||||
Recall Trans closure plus: $R^0 =$ identity relation,let G be (di)graph. Then \textbf{Th.} $(u,v)\in S^n_G \Leftrightarrow \exists$ walk of length $n$ from $u$ to $v$. and \textbf{Corr.} $R_G=\bigcup_{n=0}^\infty S^n_G$. AKA $R_G$ is refl. and trans. closure of $S_G$.
|
||||
\textbf{Th.} (Di)graph $G$: $\exists \text{ path from } u \text{ to } v \Leftrightarrow \exists \text{ walk from } u \text{ to } v \Leftrightarrow (u,v)\in R_G$. \textbf{Th. }For GRAPH $R_G$ is an equiv. rel. The equiv classes are called \emph{connected components} of $G$. Connected iff. 1 con.comp. DIGRAPH: Eq. cl. of $R_G^{\text{ind}}$ of induced gr. \emph{weakly con. comp}.
|
||||
\textbf{Def. $M_G$} (Di)graph $G$ if $(u,v)\in R_G \land (v,u)\in R_G$ then \emph{mutually reachable}. DIGRAPH: $M_G$ is an eq.rel. Eq. class of $M_G$ in digraph: \emph{strongly con. comp}.
|
||||
\textbf{Def.} Acyclic $\land$ (graph: \emph{forest}; digraph: \emph{DAG}). Forest and connected = tree. \textbf{Th.} $G$ graph, then is tree iff. $\exists$ exactly one path from any $u\in V$ to any $v\in V$.\textbf{Let G be tree: }\emph{leaf}
|
||||
of G has deg=1. \underline{Th.} $|V|\geq 2\Rightarrow G$ has min. 2 leaves.\underline{Th.}$V\not=\emptyset\Rightarrow|E|=|V|-1$.
|
||||
Let $C$ be con. comp of forest $G\Rightarrow |E|=|V|-|C|$.\textbf{Th. }$G$ gr. with $V\not=\emptyset$. $G$ is $\text{tree}\Leftrightarrow \text{acyclic}\land \text{connected}\Leftrightarrow \text{acyclic}\land |E|=|V|-1\Leftrightarrow \text{connected}\land |E|=|V|-1\Leftrightarrow \forall u,v\in V \ \exists$ exactly one path from u to v. An \emph{isomorphism} from $G$ to $G'$ is a bijective function $\sigma : V\rightarrow V'$, s.t. $\forall u,v\in V: \{u,v\}\in E \Leftrightarrow \{\sigma (u),\sigma (v)\}\in E'$.
|
||||
\emph{Induced Subgr.} is induced via $V'\subseteq V$. They are the largest subgraphs for a given set of vertices;
|
||||
|
||||
\textbf{\underline{Recurrence}}
|
||||
$g:\mathbb{R}_{0}^{+} \rightarrow \mathbb R$. $O$,$\Omega$ ,$\Theta$
|
||||
\textbf{Th.} Let $A\geq 1, B> 1, T(n) = A\cdot T(\frac{n}{B}) + f(n)$. Then: $\exists \epsilon > 0| f(n) = O(n^{\log_{B}(A)-\epsilon}) \Rightarrow T(n) = \Theta(n^{\log_B A}); f(n)=\Theta(n^{\log_B A}) \Rightarrow T(n)= \Theta (n^{\log_B A}\log_2 n); f(n) = \Omega(n^{\log_B (A) + \epsilon}) \Rightarrow T(n)= \Theta(f(n))$.
|
||||
|
||||
|
||||
\textbf{Prop. Logic}
|
||||
|
||||
\textbf{Def. Syntax of PF:}
|
||||
Let $A$ be a set of \textit{atomic propositions}. The set of \textit{propositional formulas over $A$} is inductively defined as follows:\\
|
||||
Every \textit{atom} $a\in A$ is a PF over $A$.\\
|
||||
If $\varphi$ is a PF over $A$, then so is $\lnot\varphi$.
|
||||
If $\varphi \text{ and } \psi$ are PFs over $A$, then so are the \textit{conjunction} $(\varphi \land \psi)$ and the \textit{disjunction} $(\varphi \lor \psi)$.
|
||||
Implication: $(\varphi\rightarrow\psi)\equiv(\lnot\varphi\lor\psi)$.
|
||||
Biconditional: $(\varphi\leftrightarrow\psi)\equiv((\varphi\rightarrow\psi)\land(\psi\rightarrow\varphi))$.\\
|
||||
|
||||
\textbf{Def. Semantics of PF:}
|
||||
A \textit{truth assignment (aka. interpretation)} for a set of atomic propositions $A$ is a function $\mathcal I \colon A \to \{0,1\}$. A PF $\varphi$ (over $A$) \textit{holds under $\mathcal I$} (written $\mathcal I \models \varphi$) according to the following definitions:\\
|
||||
$\mathcal I \models a$ iff. $\mathcal I(a) = 1$ for all $a\in A$\\
|
||||
$\mathcal I \models \lnot \varphi$ iff. not $\mathcal I \models \varphi$\\
|
||||
$\mathcal I \models (\varphi \land \psi)$ iff. $\mathcal I \models \varphi$ and $\mathcal I \models \psi$\\
|
||||
$\mathcal I \models (\varphi \lor \psi)$ iff. $\mathcal I \models \varphi$ or $\mathcal I \models \psi$\\
|
||||
\textbf{Properties of PFs:}
|
||||
A PF $\varphi$ is (i) \textit{satisfiable} if it has at least one model (ii) \textit{unsatisfiable} if it is not satisfiable (iii) \textit{valid} (also \textit{tautology}) if it is true under EVERY interpretation (iv) \textit{falsifiable} if it is not a tautology.\\
|
||||
|
||||
\textbf{Def. Equivalence of PFs:}
|
||||
Two PFs $\varphi$ and $\psi$ over $A$ are \textit{(logically) equivalent} ($\varphi\equiv\psi$) if FOR ALL INTERPRETATIONS $\mathcal I$ for $A$ it is true that $\mathcal I \models \varphi$ iff $\mathcal I \models \psi$.\\
|
||||
|
||||
\textbf{Some Equivalences: }
|
||||
(i)Idempotence: $(\varphi\land\varphi)\equiv\varphi$, $(\varphi\lor\varphi)\equiv\varphi$ (ii)Commutativity: $(\varphi\lor\psi)\equiv(\psi\lor\varphi)$,$(\varphi\land\psi)\equiv(\psi\land\varphi)$ (iii) Associativity: ($((\varphi\lor\psi)\lor\chi)\equiv(\psi\lor(\varphi\lor\chi))$, ($((\varphi\land\psi)\land\chi)\equiv(\psi\land(\varphi\land\chi))$ (iv) Absorbtion: $(\varphi\land(\varphi\lor\psi))\equiv\varphi$ ditto with and/or flipped (v) Distributivity: $(\varphi\land(\psi\lor\chi))\equiv((\varphi\land\psi)\lor(\varphi\land\chi))$ ditto with and/or flipped (vi)Double neg.: $\lnot\lnot\varphi\equiv\varphi$ (vii) De Morgan: $\lnot(\varphi\land\psi)\equiv(\lnot\varphi\lor\lnot\psi) $ ditto and/or flipped (viii) Tautology rules: $(\varphi \lor \psi)\equiv\varphi$ and $(\varphi\land\psi)\equiv\psi$ if $\varphi$ is a tautology (ix)Unsatisfiability rules: $(\varphi \lor \psi)\equiv\psi$ and $(\varphi\land\psi)\equiv\varphi$ if $\varphi$ is unsatisfiable.\\
|
||||
|
||||
\textbf{Substitution Theorem: } Let $a$ and $a'$ be equivalent PEs over $A$. Let $c$ be a PE with (at least) one occurrence of the subformula $a$. Then $c$ is equivalent to $c'$, where $c'$ is constructed from $c$ by replacing an occurrence of $a$ with $a'$.
|
||||
|
||||
\underline{Literal } is an atomic proposition or its negation. \underline{Clause } is a disjunction of literals. \underline{Monomial} is conjunction of literals.
|
||||
Clause and monomial are also used in case of only one literal.\\
|
||||
\textbf{Def. CNF.} A formula is in CNF if it is a conjunction of clauses.\\
|
||||
\textbf{Def. DNF.} A formula is in DNF if it is a disjunction of monomials.\\
|
||||
\textbf{Algo to construct CNF:} 1. Replace abbreviations $\rightarrow, \leftrightarrow$ by their definition. $(\rightarrow/\leftrightarrow)$-elimination. 2. Use DeMorgan and double neg. rule to move negations inside. 3. Distribute $\lor$ over $\land$ w/ distributivity. 4. (optional) Simplify at the end or at intermediate steps (e.g. w/ idempotence).\\
|
||||
\underline{Theorem. } A formula in CNF is a tautology iff. every clause is a tautology. A formula in DNF is satisfiable iff at least one of its monomials is satisfiable.\\
|
||||
|
||||
\textbf{Def. Model for KB. } Let KB be a \textit{knowledge base} over $A$, i.e. a set of PF over $A$.
|
||||
A truth assignment $\mci$ for $A$ is a model for KB ($\mci\models KB$) if $\mci$ is a model for EVERY formula $\varphi\in KB$.\\
|
||||
\textbf{Props of KBs. } A KB is\\
|
||||
1. satisfiable if KB has at least one model;
|
||||
2. unsatisfiable if KB is not satisfiable;
|
||||
3. valid (or a tautology) if every interpretation is a model for KB;
|
||||
4. falsifiable if KB is no tautology.
|
||||
|
||||
\textbf{Def. Logical Consequence. } Let KB be a set of formulas and $\varphi$ a formula. We say KB logically implies $\varphi$ ($KB\models\varphi$) if all models of KB are also models of $\varphi$.\\
|
||||
\underline{Deduction Th. } $KB \cup \{\varphi\} \models \psi \text{ iff } KB\models(\varphi\rightarrow\psi)$.;
|
||||
\underline{Contraposition The.} $KB \cup \{\varphi\} \models \lnot\psi \text{ iff } KB \cup \{\psi\}\models\lnot\varphi$.;
|
||||
\underline{Contradiction Th.} $KB\cup\{\varphi\} \text{ is unsatisfiable iff } KB\models\lnot\varphi$ \\
|
||||
|
||||
\textbf{Some Inference Rules. } Modus Ponens $\frac{\varphi,\ (\varphi\rightarrow\psi)}{\psi}$. Modus Tollens $\frac{\lnot\psi,\ (\varphi\rightarrow\psi)}{\lnot\varphi}$. AND-Elim. $\frac{(\varphi\land\psi)}{\varphi/\psi}$. AND-Intro. $\frac{\varphi,\ \psi}{(\varphi\land\psi)}$. OR-Intro. $\frac{\varphi}{(\varphi\lor\psi)}$. Bimp-Elim. $\frac{(\varphi\leftrightarrow\psi)}{(\varphi\rightarrow\psi)/(\psi\rightarrow\varphi)}$\\
|
||||
\textbf{Def. Calculus. }A set of inference rules is called a calculus.
|
||||
\textbf{Def. Correctness and Completeness of a Calculus.} We write $KB\vdash_C \varphi$ if there is a derivation of $\varphi$ from KB in calculus $C$.\\
|
||||
A calculus $C$ is \textit{correct} if for all $KB$ and $\varphi$ $KB\vdash_C \varphi$ implies $KB \models \varphi$\\
|
||||
and called \textit{complete} if $KB\models\varphi$ implies $KB\vdash_C \varphi$.\\
|
||||
\textbf{Refutation Complete: } A calc. $C$ is \emph{refutation-complete} if $KB\vdash_C \square$ (symbol for provably unsatisfiable formulas) for all unsatisfiable $KB$. \textbf{Resolution Calc.:} Use ref-completeness and show $KB\models \varphi$ by deriving $KB\cup\{\lnot \varphi\}\vdash_R \square$ with \emph{resolution calc. $R$}. Rule: $\frac{C_1 \cup \{X\}, C_2 \cup \{\lnot X\}}{C_1\cup C_2}$. Example from $KB$ to $\Delta$: $KB=\{(P\lor P), ((\lnot P \lor Q) \land (\lnot P \lor R) \land (Q \lor \lnot P) \land R), ((\lnot Q \lor \lnot R \lor S) \land P) \} \Rightarrow \Delta = \{\{P\},\{\lnot P,Q\},\{\lnot P, R\}, \{R\},\{\lnot Q, \lnot R, S\}\}$
|
||||
\textbf{Predicate Logic}\\
|
||||
\textbf{Signature.}
|
||||
A \textit{signature} (of predicate logic) is a 4-tuple $\mathcal{S} = \langle \mathcal{V},\mathcal{C},\mathcal{F},\mathcal{P}\rangle$ consisting of the following disjoint sets, where all sets are \underline{finite or countable}:\\
|
||||
$\mathcal{V}$ of \textit{variable symbols};
|
||||
$\mathcal{C}$ of \textit{constant symbols};
|
||||
$\mathcal{F}$ of \textit{function symbols};
|
||||
$\mathcal{P}$ of \textit{predicate/relation symbols};
|
||||
Every function symbol $f\in\mathcal{F}$ and predicate symbol $P\in\mathcal{P}$ has an associated \textit{arity} $ar(f),ar(P)\in\mathbb{N}_1$.\\
|
||||
|
||||
\textbf{Term.} Let $\mathcal{S} = \langle \mathcal{V},\mathcal{C},\mathcal{F},\mathcal{P}\rangle$ be a signature. A \textit{term} over $\mathcal{S}$ is inductively constructed according to the following rules:\\
|
||||
1. Every variable symbol $v\in \mathcal{V}$ is a term.;
|
||||
2. Every constant symbol $c\in\mathcal{C}$ is a term.;
|
||||
3. If $t_1,...,t_k$ are terms and $f\in\mathcal{F}$ is a function symbol with arity $k$, then $f(t_1,...,t_k)$ is a term.\\
|
||||
|
||||
\textbf{Formula.} For a signature $\mathcal{S} = \langle \mathcal{V},\mathcal{C},\mathcal{F},\mathcal{P}\rangle$ the set of predicate logic formulas over $\mathcal{S}$ is inductively defined as follows:\\
|
||||
1. If $t_1,...,t_k$ are terms (over $\mathcal{S}$) and $P\in\mathcal{P}$ is a $k$-ary pred. sym., then the \textit{atomic formula (or atom)} $P(t_1,..,t_k)$ is a formula over $\mathcal{S}$.;
|
||||
2. If $t_1,t_2$ are terms (over $\mathcal{S}$), then the \textit{identity} $t_1=t_2$ is a formula over $\mathcal{S}$.;
|
||||
3. If $x\in\mcv$ is a variable symbol and $\varphi$ a formula over $\mcs$,\\ then the \textit{universal quantification} $\forall x\varphi$
|
||||
and the \textit{existential quantification} $\exists x \varphi$ are formulas over $\mcs$.;
|
||||
4. If $\varphi$ is a formula over $\mcs$, then so is its \textit{negation} $\lnot\varphi$.;
|
||||
5. If $\varphi$ and $\psi$ are formulas over $\mcs$, then so are \textit{conjunction} $(\varphi\land\psi)$ and the \textit{disjunction} $(\varphi\lor\psi)$.\\ \emph{Sentence/closed formula }has no free vars. \emph{Open} has a least one free var. \emph{Ground formulas} are closed f. without quantifiers.
|
||||
\textbf{Th.etc.} Ded./Contrapos./Contra.dict theorems also hold. All Equivs from prop. logic hold plus: $(\forall x \varphi \land \forall x \psi) \equiv \forall x(\varphi \land \psi)$; $(\forall x \varphi \lor \forall x \psi) \models \forall x(\varphi \lor \psi)$;$x\not\in free( \psi): (\forall x \varphi \land (\lor) \psi) \equiv \forall x ( \varphi \land (\lor) \psi)$;$\lnot \forall x \varphi \equiv \exists x \lnot \varphi$; all hold same if change forall with exist.
|
||||
\end{multicols*}
|
||||
|
||||
\end{document}
|
||||
BIN
inetsec/images/tcp_cc.png
Normal file
|
After Width: | Height: | Size: 137 KiB |
499
inetsec/main.tex
Normal file
@ -0,0 +1,499 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% writeLaTeX Example: A quick guide to LaTeX
|
||||
%
|
||||
% Source: Dave Richeson (divisbyzero.com), Dickinson College
|
||||
%
|
||||
% A one-size-fits-all LaTeX cheat sheet. Kept to two pages, so it
|
||||
% can be printed (double-sided) on one piece of paper
|
||||
%
|
||||
% Feel free to distribute this example, but please keep the referral
|
||||
% to divisbyzero.com
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% How to use writeLaTeX:
|
||||
%
|
||||
% You edit the source code here on the left, and the preview on the
|
||||
% right shows you the result within a few seconds.
|
||||
%
|
||||
% Bookmark this page and share the URL with your co-authors. They can
|
||||
% edit at the same time!
|
||||
%
|
||||
% You can upload figures, bibliographies, custom classes and
|
||||
% styles using the files menu.
|
||||
%
|
||||
% If you're new to LaTeX, the wikibook is a great place to start:
|
||||
% http://en.wikibooks.org/wiki/LaTeX
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
\documentclass[8pt,landscape]{article}
|
||||
\usepackage{amssymb,amsmath,amsthm,amsfonts}
|
||||
\usepackage{multicol,multirow}
|
||||
\usepackage{calc}
|
||||
\usepackage{ifthen}
|
||||
\usepackage{listings}
|
||||
\usepackage{graphicx}
|
||||
\graphicspath{ {./images/} }
|
||||
|
||||
\usepackage{helvet}
|
||||
\renewcommand{\familydefault}{\sfdefault}
|
||||
\usepackage[fontsize=6pt]{fontsize}
|
||||
|
||||
\usepackage[landscape]{geometry}
|
||||
|
||||
\geometry{a4paper, landscape, margin=0.25in}
|
||||
\usepackage[colorlinks=true,citecolor=blue,linkcolor=blue]{hyperref}
|
||||
\usepackage[
|
||||
protrusion=true,
|
||||
activate={true,nocompatibility},
|
||||
final,
|
||||
tracking=true,
|
||||
kerning=true,
|
||||
spacing=true,
|
||||
factor=1100]{microtype}
|
||||
\SetTracking{encoding={*}, shape=sc}{40}
|
||||
%%Packages added by Sebastian Lenzlinger:
|
||||
\usepackage{enumerate} %% Used to change the style of enumerations (see below).
|
||||
|
||||
\newtheorem{definition}{Definition}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
\newtheorem{axiom}{Axiom}
|
||||
\newtheorem{lem}{Lemma}
|
||||
\newtheorem{corr}{Corollary}
|
||||
|
||||
\usepackage{tikz} %% Pagacke to create graphics (graphs, automata, etc.)
|
||||
\usetikzlibrary{automata} %% Tikz library to draw automata
|
||||
\usetikzlibrary{arrows} %% Tikz library for nicer arrow heads
|
||||
%%End
|
||||
\microtypecontext{spacing=nonfrench}
|
||||
|
||||
\ifthenelse{\lengthtest { \paperwidth = 11in}}
|
||||
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
|
||||
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
|
||||
{\geometry{top=0.5cm,left=0.5cm,right=0.5cm,bottom=0.5cm} }
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
}
|
||||
\pagestyle{empty}
|
||||
\makeatletter
|
||||
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
|
||||
{0.1mm}%
|
||||
{0.0001mm}%x
|
||||
{\normalfont\normalsize\bfseries}}
|
||||
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
|
||||
{0mm}%
|
||||
{0mm}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{1ex plus .2ex}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\makeatother
|
||||
\setcounter{secnumdepth}{0}
|
||||
\setlength{\parindent}{0pt}
|
||||
\setlength{\parskip}{0pt plus 0.5ex}
|
||||
% -----------------------------------------------------------------------
|
||||
|
||||
\title{Internet and Security FS23}
|
||||
|
||||
\begin{document}
|
||||
\tiny
|
||||
\raggedright
|
||||
\footnotesize
|
||||
|
||||
\begin{multicols*}{4}
|
||||
\setlength{\premulticols}{1pt}
|
||||
\setlength{\postmulticols}{1pt}
|
||||
\setlength{\multicolsep}{1pt}
|
||||
\setlength{\columnsep}{1pt}
|
||||
|
||||
|
||||
\section{Intro}
|
||||
\textbf{Packet Switching}:
|
||||
Messages are divided into packets for transmission.
|
||||
Packets travel through links and switches (routers and switches).
|
||||
Store-and-forward transmission is used.
|
||||
End-to-end delay: $d_{\text{end-to-end}} = N\frac{L}{R}$.
|
||||
\textbf{Circuit Switching}:
|
||||
Resources reserved for session duration.
|
||||
Constant transmission rate.
|
||||
Used in traditional telephone networks.
|
||||
Transmission time calculated based on reserved capacity.
|
||||
\textbf{Comparison}:
|
||||
Packet switching offers better sharing of capacity.
|
||||
Circuit switching reserves capacity regardless of demand.
|
||||
Packet switching is more efficient and cost-effective.
|
||||
\textbf{Queuing Delays and Packet Loss}:
|
||||
Output buffers used to store packets.
|
||||
Queuing delays occur during congestion.
|
||||
Full buffer leads to packet loss.
|
||||
|
||||
\subsection{IP}
|
||||
**4.1.2 Network Service Model**
|
||||
\textbf{Network layer services:}
|
||||
- Guaranteed delivery
|
||||
- Guaranteed delivery with bounded delay
|
||||
- In-order packet delivery
|
||||
- Guaranteed minimal bandwidth
|
||||
- Security (encryption)
|
||||
- Internet provides best-effort service with no guarantees on delivery, order, delay, or minimal bandwidth.
|
||||
|
||||
|
||||
\textbf{Router components}:
|
||||
- Input ports
|
||||
- Switching fabric
|
||||
- Output ports
|
||||
- Routing processor
|
||||
- Input ports: Terminate incoming links, perform link-layer functions, and perform lookup for forwarding.
|
||||
- Switching fabric: Connects input ports to output ports for packet transfer.
|
||||
- Output ports: Store and transmit packets, perform link-layer and physical-layer functions.
|
||||
- Routing processor: Performs control-plane functions, executes routing protocols, maintains routing tables, and computes the forwarding table.
|
||||
- Analogy: Packet forwarding is like cars entering and leaving a roundabout, with entry stations determining the roundabout exit based on destination.
|
||||
\textbf{Subnet} Hosts reachable without via router.
|
||||
\subsection{DHCP }
|
||||
provide permanent or temporary IP addresses.
|
||||
It also offers subnet masks, default gateway (router) addresses, and DNS server addresses.
|
||||
DHCP is plug-and-play, suitable for various networks.
|
||||
It uses a client-server model, with clients discovering DHCP servers.
|
||||
If no server is available, a relay agent forwards DHCP messages.
|
||||
The DHCP process involves server discovery, offers, client request, and server acknowledgment.
|
||||
Clients can renew leases to extend IP address usage.
|
||||
DHCP doesn't maintain TCP connections as nodes move between subnets.
|
||||
Protocol: Discover[optional], Offer[optional], Request, ACK.
|
||||
\subsection{DNS}
|
||||
\textbf{ Services:} Host aliasing, mail server aliasing, load distribution.
|
||||
\textbf{Resource Records:} (name, value, type, ttl)
|
||||
\textbf{Classes of DNS Servers:} Root servers provide IP of Top-level domain (TLD) servers, which provide IP of Authoritative DNS servers. Also local DNS (not part of hierarchy but still important: talks to the other DNS servers.)
|
||||
\textbf{Caching} So not always to go through whole DNS hierarchy.
|
||||
|
||||
\section{Transport Layer} Logical IPC.
|
||||
\emph{Services:} reliable, in-order delivery incl congestion and flow control and connection $\Rightarrow$ TCP. Unreliable, unordered delivery, as is extension of IP $\Rightarrow$ UDP. \emph{Unavailable services:} delay guarantees, bandwidth guarantees.
|
||||
\subsection{Socket Prog}
|
||||
\emph{A socket is:} bi-direct IPC abstraction, comm endpoint, an API for IPC.
|
||||
\emph{Types:} \verb|SOCK_STREAM| connection oriented, guaranteed delivery (e.g. TCP), \verb|SOCK_DGRAM| datagram based (e.g. UDP), \verb|SOCK_RAW| direct access to network layer, \verb|SOCK_PACKET| dir. acc. to link layer. etc.
|
||||
\subsection{Reliable Transfer}
|
||||
\emph{Rel. Channel:} No xtra, just send, recv. \emph{Ch. with Bit Errors:} Sender waits for receiver feedback before sending next packet (Stop-and-wait) ACK/NAK. But what if ACK/NAK corrupt? Simple sol: sequence numbers. Then, no NAK, if Duplicate ACK, sender knows following packet not received. \emph{Lossy Ch. with Bit Errors:} Add timer, resend after timeout (sender side). \emph{Performance Stop-and-wait:} $d_{trans}=\frac{L\ (\text{packet size})}{R\ (\text{trans rate})}$, \emph{Sender util (time actually busy sending bits):} $U_{sender}=\frac{(L/R)}{RTT + (L/R)}$, very bad. Idea $\Rightarrow$ Pipelining: Keep sending within a send window. \emph{Go-back-N} After timeout: resend everything from oldest unacked packet. ACK as cumulative ack (everything up to that sequence number received). Rec. discards out of order packets (no rec buffering). Go-Back-N (GBN) protocol allows sender to transmit multiple packets without waiting for acknowledgments, limited to N unacknowledged packets.
|
||||
Sender's view: base represents oldest unacknowledged packet, nextseqnum is smallest unused sequence number.
|
||||
Four intervals in sequence numbers: [0, base-1] for acknowledged packets, [base, nextseqnum-1] for sent but unacknowledged packets, [nextseqnum, base+N-1] for immediate sending, and >= base+N for future use.
|
||||
N is the window size, defining the sliding window protocol.
|
||||
Packet sequence numbers are carried in a fixed-length field in the packet header.
|
||||
Receiver discards out-of-order packets and sends ACKs for in-order packets.
|
||||
GBN sender responds to events: invocation from above, receipt of ACK, timeout.
|
||||
GBN incorporates techniques like sequence numbers, cumulative acknowledgments, checksums, and timeout/retransmission.\emph{Selective Repeat:} Resend only specific packet.The receiver individually acknowledges each correctly received packet.
|
||||
Out-of-order packets are buffered at the receiver until missing packets are received.
|
||||
The sender retransmits only those packets suspected to be lost or corrupted.
|
||||
SR uses a window size to limit the number of outstanding, unacknowledged packets.
|
||||
The sender and receiver windows may not always coincide, leading to potential performance issues.
|
||||
Duplicate packets can occur due to packet reordering in network channels.
|
||||
Sequence number reuse is guarded against by ensuring a maximum packet lifetime.
|
||||
Additional techniques and extensions exist to address packet reordering challenges.
|
||||
\subsection{TCP}
|
||||
\emph{Overview:} P2P (1 send, 1 rec), reliable, in order BYTE stream, pipelined, full duplex MSS, connection oriented (handshaking), flow controlled (sender wont overwhelm recv).
|
||||
\textbf{RTT and Timeout:} \verb|EstRTT|$_{new}=(1-\alpha)*$\verb|EstRTT|$_{old}+ \alpha *$ \verb|SampleRTT|. \verb|DevRTT| $= (1-\beta)*$ DevRTT$_{old} + \beta* |$\verb|SampleRTT| $-$ \verb|EstRTT|$|$. \verb|TimeoutInterval| $=$ \verb|EstRTT| $+ 4 *$\verb|DevRTT|. \textbf{Fast Retransmit} sender get 3 ACKS for same data $\Rightarrow$ resend unacked segment with smallest seqno. since likely unacked segment lost, no wait for timeout. \textbf{Flow Control}recv: \verb|rwnd| $\leq$ \verb|RcvBuff|- (\verb|LstByteRcv| - \verb|LstByteRead|), sender: \verb|LstByteSent| - \verb|LstByteAcked| $\leq$ \verb|rwnd|.
|
||||
\subsection{TCP Congestion Control}
|
||||
\emph{Approach:} Additive increase, Multiplicative Decrease (AIMD sawtooth graph bandwidth probing). sender: \verb|LstByteSent| - \verb|LstByteAcked| $\leq$ min(\verb|rwnd|,\verb|cwnd|). Send rate$\approx cwnd/RTT bytes/sec$ see FSM
|
||||
\emph{Fairness} TCP converges to equal bandwidth share.
|
||||
\includegraphics[width=\linewidth]{images/tcp_cc.png}
|
||||
\section{Network Layer}
|
||||
\emph{Forwarding and Routing} Routing algo says what goes in fw table. Forward is local based on dest. host addr within router. Routing is global. Longest prefic matching when doing fw table lookup. Routng algo determens end2end path through network.
|
||||
\subsection{Routing I}
|
||||
global: all routers have complete topology $\Rightarrow$ LinkState algo. decentralized: router knows phys. connected neighbors, and link cost to neighbor. iterative comp, xchange info w neighbor $\Rightarrow$ distance vector algo. static: routes change slowly, dynamic: more quickly, periodic update n response to link cost change.
|
||||
\textbf{Link State Algo Dijkstra:} least cost path from one node to all others, net topology, link cost known to all nodes (same info), iterative: after k iterations, know least cost paths to k dest.'s. Algo:
|
||||
\begin{lstlisting}
|
||||
Init:
|
||||
N'={u}
|
||||
for all nodes v
|
||||
if v neigh of u
|
||||
D(v) = c(u,v)
|
||||
else D(v) = infinity
|
||||
Loop:
|
||||
find w not in N', s.t. D(w) is a minimum
|
||||
add w to N'
|
||||
update D(V) forall v adj
|
||||
to w and not in N':
|
||||
D(v) = min(D(v), D(w) + c(w,v))
|
||||
/* new cost to v either old cost
|
||||
or know shortest
|
||||
patht w plus cost from w to v */
|
||||
\end{lstlisting}
|
||||
Complexity: $O(n^2)$
|
||||
\textbf{Dist Vect Bellman-Ford:}
|
||||
$d_x(y):=$ cost of least-cost path from x to y. then $d_x(y)=\min_v\{c(x,v) + d_v(y)\}$. $D_x(y)=$ \emph{estimate} of least cost x to y, x maintains dist vector $\mathbf{D}_x=[D_x(y):y\in N]$, node x additionally: knows cost to each neighbor v: c(x,v), and maintains the dist vector of each neighbor.
|
||||
\emph{key idea} from time to time: each node sends own DV to neighbor, when x recv new DV, update own DV using BF equation: $Dx(y)\leftarrow min_v\{c(x,v)+D_v(y)\}$ for each node $y\in N$
|
||||
\begin{lstlisting}
|
||||
Initialization:
|
||||
for all destinations y in N:
|
||||
Dx(y)= c(x,y)
|
||||
/* if y is not a neighbor
|
||||
then c(x,y)= infty*/
|
||||
for each neighbor w
|
||||
Dw(y) = ? for all destinations y in N
|
||||
for each neighbor w
|
||||
send distance vector
|
||||
Dx = [Dx(y): y in N] to w
|
||||
loop
|
||||
wait (until I see a link cost
|
||||
change to some neighbor w or
|
||||
until I receive a distance
|
||||
vector from some neighbor w)
|
||||
for each y in N:
|
||||
Dx(y) = minv{c(x,v) + Dv(y)}
|
||||
if Dx(y) changed for any destination y
|
||||
send distance vector
|
||||
Dx = [Dx(y): y in N] to all neighbors
|
||||
forever
|
||||
\end{lstlisting}
|
||||
Good news travels fast, bad news travels slow (count to infty problem). Solution: poisoned reverse, If Z routes via Y to X: Z tells Y c(Z,Y)=infinite, so Y wont route to X via Z.
|
||||
\subsection{Routing 2}
|
||||
Intra-AS routing: protocol used in same AS, gateway router: edge of own AS, has link to router in other AS. Routing across AS: inter-AS routing protocol.
|
||||
Fw table configured by inter and intra protocol: intra for internal dests, intra and inter for external dests.
|
||||
\emph{Inter AS tasks:} 1. learn from inter AS protocol that subnet x is reachable via mult. gateways $\rightarrow$ use info from intra AS to determine cost of least cost paths to each gateway $\rightarrow$ hot potato routing: choose gateway w. smallest least cost $\rightarrow$ from fw table get interface I that goes to least cost gateway, Enter (x,I) in fw table.
|
||||
\textbf{Intra AS Routing:} RIP: Routing info protocol. DV algo with no of hops as dist metric. DV uses (subnet,hops) pair. RIP routing tables managed by app lvl daemon, advertiuses in UDP periodically.
|
||||
\textbf{BGP} provides each AS with: eBGP: get subnet reachability info from neighboring ASs. iBGP: propagate reachability info to all AS-internal routers. determine 'good' routes to other networks. $\Rightarrow$ allow subents to advertise existence to rest of Internet. \emph{BGP session} two BGP routers (peers) xch BGP msgs: path advertising to different dest network prefixes via semi permanent TCP. When router learns new prefix, creates new entry for prefix in fw table.
|
||||
Advertised prefix attributes: prefix + attribute = "route". \emph{Important attribute:} AS-PATH: AS thorough which prefic ad has passed, NEXT-HOP (is IP of addr of router interface that begins AS-PATH aka where to leave current AS)
|
||||
\textbf{Summary how info gets in FW table:}1. router becomes aware of prefix via BGP ad from others. 2. Determine router output port for prefix: 2.1 use BGP route selection to get best inter-AS route. 2.2 Use OSPF to get best intra AS route leading to best inter-AS route. 2.3 router identifies router port for best route. 3. enter prefix-port entry in fw table.
|
||||
\emph{Why different intra inter AS routing?:} \emph{policy}: inter: admin wants control over how/who traffic routed. intra: single admin, no policy decisions needed. \emph{scale:} hierarchical routing saves table sizes, reduced update traffic. \emph{performance:} intra: can focus on performance. inter: policy may dominate over performance.
|
||||
\section{Data Link Layer}
|
||||
\textbf{Terminology:} \emph{nodes: }hosts and routers. \emph{links:} comm channals connecting adjacent nods along comm path. \emph{frame:} layer-2 packet encapsulating datagram. \emph{Purpose:} data-link layer transfers datagram from node to physically adjacent node via link.
|
||||
\subsection{MAC: TDMA, random access}
|
||||
\emph{Link types:} point-to-point: like ethernet switch to host. broadcast: like old ethernet, 802.11 wireless LAN. \emph{Probelm:} collision. $\Rightarrow$ Media Access Protocol (MAC): Distributed algo, determine how nodes share channel, when who transmit. no out-of-band channel for coordination, same channel for protocol and normal data.
|
||||
\emph{MAC protocol types:} Channel partitioning: TDMA (divide time on channel into rounds, every station fixed slot in round, unused slots go idle), FDMA (divide frequency, stations get fixed freq band, unused trans time in freq bands go idle). Random Access: if node has packet, transmit at full ch rate R, no a priori coord among nodes, can lead to collision, so MAC protocol specifies how to detect and/or recover collisions. All following are random access.
|
||||
\emph{slotted ALOHA} assumption: all frames same size; time divided into equal size slots, node starts transmitting only at slot beginning, synced nodes, if 2 or more nodes transmit in slot, all nodes detect collision. Operation: when node gets fresh frame, transmit in next slot, if no collision, node can send frame in next slot, if collision, node retransmits frame each subsequent slot with prob. p until success. Pros: single active node can cont trans at full rate of ch, highly decentralized, aka only slots in nodes need to be in sync, simple. Cons: collisions, wasted slots, idle slots, clock synchro. Efficiency (long-run frac of successful slots): N nodes transmit with prob. p, then prob that given node has success $=p(1-p)^{N-1}$, and prob that \emph{any} has succ $=Np(1-p)^{N-1}$. If N goes to infty, max effc is .37.
|
||||
\emph{CSMA (carrier sense multiple access)} listen before transmit, if ch idle, then send, else, defer transmission (aka don't interrupt others). collision still possible since prop delay. if collision, entire packet trans time wasted.
|
||||
\emph{CSMA/CD (coll detect)} CD easy if wired, hard if wireless. Abort if coll detected. \emph{Ethernet CSMA/CD algo:} 1. NIC recv datagram from network layer, makes frame. 2. If NIC senses ch idle, start frame transmission. If sense that busy, wait until idle, then transmit. 3. If NIC transmits entire frame w.o. detecting another transm., NIC is finished w. frame. 4. If detects another transm. while transmitting itself, abort and send jam sig. 5. after abort, NIC enters binary (exponential) backoff: after m-th coll, NIC chooses K at random from $\{0,1,2,...,2^m-1\}$, waits K*512 bit times, returns to step 2. longer backoff interval when more collisions.
|
||||
\emph{CSMA/CD efficiency:} $t_{prop}=$ max prop delay between 2 nodes in LAN. $t_{trans}=$ time to transmit max-size frame. $efficiency=1/(1+5t_{prop}/t_{trans})$. goes to 1 if $t_{prop}$ to 0 or $t_{trans}$ to infinity. better than ALOHA, simple, clean, decentralized.
|
||||
\textbf{Taking turns protocols} best of both ch part and random access: polling (master node invites slave nodes to transmit in turn) problem: polling overhead, latency, single point of failure (master node). Bluetooth uses this. Token Passing: control token passed from node to next node sequentially (token ring). Problem: same as polling.
|
||||
\subsection{Ethernet}
|
||||
connectionless, unreliable (recv NIC no acks or nacks, dropped frame only recovered if higher layer uses RDT (like TCP)). MAC Protocol: unslotted CSMA/CD with binary backoff. Ethernet has link layer protocol and format, and physical layer implementation via different media like fiber, cable.
|
||||
\emph{Physical Topology:} bus:all nodes in same collision domain. star: (std today) active switch in center, each spoke own Ethernet protocol, no collisions btw. nodes.
|
||||
\emph{Frame structure:} sending adapter encapsulates IP datagram in Ehternet frame. preamble: 7 bytes for synchro btw, recv and sender clock rates. address: 6 byte source, dest MAC addr. type: indicates higher lvl protocol. CRC: cyclic redundancy check at recv, if error, drop frame.
|
||||
\subsection{ARP}
|
||||
32-bit IP addr is network layer addr for interface. used for layer 3 forwarding. MAC address: used 'locally' to get frame from one interface to another physically-connected interface (same network, in IP-addr sense). Each adapter on LAN has unique LAN address (MAC addr). \emph{Question:} How get interfaces MAC addre if have IP addr? Solution: ARP table, each IP on LAN has table: IP/MAC addr map for some LAN nodes (incl TTL).
|
||||
\emph{ARP protocol: same LAN:} A to B: If Bs MAC not in As table: boadcast ARP queary with Bs IP. All nodes recv, B replies to A with Bs MAC (frame to As MAC, unicast). A caches IP/MAC pair in ARP table until times out. ARP is plug-and-play.
|
||||
\emph{ARP routing to other LAN} A sends datagram to B via router R. A makes IP datagram w. source IP A, dest IP B. A makes link layer frame w. Rs MAC as dest around the datagram. When R gets As frame, removes datagram, passes to IP. R forwards datagram with IP src A, dest B. R makes link layer frame, Rs MAC as src and Bs MAC as dest and A-to-B IP datagram in frame
|
||||
\subsection{Hubs and Switches}
|
||||
\emph{Hubs:} Physical layer repeaters. Incoming bits go to all other links at same rate. all nodes connected to hub can collide, no frame buffering, no CSMA/CD at hub: host NICs detect collisions. Can interconnect LAN segments with backbone hubs but this makes collision domains bigger and bigger. No interconnect possible between 10BaseT and 100BaseT.
|
||||
\emph{Ethernet switch:} Link layer device. store and forward Ethernet frames. Selective forwarding based on incoming MAC. uses CSMA/CD to access segment. transparent, i.e. hosts are unaware of switches. plug-and-play and self-learning: no config! Hosts directly connect to switch interfaces. switch buffers packets. ethernet protocol on each incoming link, but no collisions; full duplex. each link its own collision domain. can transmit simultaneously if no two dest MAC addr are same.
|
||||
\emph{Self Learning:} Switch builds switch table when some host wants to send other host smth. It notes src MAC and from which interface it came. If it cannot find dest MAC: flood, i.e. forward on all interfaces.
|
||||
\subsection{NAT and Firewall}
|
||||
$\bullet$ NAT operates at the network layer (Layer 3) of the OSI model.\
|
||||
$\bullet$ It typically resides in a router or firewall device.\
|
||||
$\bullet$ NAT translates private IP addresses used within a local network to a public IP address for communication with external networks.\
|
||||
$\bullet$ It maintains a translation table that maps private IP addresses to public IP addresses.\
|
||||
$\bullet$ NAT can be configured in different modes, such as static NAT, dynamic NAT, and port address translation (PAT).\
|
||||
$\bullet$ Static NAT maps a specific private IP address to a specific public IP address.\
|
||||
$\bullet$ Dynamic NAT assigns public IP addresses from a pool of available addresses on a first-come, first-served basis.\
|
||||
$\bullet$ IP pooling is a technique used in dynamic NAT where a range of public IP addresses is allocated for translation.\
|
||||
$\bullet$ NAT supports the migration of services and devices between networks by updating the translation table accordingly.\
|
||||
$\bullet$ IP masquerading, also known as IP spoofing or network address hiding, is a form of NAT where the source IP address of outgoing packets is modified to appear as if they originated from the NAT device itself.\
|
||||
$\bullet$ NAT provides additional security by hiding internal IP addresses from external networks.\
|
||||
$\bullet$ It can also help in load balancing and traffic management by distributing incoming traffic across multiple internal devices.\
|
||||
$\bullet$ Load balancing refers to the distribution of network traffic across multiple servers or devices to optimize resource utilization and improve performance.\
|
||||
$\bullet$ NAT is widely used in home networks, small office networks, and large-scale enterprise networks.\
|
||||
$\bullet$ It plays a crucial role in the adoption of IPv4 in the face of limited address space.\
|
||||
$\bullet$ However, NAT can introduce certain limitations, such as difficulties in hosting certain types of services that require inbound connections.\
|
||||
$\bullet$ The introduction of IPv6, with its larger address space, reduces the need for NAT in future network deployments.
|
||||
|
||||
$\bullet$ A firewall is a network security device that monitors and controls incoming and outgoing network traffic.\
|
||||
$\bullet$ A firewall acts as a barrier between internal networks (e.g., private LAN) and external networks (e.g., the Internet) to enforce security policies.\
|
||||
$\bullet$ Firewalls can be implemented in both hardware and software forms.\
|
||||
$\bullet$ The primary purpose of a firewall is to protect the network from unauthorized access and malicious activities.\
|
||||
$\bullet$ Packet filtering is a basic firewall technique that examines individual packets of network traffic based on pre-defined rules.\
|
||||
$\bullet$ It allows or blocks packets based on criteria such as source/destination IP addresses, ports, and protocols.\
|
||||
$\bullet$ Packet filtering firewalls operate at the network layer (Layer 3) or transport layer (Layer 4) of the OSI model.\
|
||||
$\bullet$ They can be configured to permit or deny specific types of traffic, effectively creating a security perimeter.\
|
||||
$\bullet$ Application gateways, also known as proxy firewalls, operate at the application layer (Layer 7) of the OSI model.\
|
||||
$\bullet$ They act as intermediaries between clients and servers, inspecting and filtering network traffic at the application level.\
|
||||
$\bullet$ Application gateways provide more advanced security features and analyze content of packets, making intelligent decisions based on the application protocol.\
|
||||
$\bullet$ They can prevent unauthorized access, perform deep packet inspection, and provide additional security measures like encryption and authentication.\
|
||||
$\bullet$ Firewalls can be configured to support various network security policies, including allowing or blocking specific protocols (e.g., ICMP, TCP, UDP), defining access control lists (ACLs), and setting up virtual private networks (VPNs).\
|
||||
$\bullet$ Firewalls can also implement Network Address Translation (NAT) to hide internal IP addresses and provide an extra layer of security.\
|
||||
$\bullet$ specialized Next-Generation Firewalls (NGFW) combine traditional packet filtering with advanced features like intrusion detection and prevention, deep packet inspection, and application awareness.\
|
||||
Stateless filtering, aka packet filtering, treats each packet in isolation. makes decisions based solely on packet's individual properties, without considering context in network session or connection. Criteria may include source and destination IP addresses, ports, or protocols.
|
||||
|
||||
Stateful filtering keeps track of active sessions. It checks each packet's state and context as part of a larger conversation. It is more flexible and secure than stateless filtering because it understands the state of network connections, but it is more resource-intensive.
|
||||
|
||||
\subsection{Switches in Data Center Networks}
|
||||
$\bullet$ Role of Switches: In data center networks, switches play a critical role in directing and controlling data traffic between servers and systems. They can operate at several layers of the OSI model, primarily Layer 2 (data link) and Layer 3 (network).
|
||||
|
||||
$\bullet$ Types of Switches: In data centers, both Ethernet switches (Layer 2) and multilayer switches (Layer 3) are commonly used. Layer 2 switches forward packets based on MAC addresses, while Layer 3 switches also incorporate routing functionality, forwarding packets based on IP addresses.
|
||||
$\bullet$ Challenges: Data center networks face several challenges, including scalability, redundancy, load balancing, fault tolerance, and energy efficiency. There's a need to handle a high volume of traffic and simultaneously support a wide range of applications with varying performance needs.
|
||||
|
||||
$\bullet$ Load Balancing: Load balancing at the application layer (Layer 7) involves distributing network traffic across multiple servers based on the content of the client's request. This approach allows for more intelligent and flexible distribution of traffic. Load balancers can take into account factors such as server load, application type, and session information to make routing decisions. For instance, load balancers can direct web traffic to servers optimized for web hosting, and direct database queries to servers optimized for database operations.
|
||||
|
||||
$\bullet$ Rich Switch Interconnection: In large-scale data centers, it's critical to have a high degree of interconnection between switches to ensure low latency, high bandwidth, and fault tolerance. Two common architectures are:
|
||||
\subsection{Wireless: Concepts, CDMA}
|
||||
\emph{Wireless Link Characteristics:}
|
||||
$\bullet$ Signal Strength Attenuation and Path Loss: The strength of a wireless signal decreases (attenuates) as it travels further from the source. This can be affected by the transmission power, frequency of the signal, distance traveled, and environmental factors.
|
||||
|
||||
$\bullet$ Multipath Propagation: In wireless communication, the signal from the transmitter can reach the receiver via multiple paths due to reflection, refraction, and scattering. This can cause interference at the receiver and may result in signal fading.
|
||||
|
||||
$\bullet$ Interference: Wireless links are susceptible to interference from other devices using the same frequency band. This interference can degrade the performance of the wireless network.
|
||||
\emph{Wireless host:} laptop, smartphone etc. run apps, may be stationary or mobile.
|
||||
\emph{base station:} usually connected to wired network, relay -- responsible for sending packets between the wired network and wireless hosts in its area.
|
||||
\emph{wireless link:} usually used to connect mobile to base station, or as backbone link, Multiple access protocol coord link access, various data rates, transmission distance.
|
||||
\emph{Infrastructure mode:} base station connects mobiles into wired network. handoff: mobiles change base station.
|
||||
\emph{ad hoc mode:} no base station. nodes only transmit to other nodes within link coverage. nodes self organize into network.
|
||||
\subsection{Wireless: 802.11}
|
||||
$\bullet$ 802.11 LAN Architecture: An 802.11 wireless LAN typically consists of one or more Access Points (APs) and multiple wireless clients. The APs form the basis of the network and connect the wireless clients to the wired network infrastructure. The wireless clients can operate in two modes: infrastructure mode (connected to an AP) and ad-hoc mode (directly connected to other wireless clients, forming a peer-to-peer network).
|
||||
|
||||
$\bullet$ Channels: The 802.11 standard uses specific frequency bands that are divided into channels. In the 2.4GHz band (used by 802.11b/g/n), there are typically 11 channels in North America, and 13 in most of Europe. The 5GHz band (used by 802.11a/n/ac/ax) has more available channels. Each channel has a certain width, measured in MHz. The choice of channel can influence the wireless network's performance due to potential overlap with nearby networks and interference.
|
||||
|
||||
$\bullet$ Association: Before a wireless client can send or receive data, it needs to associate with an AP. This process involves the client scanning for available networks, selecting one, authenticating, and then associating with the selected AP. Once association is complete, the client can start transmitting and receiving data.
|
||||
|
||||
$\bullet$ Passive and Active Scanning: Passive scanning involves the client listening for Beacon Frames from nearby APs. These beacons contain all the information a client needs to understand the capabilities of the AP. Active scanning, on the other hand, involves the client sending a Probe Request and then waiting for a Probe Response from an AP. This can speed up the process of finding an AP but consumes more power and bandwidth.
|
||||
|
||||
$\bullet$ Multiple Access and Collision Avoidance: To avoid collisions (i.e., two clients trying to send data at the same time), 802.11 uses a method called Carrier Sense Multiple Access with Collision Avoidance (CSMA/CA). When a device wants to transmit, it first listens to the wireless medium to see if other devices are transmitting. If the medium is busy, the device waits for it to become free. It then waits a random amount of additional time before starting its transmission, reducing the chance of a collision. If the medium is free, it can begin transmission immediately. Additionally, 802.11 devices can use RTS (Request to Send) and CTS (Clear to Send) control frames to reserve the medium for a certain amount of time, further reducing the chances of a collision.
|
||||
\subsection{Mobile Internet (cellular, 5G etc.)}
|
||||
Cellular Network Architecture:
|
||||
$\bullet$ A cellular network consists of Mobile Stations (MSs), Base Transceiver Stations (BTSs), Base Station Controllers (BSCs), Mobile Switching Centers (MSCs), and a network backbone.
|
||||
$\bullet$ The MS is the user's device, such as a cellphone.
|
||||
$\bullet$ The BTS is the closest network entity to the MS, often referred to as a cell tower.
|
||||
$\bullet$ The BSC controls multiple BTSs and manages their radio resources.
|
||||
$\bullet$ The MSC manages multiple BSCs and serves as a bridge between the cellular network and the PSTN (Public Switched Telephone Network).
|
||||
$\bullet$ The network backbone includes the servers and high-speed data links that manage internet connectivity.
|
||||
|
||||
The First Hop:
|
||||
$\bullet$ The first hop in cellular internet access is between the MS and the BTS. Data is transferred using radio waves.
|
||||
|
||||
Mobility Handling and GSM Indirect Routing to Mobile:
|
||||
$\bullet$ In GSM (Global System for Mobile Communications), when a call is made to a mobile user, the call is first routed to the user's home network. The home network then queries the current location of the mobile device in a database (the HLR - Home Location Register) and routes the call to that location.
|
||||
$\bullet$ If the mobile user moves during the call, the network performs a handover to transfer the call to a new cell tower without interrupting the call.
|
||||
|
||||
Handoff with Common MSC and Between MSCs:
|
||||
$\bullet$ Handoff or handover is the process of transferring an ongoing call or data session from one cell network to another.
|
||||
$\bullet$ Intra-MSC handover happens when the MS moves from one cell to another within the same MSC. The MSC updates the necessary data and controls the handover.
|
||||
$\bullet$ Inter-MSC handover happens when the MS moves from a cell controlled by one MSC to a cell controlled by another MSC. In this case, the original MSC requests the new MSC to allocate the necessary resources and then transfers control to the new MSC. The original MSC also updates the HLR with the new location of the MS.
|
||||
\section{Multimedia Networking}
|
||||
$\bullet$ Requirements:
|
||||
|
||||
Bandwidth: Multimedia files, especially video, can be large and thus require significant bandwidth for smooth streaming.
|
||||
|
||||
Low Latency: Real-time or interactive multimedia (like video calls or live streaming) requires low latency for synchronization between users and maintaining the quality of the service.
|
||||
|
||||
Jitter Control: The variation in packet arrival times, or jitter, should be minimized. High jitter can lead to a choppy audio or video playback experience.
|
||||
|
||||
Data Loss: Given that the Internet Protocol allows for packet loss, multimedia content needs to be transferred in such a way that loss doesn't significantly degrade the quality.
|
||||
|
||||
$\bullet$ Solutions:
|
||||
|
||||
Compression: Codecs reduce file sizes, making them easier to transmit over networks. For example, H.264 and VP9 for video, and AAC and MP3 for audio.
|
||||
|
||||
Adaptive Bitrate Streaming: Techniques like DASH or HLS dynamically adjust the quality of a video stream in real time based on network conditions and CPU utilization.
|
||||
|
||||
Buffering: To compensate for network variability, players can buffer, or temporarily download, a certain amount of video or audio before starting playback. This helps ensure smooth playback even when network conditions fluctuate.
|
||||
|
||||
Error Correction Techniques: Forward Error Correction (FEC) and Automatic Repeat reQuest (ARQ) are used to detect and correct errors that occur during the transmission of data.
|
||||
|
||||
Quality of Service (QoS): Networks can implement QoS mechanisms to prioritize multimedia traffic and ensure it receives the necessary bandwidth and low latency.
|
||||
|
||||
CDNs (Content Delivery Networks): CDNs distribute multimedia content closer to the user, reducing the distance that data has to travel and hence improving speed and reducing latency.
|
||||
|
||||
Protocols: Use of specialized protocols like RTP for transport, RTCP for quality feedback, and control protocols like RTSP or SIP for setup and management of multimedia sessions.
|
||||
Network Performance Requirements: Delay, packet loss, bandwidth, and jitter are the primary factors for transmitting audio or video. While multimedia applications are delay-sensitive and require a certain bandwidth, they can tolerate infrequent losses.
|
||||
|
||||
Challenges: All packets currently get the same best-effort service, apart from the loss=0 guarantee provided by TCP. No other performance guarantees exist. Scalability also poses a challenge, especially for one-to-many or many-to-many transmissions.
|
||||
|
||||
Multimedia Networking Solutions:
|
||||
|
||||
$\bullet$ Performance Guarantees: Add Quality of Service (QoS) to the internet stack for performance guarantees. Examples are IntServ and DiffServ.
|
||||
|
||||
$\bullet$ IP Multicast: There have been several attempts at this but with limited success.
|
||||
|
||||
$\bullet$ Adaptive Applications: They make the most out of the best-effort service. These include streaming and (semi-)real-time support over UDP and TCP.
|
||||
|
||||
$\bullet$ Application-Layer Solutions: These include Content Delivery Networks (CDNs), Peer-to-Peer (P2P) networks, application-layer multicast, etc. They have been increasingly successful.
|
||||
$\bullet$ Multimedia Networking: Multimedia networking involves the transmission of different types of data like text, graphics, video, voice, and audio over networks. The key aspect of multimedia networking is that the data is typically synchronized and continuous.
|
||||
$\bullet$ Performance Requirements: These include high bandwidth for large amounts of data, low latency for synchronization and real-time applications, low jitter (variance in delay) for stable and consistent stream quality, and minimal packet loss to prevent quality degradation.
|
||||
$\bullet$ Streaming Media: Streaming involves transmitting media over the network in a continuous stream. It can be either stored streaming (for pre-recorded content like Netflix videos) or live/interactive streaming (for real-time communication like Skype or Zoom calls).
|
||||
UDP for Streaming:
|
||||
$\bullet$ The server sends data at a rate suitable for the client, often ignoring network congestion. Often, the send rate equals the encoding rate, which is constant.
|
||||
$\bullet$ The fill rate equals the constant rate minus packet loss.
|
||||
$\bullet$ A short playout delay (a few seconds) compensates for jitter.
|
||||
$\bullet$ Error recovery is applied if time permits.
|
||||
|
||||
TCP for Streaming:
|
||||
$\bullet$ The server sends data at the maximum rate possible under TCP.
|
||||
$\bullet$ The buffer fill rate fluctuates due to TCP's congestion control.
|
||||
$\bullet$ A larger playout delay smooths the TCP delivery rate.
|
||||
$\bullet$ TCP is popular for streaming over HTTP because it passes more easily through firewalls.
|
||||
$\bullet$ Play-Out Buffering: This mechanism is used at the receiver end to handle network jitter and compensate for the variable packet arrival time. Data packets are temporarily stored and then played out at a consistent rate, maintaining smooth playback.
|
||||
|
||||
$\bullet$ Client Buffering: This involves storing a portion of the received media before it begins playing. This buffer can help handle network delays and fluctuation in delivery rates. Larger buffers can handle larger network delays but increase latency.
|
||||
$\bullet$ RTSP (Real Time Streaming Protocol): This network control protocol is designed to control the delivery of streaming media servers. It supports operations like pause, rewind, and fast forward.
|
||||
$\bullet$ Real-time Interactive Media: This involves live interaction between users, such as video calls. A codec (coder-decoder) is used for compressing and decompressing the media for transmission.
|
||||
|
||||
$\bullet$ RTP/RTCP (Real-time Transport Protocol/Real-time Transport Control Protocol): RTP is a protocol used to transport real-time data, like audio and video, over networks. RTCP works alongside RTP, providing out-of-band control information and periodic transmission statistics for quality of service (QoS) monitoring.
|
||||
|
||||
$\bullet$ SIP (Session Initiation Protocol): This is a signaling protocol used to establish, modify, and terminate multimedia sessions, like VoIP calls.
|
||||
|
||||
$\bullet$ SDP (Session Description Protocol): SDP describes multimedia sessions, providing necessary information for participants to join a session. It is commonly used with RTSP and SIP.
|
||||
|
||||
$\bullet$ H.323: This is an ITU-T standard for audio, video, and data communications over IP networks. It encompasses various protocols for call setup, control, and media transport.
|
||||
|
||||
$\bullet$ Mitigating Delay and Loss: Several techniques can be used to manage delay and loss in multimedia networking, such as buffering, Forward Error Correction (FEC), interleaving, and error concealment techniques. These can help ensure media is received and played back correctly, even if some data is lost or delayed.
|
||||
|
||||
$\bullet$ Multimedia QoS (Quality of Service): QoS mechanisms can be used to ensure satisfactory performance for multimedia traffic by prioritizing certain types of traffic, limiting delay, jitter, and packet loss, and guaranteeing a certain level of service.
|
||||
|
||||
$\bullet$ DASH (Dynamic Adaptive Streaming over HTTP): This is an adaptive bitrate streaming technique that enables high-quality streaming of media content over the internet. DASH works by adjusting the quality of a media stream in real time, based on the viewer's network and playback conditions.
|
||||
|
||||
$\bullet$ CDNs (Content Delivery Networks): CDNs are a system of distributed servers that deliver content to a user based on their geographic location, the origin of the webpage, and the content delivery server. This helps improve performance and scalability.
|
||||
|
||||
$\bullet$ P2P (Peer-to-Peer) Networks: In peer-to-peer networks, direct sharing of content between peers eliminates the need for central servers, which can efficiently distribute high-demand content and reduce server load. Examples include BitTorrent and certain live streaming platforms.
|
||||
\begin{enumerate}
|
||||
\item \textbf{Application Layer (HTTP or HTTPS):} The user enters a URL in the web browser, or clicks on a link. The browser formulates an HTTP (or HTTPS) GET request.
|
||||
\item \textbf{DNS (Domain Name System):} The browser needs to resolve the domain name to an IP address. It sends a DNS query to a DNS server to obtain the IP address associated with the domain.
|
||||
|
||||
\item \textbf{DHCP (Dynamic Host Configuration Protocol):} If the client doesn't have an IP address, it uses DHCP to obtain network configuration details from a DHCP server. The DHCP server assigns an IP address to the client and provides other network settings.
|
||||
|
||||
\item \textbf{ARP (Address Resolution Protocol):} Before sending packets to other devices on the local network, the client needs to resolve the MAC address of the gateway router. It sends an ARP request to obtain the MAC address of the gateway.
|
||||
|
||||
\item \textbf{Transport Layer (TCP or potentially QUIC):} The HTTP request is wrapped in a TCP packet for HTTP or a QUIC packet for HTTPS (if supported by the server). TCP provides reliable, connection-oriented communication between the client and server.
|
||||
|
||||
\item \textbf{Network Layer (IP):} The transport layer packet is encapsulated in an IP packet. IP handles routing of packets across the network based on the destination IP address.
|
||||
|
||||
\item \textbf{Data Link Layer (Ethernet or Wi-Fi):} The IP packet is encapsulated in a frame (Ethernet or Wi-Fi) for transmission over the physical network.
|
||||
|
||||
\item \textbf{Physical transmission:} The frame is transmitted over the physical medium (e.g., Ethernet cable or Wi-Fi signal).
|
||||
|
||||
\item \textbf{Internet routers:} Routers along the path receive the packet, examine the destination IP address, and forward it towards the server.
|
||||
|
||||
\item \textbf{Server's Link, Network, and Transport Layers:} The server's network stack processes the frame, extracts the IP packet, and passes it up to the transport layer. The transport layer extracts the HTTP request and passes it up to the application layer.
|
||||
|
||||
\item \textbf{Server's Application Layer (HTTP/HTTPS):} The server's web server software processes the HTTP request and generates an appropriate HTTP response.
|
||||
|
||||
\item \textbf{Back to Client:} The HTTP response is passed down the layers at the server, transmitted back over the internet, and received by the client. The response travels up the layers at the client's machine and is rendered by the web browser.
|
||||
|
||||
\item \textbf{Intra-Domain Routing:} Intra-domain routing protocols, such as RIP, OSPF, or IS-IS, are used by routers within a network to determine the best path for forwarding packets between subnets or domains.
|
||||
\end{enumerate}
|
||||
\begin{enumerate}
|
||||
\item Application Layer (HTTP or HTTPS): The web browser formulates an HTTP (or HTTPS) GET request.
|
||||
|
||||
\item Transport Layer (TCP or potentially QUIC): The HTTP request is wrapped in a TCP packet (or QUIC packet) for reliable transport.
|
||||
|
||||
\item Network Layer (IP): The transport layer packet is encapsulated in an IP packet for routing.
|
||||
|
||||
\item Data Link Layer (Ethernet or Wi-Fi): The IP packet is encapsulated in a frame (Ethernet or Wi-Fi) for transmission over the physical network.
|
||||
|
||||
\item Physical transmission: The frame is transmitted over the physical medium (e.g., Ethernet cable or Wi-Fi signal).
|
||||
|
||||
\item Internet routers: The packet travels through a series of routers, each determining the next hop based on the destination IP address.
|
||||
|
||||
\item Server's Link, Network, and Transport Layers: The packet arrives at the server's network interface, where the frame is processed and the IP packet is extracted. The transport layer demultiplexes the packet and passes the HTTP request up to the application layer.
|
||||
|
||||
\item Server's Application Layer (HTTP/HTTPS): The server's web server software processes the HTTP request and formulates an HTTP response.
|
||||
|
||||
\item Back to Client: The HTTP response is passed down the layers at the server, transmitted over the internet, and travels up the layers at the client's machine.
|
||||
|
||||
\item Data Link Layer (Ethernet or Wi-Fi): The IP packet is encapsulated in a frame (Ethernet or Wi-Fi) for transmission back to the client.
|
||||
|
||||
\item Physical transmission: The frame is transmitted over the physical medium (e.g., Ethernet cable or Wi-Fi signal).
|
||||
|
||||
\item Internet routers: The packet travels back through the series of routers, each determining the next hop towards the client.
|
||||
|
||||
\item Client's Link, Network, and Transport Layers: The packet arrives at the client's network interface, where the frame is processed and the IP packet is extracted. The transport layer demultiplexes the packet and passes the HTTP response up to the application layer.
|
||||
|
||||
\item Client's Application Layer (HTTP/HTTPS): The web browser receives the HTTP response and processes the content (e.g., HTML, images) to render the web page for the user.
|
||||
|
||||
\end{enumerate}
|
||||
\end{multicols*}
|
||||
\end{document}
|
||||
BIN
os/images/Monolythic.png
Normal file
|
After Width: | Height: | Size: 307 KiB |
BIN
os/images/memtable.png
Normal file
|
After Width: | Height: | Size: 554 KiB |
363
os/main.tex
Normal file
@ -0,0 +1,363 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% writeLaTeX Example: A quick guide to LaTeX
|
||||
%
|
||||
% Source: Dave Richeson (divisbyzero.com), Dickinson College
|
||||
%
|
||||
% A one-size-fits-all LaTeX cheat sheet. Kept to two pages, so it
|
||||
% can be printed (double-sided) on one piece of paper
|
||||
%
|
||||
% Feel free to distribute this example, but please keep the referral
|
||||
% to divisbyzero.com
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% How to use writeLaTeX:
|
||||
%
|
||||
% You edit the source code here on the left, and the preview on the
|
||||
% right shows you the result within a few seconds.
|
||||
%
|
||||
% Bookmark this page and share the URL with your co-authors. They can
|
||||
% edit at the same time!
|
||||
%
|
||||
% You can upload figures, bibliographies, custom classes and
|
||||
% styles using the files menu.
|
||||
%
|
||||
% If you're new to LaTeX, the wikibook is a great place to start:
|
||||
% http://en.wikibooks.org/wiki/LaTeX
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
\documentclass[10pt,landscape]{article}
|
||||
\usepackage{amssymb,amsmath,amsthm,amsfonts}
|
||||
\usepackage{multicol,multirow}
|
||||
\usepackage{calc}
|
||||
\usepackage{ifthen}
|
||||
\usepackage{graphicx}
|
||||
%\usepackage[fontsize=6pt]{fontsize}
|
||||
|
||||
\usepackage{helvet}
|
||||
\renewcommand{\familydefault}{\sfdefault}
|
||||
\usepackage[landscape]{geometry}
|
||||
|
||||
\geometry{a4paper, landscape, margin=0.5in}
|
||||
\usepackage[colorlinks=true,citecolor=blue,linkcolor=blue]{hyperref}
|
||||
\usepackage[
|
||||
protrusion=true,
|
||||
activate={true,nocompatibility},
|
||||
final,
|
||||
tracking=true,
|
||||
kerning=true,
|
||||
spacing=true,
|
||||
factor=1100]{microtype}
|
||||
\SetTracking{encoding={*}, shape=sc}{40}
|
||||
%%Packages added by Sebastian Lenzlinger:
|
||||
\usepackage{enumerate} %% Used to change the style of enumerations (see below).
|
||||
|
||||
\newtheorem{definition}{Definition}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
\newtheorem{axiom}{Axiom}
|
||||
\newtheorem{lem}{Lemma}
|
||||
\newtheorem{corr}{Corollary}
|
||||
|
||||
\usepackage{tikz} %% Pagacke to create graphics (graphs, automata, etc.)
|
||||
\usetikzlibrary{automata} %% Tikz library to draw automata
|
||||
\usetikzlibrary{arrows} %% Tikz library for nicer arrow heads
|
||||
%%End
|
||||
\microtypecontext{spacing=nonfrench}
|
||||
|
||||
\ifthenelse{\lengthtest { \paperwidth = 11in}}
|
||||
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
|
||||
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
|
||||
{\geometry{top=0.5cm,left=0.5cm,right=0.5cm,bottom=0.5cm} }
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
}
|
||||
\pagestyle{empty}
|
||||
\makeatletter
|
||||
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
|
||||
{0.1mm}%
|
||||
{0.0mm}%x
|
||||
{\normalfont\normalsize\bfseries}}
|
||||
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
|
||||
{0.01mm}%
|
||||
{0.0001mm}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{1ex plus .2ex}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\makeatother
|
||||
\setcounter{secnumdepth}{0}
|
||||
\setlength{\parindent}{0pt}
|
||||
\setlength{\parskip}{0pt plus 0.5ex}
|
||||
% -----------------------------------------------------------------------
|
||||
|
||||
\title{Operating Systems FS23}
|
||||
|
||||
\begin{document}
|
||||
\tiny
|
||||
\raggedright
|
||||
\footnotesize
|
||||
|
||||
\begin{multicols*}{3}
|
||||
\setlength{\premulticols}{1pt}
|
||||
\setlength{\postmulticols}{1pt}
|
||||
\setlength{\multicolsep}{1pt}
|
||||
\setlength{\columnsep}{1pt}
|
||||
\section{Intro}
|
||||
\textbf{Batch Systems:} Multiprogramming, job queue, job scheduling, no interactivity, Job Switch possible (e.g. wait on I/O). Reduce setup time by batching.
|
||||
\textbf{Time-sharing:} multitasking, frequent change of currently running job, cpu scheduler, interactivity, 'concurrent processes', virtual memory.\\ \textbf{Summary}
|
||||
• OS efficiently manage various resources (processes, memory, I/O, files, security) but
|
||||
also ‘extend’ machines with user abstractions: processes, address spaces, files.
|
||||
• OS can only execute programs available in the main memory (the only large storage
|
||||
area that the processor can access directly).
|
||||
• Storage hierarchy ranges from small and fast but expensive (registers, caches) to larger
|
||||
and slower, but less expensive (main memory- volatile), to even larger and much
|
||||
slower, and much less expensive (disk - nonvolatile, magnetic tapes – nonvolatile).
|
||||
• Caching brings programs and data closer to the CPU for execution.
|
||||
• Switch to kernel mode involves: I/O control, timer management, and interrupt management.
|
||||
• Ontogeny Recapitulates Phylogeny: changes in technology may bring obsolete concepts back.
|
||||
|
||||
\section{OS Structures}
|
||||
\textbf{Syscalls } •\emph{Implementation}: Typically, a number associated with each system call
|
||||
– System call interface maintains a table indexed according to these
|
||||
numbers
|
||||
The system call interface invokes the intended system call in OS kernel
|
||||
and returns status of the system call and any return values
|
||||
The caller need know nothing about how the system call is implemented
|
||||
– Just needs to ‘obey’ API and understand what OS will do as a result call
|
||||
– Most details of OS interface hidden from programmer by API
|
||||
– Managed by runtime support library (set of functions built into libraries
|
||||
included with compiler). •\emph{Parameter Passing}: Often, more information is required than simply the identity of the desired
|
||||
system call
|
||||
– Exact type and amount of information vary according to OS and call
|
||||
General methods used to pass parameters to the OS
|
||||
1. Simplest: pass the parameters in registers
|
||||
– In some cases, may have more system call parameters than registers
|
||||
2. Parameters stored in a block, or table, in memory, and address of
|
||||
block passed as a parameter in a register
|
||||
– Approach taken by Linux and Solaris
|
||||
– Does not limit the number or length of parameters being passed
|
||||
3. Parameters placed, or pushed, onto the stack by the program and
|
||||
popped off the stack by the operating system
|
||||
– Does not limit the number or length of parameters being passed.
|
||||
|
||||
\textbf{Monolithic Systems} • Advantages
|
||||
- Interaction between OS modules is easy and efficient
|
||||
- Very little overhead in system call interface
|
||||
- Communication within the kernel is fast
|
||||
• Disadvantages
|
||||
- Difficult to oversee and to follow through
|
||||
- Difficult to maintain, perform local changes
|
||||
- Stability (single-point of failure)
|
||||
- Failure in a single module can threaten the entire system
|
||||
- Complexity increases with kernel scale
|
||||
|
||||
\textbf{Summary: }
|
||||
•OS: environment to execute programs; provides services to users and programs.
|
||||
•User-OS interfaces: (1) command interpreters, (2) GUI, and (3) touch-screen.
|
||||
•System calls at the heart of OS: interface to the services offered by OS
|
||||
•Process control (creation and termination), file management (reading and writing),
|
||||
device management, information management, communication, protection
|
||||
•System programs: provide utilities to users
|
||||
•A linker combines several relocatable object modules into a single binary
|
||||
executable file. A loader loads the executable file into memory, where it
|
||||
becomes eligible to run on an available CPU.
|
||||
•Applications are OS-specific due to differences in binary formats, instruction
|
||||
sets, system calls
|
||||
•OS structures are monolithic, layered, microkernel, modular, and hybrid
|
||||
|
||||
\section{Processes}PCB is an internal struct for process management. Contains all relevant information of the process (process state, PID, CPU registers, etc.). This is not the same as the process memory layout. Executable code is not part of the PCB. Multiple PCs $\Rightarrow$ multithreading.
|
||||
|
||||
\section{Scheduling}
|
||||
\emph{Short-term} (CPU) Schedules from ready queue and allocates CPU.
|
||||
\emph{Mid-term} swapping: decrease \# processes in memory if necessary.
|
||||
\emph{Long-term} Selects process to bring into ready queue. Controls degree of multi-programming.
|
||||
\textbf{Summary: }CPU scheduling: selecting a waiting process from the ready queue and
|
||||
allocating the CPU to it (by the dispatcher).
|
||||
• Scheduling algorithms: either preemptive (where the CPU can be taken
|
||||
away from a process) or nonpreemptive (where a process must voluntarily
|
||||
relinquish control of the CPU).
|
||||
• Almost all modern operating systems are preemptive.
|
||||
• Scheduling algorithms evaluation criteria: (1) CPU utilization, (2) throughput,
|
||||
(3) turnaround time, (4) waiting time, (5) response time.
|
||||
• First-come, first-served (FCFS) scheduling: simplest scheduling algorithm, can
|
||||
cause short processes to wait for very long processes.
|
||||
• Shortest-job-first (SJF) scheduling is provably optimal, providing shortest
|
||||
average waiting time. Implementation is difficult because predicting the length
|
||||
of the next CPU burst is difficult.
|
||||
• Round-robin (RR) scheduling allocates the CPU to each process for a time
|
||||
quantum (q). If process does not relinquish the CPU before its q expires,
|
||||
process is preempted, and another process is scheduled to run for a q.
|
||||
• Priority scheduling (Prio) assigns each process a priority, and CPU is allocated
|
||||
to the process with the highest priority. Same priority processes can be
|
||||
scheduled in FCFS order or using RR scheduling.
|
||||
• Multilevel queue (MLQ) scheduling partitions processes into several separate
|
||||
queues arranged by priority, and the scheduler executes processes in the
|
||||
highest-priority queue. Different scheduling algorithms may be used in each
|
||||
queue.
|
||||
• Multilevel feedback queues (MLFQ) are similar to multilevel queues, except
|
||||
that a process may migrate between different queues.
|
||||
• Multicore processors place one/more CPUs on same physical chip, each CPU
|
||||
may have more than one hardware thread. OS sees each hardware thread as
|
||||
a logical CPU.
|
||||
• Load balancing on multicore systems equalizes loads between CPU cores;
|
||||
thread migration between cores to balance loads may invalidate cache
|
||||
contents and increase memory access times.
|
||||
\textbf{Shared memory} is under the control of user space, not OS. User process has to synchronize when accessing shared memory.
|
||||
|
||||
\textbf{Prod. Consumer Problem} Bounded Buffer requires sync.
|
||||
\textbf{Message Passing} Com. Link implemented 1. Physical: Shared Mem, HW Bus, Network. 2. Logical: Direct (send, recv via ProcessID)/Indirect (via Mailbox (aka Ports)), Sync/Async, Auto/Explicit. • \emph{Syncing} Blocking or Non-Blocking. Rendezvous: both send and recv blocking: correctness guaranteed, lower performance.
|
||||
\section{Synchronization}
|
||||
• Race condition: processes have concurrent access to shared data
|
||||
and the final result depends on the particular order in which concurrent
|
||||
accesses occur. Can result in corrupted values of shared data.
|
||||
• Critical section: section of code where shared data may be
|
||||
manipulated and a possible race condition may occur. The critical-section problem (CSP) is to design a protocol whereby processes
|
||||
can synchronize their activity to cooperatively share data.
|
||||
• Solutions to CSP must fulfil: (1) mutual exclusion, (2) progress, and (3)
|
||||
bounded waiting.
|
||||
• Software solutions to the critical-section problem, such as Peterson's
|
||||
solution, do not work well on modern computer architectures.
|
||||
• A mutex lock provides mutual exclusion by requiring that a process acquire
|
||||
a lock before entering a critical section and release the lock on exiting the
|
||||
critical section.
|
||||
• Semaphores, like mutex locks, can be used to provide mutual exclusion.
|
||||
However, whereas a mutex lock has a binary value that indicates if the
|
||||
lock is available or not, a semaphore has an integer value and can
|
||||
therefore be used to solve a variety of other synchronization problems.
|
||||
• A monitor is an abstract data type that provides a high-level form of
|
||||
process synchronization. A monitor uses condition variables that allow
|
||||
processes to wait for certain conditions to become true and to signal one
|
||||
another when conditions have been set to true.
|
||||
• Solutions to the critical-section problem may suffer from deadlocks.
|
||||
\textbf{Deadlocks Characterization: } A deadlock can arise if all these four (non-independent) conditions hold
|
||||
simultaneously:
|
||||
\textbf{D1}. Mutual exclusion: only one process at a time can use a resource.
|
||||
\textbf{D2}. Hold and wait: a process holding at least one resource is waiting to
|
||||
acquire additional resources held by other processes.
|
||||
\textbf{D3}. No preemption: a resource can be released only voluntarily by the
|
||||
process holding it, after that process has completed its task.
|
||||
\textbf{D4}. Circular wait: there exists a set \{P0, P1, …, Pn\} of waiting processes
|
||||
such that P0 is waiting for a resource that is held by P1, P1 is waiting for a
|
||||
resource that is held by P2, …, Pn–1 is waiting for a resource that is held
|
||||
by Pn, and Pn is waiting for a resource that is held by P0. •\emph{Deadlock Prevention: } Avoid deadlock by having a strict lock hierarchy, i.e. addressing D.4 (only practical option).
|
||||
\section{Main Memory}
|
||||
\textbf{Address Binding}: Important: addresses represented in different ways at different stages of
|
||||
a program’s life
|
||||
– Source code addresses usually symbolic (not physical). Compile Time: If mem loc known, abs. code location generatable; recompile of loc. on changes.
|
||||
– Compiled code addresses bind to relocatable memory addresses.
|
||||
– i.e., “14 bytes from beginning of this memory module”
|
||||
– Linker or loader will bind relocatable addresses to absolute addresses, if not know at compile time.
|
||||
– Each binding maps one address space to another:
|
||||
code addresses to relocatable addresses to absolute addresses
|
||||
- Execute time: binding here if proc relocatable during execution (need hw support). Most OSs bind at exec time.
|
||||
\textbf{MMU:} HW device maps virt to phys addr \emph{during execution}. Logical addr. plus reloc. reg. = relocatable mem addr. \textbf{Exec-time addr binding: }log. addr. bound during exec. by MMU.
|
||||
\textbf{Dynamic Loading }($\neq$ d.linking) routine loaded only when called.
|
||||
\textbf{Two Partitions:} User proc (in low physical mem) plus resident OS (in high physical mem with interrupt vec). \emph{Contiguous alloc:} each proc in single section of mem.
|
||||
\textbf{Mem protect:} Reloc plus limit reg used in context switch to protect user procs from each other. MMU \emph{dynamically} maps log. to phys. addr.
|
||||
\textbf{Multiple Part:} Deg. of multiprog. limited by no. of part. in mem. for procs. \emph{Variable part. size} for efficiency.
|
||||
\emph{First-fit:} allocate first hole. Equiv. to best-fit for storage, but faster. \emph{Best-fit:} smallest hole large enough. Must search entire list, unless ordered by size. $\Rightarrow$ get smallest leftover area.
|
||||
\textbf{External frag.} many small, non-contiguous holes. Results in unusable memory. \emph{Sol:} Compaction: shuffle mem content to have all free mem in 1 large block. Only possible if dyn. addr. reloc. aka done at exec time. Also: non-contiguous alloc, see paging. \textbf{Internal frag:} alloc mem block may be larger than requested $\Rightarrow$ not whole block used.
|
||||
\textbf{Paging:} alloc proc mem whenever and wherever available. Procs. phys. addr. may be non-contiguous. Physical mem. div. into fixed size blocks aka \emph{frames}. Log. mem. div. into \emph{pages}.
|
||||
Use \emph{page table} to map N pages to N frames. Storage also split into same size blocks. •Addr trans scheme: trans page number p to frame number f, use same offset d. Frame ID times pagesize plus offset $=$ phys. mem. addr. Like using table of base/relocation regs, per mem frame. $\Rightarrow$ NO external fragmentation. IMPORTANT: Page table is \emph{per proc}.
|
||||
\textbf{Paging implementation:} HW w/o TLB: every data/instr access needs two mem accesses: page table then frame. With TLB: HW cache aka TLB with key/value pairs. If TLB miss, get from page table in mem.
|
||||
\textbf{Protection:} bit indicating RO or RW(can add more e.g. for exec). Valid-Invalid bit, additional bit per entry in PT set by OS. indicates if page in LAS of proc. Violation results in trap to kernel.
|
||||
\textbf{Demand Paging Page Fault Handling:} 1. Ref. LA in PT invalid bit. 2. Trap to OS. 3. Page in storage. 4. Bring in missing page to free frame. 5. Reset PT. 6. Restart instruction.
|
||||
\textbf{Page Replacement Algorithms: } FIFO, Least recently used (LRU), Optimal Algo (OPT) as reference.
|
||||
\textbf{Frame Alloc Algo:} 1. Fixed: 1.1 Equal, m frames n procs so m/n frames/proc. 1.2 Proportional, then dep. on size of program. 2. Priority based: Proportional scheme plus priority instead of size (or size and prio mixed). \textbf{Global Repl:} Pro: greater throughput (more commonly used). Con: proc exec time can greatly vary. \textbf{Local Repl:} Pro: more consistent per-proc perf. Con: underutilized memory.
|
||||
|
||||
\textbf{Thrashing} Proc needs frames, high Page fault, get desired page, replace existing frame, quickly need replaced frame back, repeat. CPU seems unused $\Rightarrow$ OS gives CPU more work, thrashing repeats even more.
|
||||
\textbf{Locality:} Set of pages actively used together by proc. Model: Proc faults for locality until all pages of locality in mem. No fault again until locality changes. Localities may overlap. A \textbf{working set} is based on locality and is defined as the set of pages currently in
|
||||
use by a process
|
||||
|
||||
\section{FileSys}
|
||||
• File: an abstract data type defined and implemented by OS. It is a sequence of logical records. A logical record may be a byte, a line (of fixed or variable length), or a more complex data item.
|
||||
• File operations: create, open, write, read, reposition, delete, truncate • File access methods: sequential and direct • Directories: Within a file system, it is useful to create directories to allow files to be organized.
|
||||
• Tree-structured directory allows a user to create subdirectories to
|
||||
organize files.
|
||||
• Acyclic-graph directory: enable users to share subdirectories and files but complicate searching and deletion.
|
||||
• A general graph: allows complete flexibility in the sharing of files and directories but sometimes requires garbage collection to recover
|
||||
unused disk space.
|
||||
• Most file systems reside on secondary storage, which is designed to hold a large amount of data permanently
|
||||
• The most common secondary-storage medium is the disk, but the use of NVM devices is increasing
|
||||
• File systems are mounted onto a logical file system architecture to make them available for use
|
||||
• File systems are often implemented in a layered or modular structure • Files within a file system can be allocated space on the storage device in three ways: through contiguous, linked, or indexed allocation
|
||||
• Contiguous allocation can suffer from external fragmentation. • Direct access is very inefficient with linked allocation.
|
||||
• Indexed allocation may require substantial overhead for its index block.
|
||||
• Free-space allocation methods also influence the efficiency of disk-space use, the performance of the file system, and the reliability of secondary storage.
|
||||
• Free-space allocation methods include bit vectors and linked lists.
|
||||
• A general-purpose computer system can have multiple storage devices, and those devices can be sliced up into partitions, which hold volumes, which in turn hold file systems
|
||||
• File systems must also be mounted before they are used • A mount point is a location within the directory structure where the file system will be attached
|
||||
• A boot loader is a set of blocks that contain enough code to know how to load the kernel from the file system
|
||||
• The root partition selected by the boot loader must be mounted at boot
|
||||
time
|
||||
\section{Security}
|
||||
• A general-purpose computer system can have multiple storage devices, and those devices can be sliced up into partitions, which hold volumes, which in turn hold file systems
|
||||
• File systems must also be mounted before they are used • A mount point is a location within the directory structure where the file system will be attached
|
||||
• A boot loader is a set of blocks that contain enough code to know how to load the kernel from the file system
|
||||
• The root partition selected by the boot loader must be mounted at boot
|
||||
time
|
||||
• System protection features are guided by the principle of need-to-know and implement mechanisms to enforce the principle of least privilege.
|
||||
• Computer systems contain objects that must be protected from misuse.
|
||||
• Objects may be hardware (such as memory, CPU time, and I/O devices) or software (such as files, programs, and semaphores).
|
||||
• An access right is permission to perform an operation on an object. • A domain is a set of access rights. • Processes execute in domains and may use any of the access rights
|
||||
in the domain to access and manipulate objects.
|
||||
• The access matrix is a general model of protection that provides a mechanism for protection without imposing a particular protection policy on the system or its users
|
||||
• The access matrix is sparse. It is normally implemented as access
|
||||
lists associated with each object
|
||||
\section{Virtualization}
|
||||
\textbf{Trap-and-emulate:}
|
||||
• A virtual machine guest can execute only in user mode
|
||||
• The kernel, of course, runs in kernel mode, and it is not safe to allow user-level code to run in kernel mode
|
||||
• We must have a virtual user mode and a virtual kernel mode, both of which run in physical user mode.
|
||||
• A transfer from virtual user mode to virtual kernel mode in the virtual machine happens when privileged
|
||||
instructions are executed
|
||||
|
||||
\textbf{Binary Translation:}
|
||||
• Some CPUs do not have a clean separation of privileged and nonprivileged instructions
|
||||
• Binary Translation is a technique that makes VMM inspect all instructions initiated by the guest OS.
|
||||
• Instructions that would behave differently in real kernel mode translated into a set of new
|
||||
instructions
|
||||
\textbf{Hypervisors:}
|
||||
• Type 0 Hypervisor • have existed for many years under many names, including “partitions” and “domains.”
|
||||
• The VMM itself is encoded in the firmware and loaded at boot time
|
||||
• VMM loads the guest images to run in each partition.
|
||||
• The feature set of a type 0 hypervisor tends to be smaller than those of the other types because it is
|
||||
implemented in hardware
|
||||
• Type 1 hypervisor (bare metal) • commonly found in company data centers • special-purpose operating system that runs natively on the hardware
|
||||
• Type 2 hypervisor (hosted) • simply another process run and managed by the host OS • the host OS does not know that virtualization is happening
|
||||
• there is very little operating-system involvement in type 2 hypervisor
|
||||
• Both type 1 and type 2 hypervisors work with unmodified guest operating systems and they have to jump through hoops to get good performance (recall trap-then-emulate approach)
|
||||
• Paravirtualization takes another approach. It avoid causing traps by modifying the source code of the
|
||||
guest operating system
|
||||
\section{OS \& Performance}
|
||||
• The OS decides mapping of Processes/Threads in time and space
|
||||
• Preemptive scheduling to enable fairness among all threads
|
||||
• Too long Time Quantum => less responsiveness of the applications
|
||||
• Too short Time Quantum => frequent context switches
|
||||
• The OS decides mapping of Processes/Threads in time and space
|
||||
• Migrating the thread to a different CPU core.
|
||||
• Load Balancing aims to evenly distribute the workload across available
|
||||
CPU cores to maximize overall system performance
|
||||
• Cache Affinity refers to the preference for a thread to execute on the same
|
||||
CPU core where its associated cache data resides
|
||||
• OS memory management ensures that applications receive sufficient
|
||||
and contiguous memory blocks, minimizing fragmentation, and
|
||||
optimizing memory usage
|
||||
• Memory Paging
|
||||
• Page Size
|
||||
• Too large: leads to internal fragmentation
|
||||
• Too small: leads to external fragmentation, where free memory is divided
|
||||
into small, non-contiguous blocks, making it challenging to allocate larger
|
||||
memory chunk
|
||||
• Efficient and optimized file systems can improve file access times and
|
||||
overall I/O performance
|
||||
• Well-designed I/O scheduling algorithms can minimize seek times,
|
||||
reduce I/O bottlenecks, and enhance application responsiveness
|
||||
• Buffering and caching mechanisms reduce physical disk accesses,
|
||||
improving I/O performance by providing faster access times
|
||||
• Asynchronous I/O allows for concurrent execution of I/O operations,
|
||||
minimizing I/O-related delays and optimizing resource utilization
|
||||
|
||||
%\includegraphics[width=\linewidth]{images/memtable.png}O
|
||||
\end{multicols*}
|
||||
\end{document}
|
||||
286
os/quiz.tex
Normal file
@ -0,0 +1,286 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% writeLaTeX Example: A quick guide to LaTeX
|
||||
%
|
||||
% Source: Dave Richeson (divisbyzero.com), Dickinson College
|
||||
%
|
||||
% A one-size-fits-all LaTeX cheat sheet. Kept to two pages, so it
|
||||
% can be printed (double-sided) on one piece of paper
|
||||
%
|
||||
% Feel free to distribute this example, but please keep the referral
|
||||
% to divisbyzero.com
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% How to use writeLaTeX:
|
||||
%
|
||||
% You edit the source code here on the left, and the preview on the
|
||||
% right shows you the result within a few seconds.
|
||||
%
|
||||
% Bookmark this page and share the URL with your co-authors. They can
|
||||
% edit at the same time!
|
||||
%
|
||||
% You can upload figures, bibliographies, custom classes and
|
||||
% styles using the files menu.
|
||||
%
|
||||
% If you're new to LaTeX, the wikibook is a great place to start:
|
||||
% http://en.wikibooks.org/wiki/LaTeX
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
\documentclass[10pt,landscape]{article}
|
||||
\usepackage{amssymb,amsmath,amsthm,amsfonts}
|
||||
\usepackage{multicol,multirow}
|
||||
\usepackage{calc}
|
||||
\usepackage{ifthen}
|
||||
\usepackage{helvet}
|
||||
\renewcommand{\familydefault}{\sfdefault}
|
||||
%\usepackage[fontsize=6pt]{fontsize}
|
||||
|
||||
\usepackage[landscape]{geometry}
|
||||
|
||||
\geometry{a4paper, landscape, margin=0.5in}
|
||||
\usepackage[colorlinks=true,citecolor=blue,linkcolor=blue]{hyperref}
|
||||
\usepackage[
|
||||
protrusion=true,
|
||||
activate={true,nocompatibility},
|
||||
final,
|
||||
tracking=true,
|
||||
kerning=true,
|
||||
spacing=true,
|
||||
factor=1100]{microtype}
|
||||
\SetTracking{encoding={*}, shape=sc}{40}
|
||||
%%Packages added by Sebastian Lenzlinger:
|
||||
\usepackage{enumerate} %% Used to change the style of enumerations (see below).
|
||||
|
||||
\newtheorem{definition}{Definition}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
\newtheorem{axiom}{Axiom}
|
||||
\newtheorem{lem}{Lemma}
|
||||
\newtheorem{corr}{Corollary}
|
||||
|
||||
\usepackage{tikz} %% Pagacke to create graphics (graphs, automata, etc.)
|
||||
\usetikzlibrary{automata} %% Tikz library to draw automata
|
||||
\usetikzlibrary{arrows} %% Tikz library for nicer arrow heads
|
||||
%%End
|
||||
\microtypecontext{spacing=nonfrench}
|
||||
|
||||
\ifthenelse{\lengthtest { \paperwidth = 11in}}
|
||||
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
|
||||
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
}
|
||||
\pagestyle{empty}
|
||||
\makeatletter
|
||||
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
|
||||
{0.1mm}%
|
||||
{0.0001mm}%x
|
||||
{\normalfont\normalsize\bfseries}}
|
||||
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
|
||||
{0mm}%
|
||||
{0mm}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{1ex plus .2ex}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\makeatother
|
||||
\setcounter{secnumdepth}{0}
|
||||
\setlength{\parindent}{0pt}
|
||||
\setlength{\parskip}{0pt plus 0.5ex}
|
||||
% -----------------------------------------------------------------------
|
||||
|
||||
\title{Operating Systems FS23}
|
||||
|
||||
\begin{document}
|
||||
\tiny
|
||||
\raggedright
|
||||
\footnotesize
|
||||
|
||||
\begin{multicols*}{3}
|
||||
\setlength{\premulticols}{1pt}
|
||||
\setlength{\postmulticols}{1pt}
|
||||
\setlength{\multicolsep}{1pt}
|
||||
\setlength{\columnsep}{1pt}
|
||||
|
||||
|
||||
\section{Intro}
|
||||
\textbf{Asym. Mult. Proc. (AMP):} Special task per proc. Single OS code in mem, only runs on CPU1. Syscalls and cache buffer all to CPU1. Mem shared across all, scheduling across all CPUs \textbf{SMP:} All procs perform all tasks. Sync issues. Syscalls proc on CPU where occurs.
|
||||
|
||||
\rule{\linewidth}{.1pt}
|
||||
\section{OS Structures}
|
||||
|
||||
\section{Processes-Threads-Concurrency}
|
||||
\textbf{What is a process?} In the context of operating systems, a process is an instance of a program that is being executed by one or many threads. It is a unit of work within the operating system that can be scheduled and allocated resources such as CPU time, memory, and I/O devices. Each process has its own address space, which contains the code being executed, data structures, and other resources required by the program. Processes are managed by the operating system's process management subsystem, which provides mechanisms for creating, terminating, and synchronizing processes.\\
|
||||
\textbf{For a single-processor system, there will never be more than one process in the Running state.} This statement is not entirely true. In a single-processor system, there can be multiple processes in the Running state, but only one process can be executing on the CPU at any given time. The operating system uses a scheduling algorithm to determine which process should be given access to the CPU next. The scheduler may switch between processes frequently, giving each process a small amount of CPU time before switching to another process. This gives the illusion that multiple processes are running simultaneously, even though only one process is executing on the CPU at any given time. QUIZ says this statement is true!\\
|
||||
\textbf{An I/O-bound process spends more time doing computations than I/O.} This statement is not correct. An I/O-bound process is a process that spends most of its time waiting for I/O operations to complete, such as reading from or writing to a disk or network. In contrast, a CPU-bound process is a process that spends most of its time performing computations and using the CPU. Therefore, an I/O-bound process spends more time waiting for I/O operations to complete than performing computations.\\
|
||||
\textbf{What / who selects which process should be executed next (from the ready queue) and allocates CPU?} The responsibility of selecting which process should be executed next and allocating CPU time to that process lies with the operating system's scheduler. The scheduler is a component of the operating system's process management subsystem and is responsible for managing the execution of processes on the CPU. When a process is ready to run, it is placed in a queue called the ready queue. The scheduler selects a process from the ready queue based on its scheduling algorithm and allocates CPU time to that process. The scheduling algorithm can be preemptive or non-preemptive, depending on whether the scheduler can interrupt a running process to allocate CPU time to another process.\\
|
||||
\textbf{An I/O-bound process spends more time doing I/O than computations.} Yes, this statement is correct. An I/O-bound process is a process that spends most of its time waiting for I/O operations to complete, such as reading from or writing to a disk or network. Therefore, an I/O-bound process spends more time doing I/O operations than computations. In contrast, a CPU-bound process is a process that spends most of its time performing computations and using the CPU.\\
|
||||
\textbf{Which benefits go to multithreaded programming?} Multithreaded programming offers several benefits, including:
|
||||
|
||||
1. Responsiveness: Multithreading allows a program to continue executing even if part of it is blocked or waiting for I/O operations to complete. This is especially important for user interfaces and time-sharing systems.
|
||||
|
||||
2. Resource sharing: Threads share the resources of their parent process, making it easier to share data between threads than between separate processes.
|
||||
|
||||
3. Faster execution: Creating a thread is generally faster and less resource-intensive than creating a new process, and switching between threads has lower overhead than switching between processes.
|
||||
|
||||
4. Scalability: Even a single process can take advantage of multiprocessor architectures by using multiple threads to execute tasks in parallel.
|
||||
|
||||
5. Simplified code: Multithreading can simplify the code by allowing different parts of the program to execute concurrently without requiring complex synchronization mechanisms.
|
||||
|
||||
These benefits make multithreaded programming an attractive option for developing efficient and responsive applications that can take advantage of modern hardware architectures.\\
|
||||
\textbf{What is the difference between parallelism and concurrency?} Concurrency and parallelism are related concepts in computer science, but they have different meanings.
|
||||
|
||||
Concurrency refers to the ability of a program to make progress on multiple tasks simultaneously. In a concurrent program, different parts of the program can execute independently and make progress at the same time. Concurrency is often achieved through the use of threads or other mechanisms that allow different parts of the program to execute concurrently.
|
||||
|
||||
Parallelism, on the other hand, refers to the ability of a system to perform multiple tasks simultaneously. In a parallel system, multiple processors or cores work together to execute different tasks simultaneously. Parallelism is often used in high-performance computing applications that require large amounts of computation.
|
||||
|
||||
In summary, concurrency is a property of a program that allows it to make progress on multiple tasks simultaneously, while parallelism is a property of a system that allows it to perform multiple tasks simultaneously by using multiple processors or cores.\\
|
||||
\textbf{Amdahl's Law describes performance gains for applications with both a serial and parallel component.} Yes, that is correct. Amdahl's Law is a formula that describes the theoretical maximum speedup that can be achieved by parallelizing a program. It takes into account the proportion of the program that can be parallelized and the number of processors used to execute the program.
|
||||
|
||||
Amdahl's Law states that the speedup of a program is limited by its serial portion, which cannot be parallelized. The formula for Amdahl's Law is:
|
||||
|
||||
Speedup = 1 / (Serial fraction + Parallel fraction / Number of processors)
|
||||
|
||||
This formula shows that as the number of processors increases, the speedup of a program approaches a limit determined by its serial portion. Therefore, Amdahl's Law highlights the importance of identifying and optimizing the serial portion of a program to achieve maximum performance gains from parallelization.\\
|
||||
\textbf{What concepts refers to the capability to allow multiple tasks to make progress on a single processor?} The concept that refers to the capability to allow multiple tasks to make progress on a single processor is concurrency. Concurrency allows different parts of a program to execute independently and make progress at the same time, even if they are running on a single processor.
|
||||
|
||||
Concurrency can be achieved through various mechanisms, such as time slicing or cooperative multitasking, which allow different tasks to take turns executing on the processor. However, it's important to note that while concurrency can improve the responsiveness and performance of a program, it does not provide true parallelism since only one task is executing at any given moment.\\
|
||||
\textbf{The dispatcher module gives control of the CPU (core) to the process selected by the short-term scheduler, this means:} Yes, that is correct. The dispatcher module is responsible for giving control of the CPU (core) to the process selected by the short-term scheduler. Once the short-term scheduler selects a process to run, the dispatcher is responsible for performing a context switch to save the state of the currently running process and restore the state of the selected process.
|
||||
|
||||
The dispatcher then transfers control of the CPU to the selected process, allowing it to execute on the processor. This process continues until either the selected process completes its execution or is preempted by another process due to a higher-priority task or time slice expiration.
|
||||
|
||||
Overall, the dispatcher plays a critical role in managing and scheduling processes on a system's CPU, ensuring that each process gets its fair share of processing time and that system resources are used efficiently.
|
||||
|
||||
CPU switches to user mode. Switches context from one proc to another, jumps to proper location in the userprogram to resume that program.
|
||||
\\
|
||||
\textbf{Turnaround time is the amount of time to execute a particular process.} I'm sorry, but that statement is not entirely accurate. Turnaround time is actually the amount of time it takes for a process to complete execution from the moment it was submitted for execution. It includes both the time spent waiting in the ready queue and the actual execution time on the CPU. Therefore, it is a measure of how long a process takes to run from start to finish. This information can be found on page 16 of this PDF file under "Process Scheduling". In Quiz statement was marked as correct!\\
|
||||
\textbf{First-Come, First-Served (FCFS) Scheduling gives minimum average waiting time for a given set of processes.} I'm sorry, but that statement is not entirely accurate. First-Come, First-Served (FCFS) Scheduling is a non-preemptive scheduling algorithm in which the process that arrives first is executed first. While it may seem like FCFS gives minimum average waiting time for a given set of processes, this is not always the case. In fact, FCFS can lead to long waiting times for processes that arrive later if the first process has a long CPU burst time. Therefore, it may not be the most efficient scheduling algorithm in terms of minimizing average waiting time. This information can be found on page 18 of this PDF file under "Scheduler Types".\\
|
||||
\textbf{Multilevel feedback queues are similar to multilevel queues, except that a process may migrate between different queues.} Yes, that statement is correct. Multilevel feedback queues are similar to multilevel queues in that they have multiple priority levels, but they differ in that a process can move between different queues based on its behavior. In a multilevel feedback queue, a process starts at the highest priority queue and moves down the hierarchy if it uses too much CPU time or has too many input/output operations. Conversely, if a process uses very little CPU time or has few input/output operations, it may move up the hierarchy to a higher priority queue. This allows for more efficient scheduling of processes based on their behavior and resource needs. This information can be found on page 20 of this PDF file under "Multilevel Feedback Queues".\\
|
||||
\rule{\linewidth}{.1pt}
|
||||
\section{Synchronization}
|
||||
\textbf{Race Conditions:} A race condition is a situation that occurs when two or more processes have concurrent access to shared data and the final result depends on the particular order in which concurrent accesses occur. This can lead to inconsistent or corrupted values of shared data. You can find more information about race conditions and the critical section problem on page 53 of this PDF file (Sync.a). Outcome depends on order of access.\\
|
||||
\textbf{Ch. Sync a}\\
|
||||
\textbf{A mutex lock is released immediately after entering a critical section?} A mutex lock is released on exiting the critical section, not immediately after entering it. A mutex lock provides mutual exclusion by requiring that a process acquire a lock before entering a critical section and release the lock on exiting the critical section. You can find more information about mutex locks and their usage in solving the critical section problem on page 21 and 53 of this PDF file (sync a).
|
||||
\\
|
||||
\textbf{A semaphore has an integer value.} Yes, a semaphore has an integer value. There are two types of semaphores: counting semaphore and binary semaphore. A counting semaphore is initialized to a positive integer value and can range over an unrestricted domain, while a binary semaphore is initialized to either 0 or 1 and can range only between 0 and 1. You can find more information about semaphores and their usage on page 25 of this PDF file. (sync a).\\
|
||||
\textbf{Semaphores and mutex locks both provide mutual exclusion.} Yes, both semaphores and mutex locks can be used to provide mutual exclusion. A mutex lock has a binary value that indicates if the lock is available or not, while a semaphore has an integer value and can therefore be used to solve a variety of other synchronization problems. You can find more information about semaphores and mutex locks on page 54 of this PDF file.\\
|
||||
\textbf{The value of a counting semaphore can range only between 0 and 1.} No, the statement is incorrect. A counting semaphore can have an integer value that ranges over an unrestricted domain, while a binary semaphore can have a value that ranges only between 0 and 1. You can find more information about semaphores and their types on page 25 of this PDF file. A binary semaphore has value 0 or 1, but not inbetween. Semaphores are integers. --> false in quiz??\\
|
||||
\textbf{Which of the following may cause a liveness failure?
|
||||
1. an infinite loop
|
||||
2. a deadlock
|
||||
3. a busy waiting loop
|
||||
4. all of the other answers} Option 4 is correct. All of the given options may cause a liveness failure. An infinite loop can cause a process to never complete, leading to a liveness failure. A deadlock occurs when two or more processes are waiting indefinitely for an event that can be caused by only one of the waiting processes, leading to a liveness failure. Busy waiting loop can also lead to a liveness failure as it consumes CPU cycles without performing any useful work. You can find more information about liveness failures and their causes on page 28 of this PDF file. A liveness failure is a type of concurrency bug that occurs when a process or system fails to make progress towards its intended goal. In other words, the system becomes stuck and cannot proceed to the next state or complete its task.\\
|
||||
\textbf{Solutions to the critical section problem may suffer from liveness failures.} Yes, that is correct. Solutions to the critical section problem may suffer from liveness failures such as deadlocks, livelocks, and starvation. Deadlocks occur when two or more processes are waiting indefinitely for an event that can be caused by only one of the waiting processes. Livelocks occur when two or more processes change their states in response to the state changes of other processes without making progress towards completing their tasks. Starvation occurs when a process is perpetually denied access to a resource it needs to complete its task. You can find more information about liveness failures and their causes on page 28 of this PDF file.\\
|
||||
\textbf{When are atomic integers in Linux useful? When not?} Atomic integers in Linux are useful when multiple threads or processes need to access and modify a shared integer variable concurrently. Atomic integers provide a way to perform read-modify-write operations on the shared variable atomically, without the need for locks or other synchronization mechanisms. This ensures that the shared variable is always in a consistent state, even when accessed by multiple threads or processes simultaneously. Atomic integers are commonly used in Linux kernel programming for implementing synchronization primitives such as semaphores and spinlocks. You can find more information about atomic operations and their usage in Linux on page 47 of this PDF file. However, they are not useful when several variables contribute to possible race conditions. (Page 21 Examples). In Quiz: Useful when, Several variables are involved in rc, single proc access several vars involved in rc, int needs to be updated.\\
|
||||
\textbf{Emergence of multicore systems has put greater emphasis on developing novel concurrency techniques, because} the emergence of multicore systems has made it possible to execute multiple threads or processes simultaneously on a single machine. However, this also introduces new challenges related to concurrency and synchronization. Traditional concurrency techniques such as locks and semaphores may not be efficient or scalable enough to handle the increased number of threads or processes running on a multicore system. This is because these techniques often rely on serialization, which can lead to contention and overhead when multiple threads or processes try to access the same shared resource. In addition, traditional techniques may not take full advantage of the parallelism offered by multicore systems, leading to suboptimal performance. Therefore, there is a need for novel concurrency techniques that can take advantage of the parallelism offered by multicore systems while ensuring correctness and avoiding issues such as deadlocks and race conditions. You can find more information about the challenges posed by multicore systems and the need for novel concurrency techniques on page 21 of this PDF file. QUIZ Answer: Increasing number of processing cores causes more risk of race conditions. NOT Fundamentally new problems, race conditions are not more difficult to solve nor are deadlocks more difficult to prevent or avoid in multicore system. But the risk of running into a race condition rises.\\
|
||||
\
|
||||
\textbf{Critical Region:} Part of programm where shared memory is accessed.\\
|
||||
\textbf{Ch Sync Deadlocks}\\
|
||||
\textbf{Which of the following is not a factor in choosing which process to terminate during recovery from a deadlock?
|
||||
1. How many more resources the process needs in order to complete.
|
||||
2. How many processes will need to be terminated.
|
||||
3. Who the parent of the process is.
|
||||
4. What the priority of the process is.} Option 3 is not a factor in choosing which process to terminate during recovery from a deadlock. The other options are all factors that may be considered when deciding which process to terminate in order to resolve a deadlock. Specifically, the number of resources needed by the process, the number of processes that need to be terminated, and the priority of the process are all important factors that can help determine which process should be terminated to minimize disruption and maximize system performance. However, who the parent of the process is typically not relevant in this context.\\
|
||||
\textbf{What are the conditions for a deadlock?} There are four necessary conditions for a deadlock to occur in a system. These conditions are:
|
||||
|
||||
1. Mutual exclusion: At least one resource must be held in a non-shareable mode, meaning that only one process at a time can use the resource.
|
||||
|
||||
2. Hold and wait: A process holding at least one resource is waiting to acquire additional resources held by other processes.
|
||||
|
||||
3. No preemption: A resource cannot be forcibly removed from a process holding it; it can only be released voluntarily by the process after it has completed its task.
|
||||
|
||||
4. Circular wait: There exists a set of waiting processes such that each process in the set is waiting for a resource held by another process in the set, creating a circular chain of dependencies.
|
||||
|
||||
If all four of these conditions hold simultaneously, then a deadlock can occur in the system.\\
|
||||
\textbf{The circular-wait condition for a deadlock implies the hold-and-wait condition.} Yes, the circular-wait condition for a deadlock implies the hold-and-wait condition. The hold-and-wait condition states that a process holding at least one resource is waiting to acquire additional resources held by other processes. The circular-wait condition, on the other hand, states that there exists a set of waiting processes such that each process in the set is waiting for a resource held by another process in the set. If this condition holds, then it implies that at least one process in the set must be holding a resource while waiting for another resource to be released by another process in the set. Therefore, the circular-wait condition implies the hold-and-wait condition.\\
|
||||
\textbf{A system will never enter a deadlocked state if
|
||||
1. None of the other answers.
|
||||
2. The system uses the detection and recovery technique.
|
||||
3. The system uses the deadlock avoidance technique.
|
||||
4. The system chooses to ignore the problem altogether.} Option 3 is not correct. A system will never enter a deadlocked state if it employs a protocol to ensure that the system will never enter a deadlock state. This can be achieved through either of the following techniques:
|
||||
|
||||
1. Deadlock prevention: This technique ensures that at least one of the four necessary conditions for deadlock cannot occur in the system.
|
||||
|
||||
2. Deadlock avoidance: This technique involves dynamically allocating resources to processes in a way that avoids the possibility of a deadlock.
|
||||
|
||||
Option 2 is incorrect because detection and recovery is not a prevention technique, but rather a way to handle deadlocks after they have already occurred. Option 4 is also incorrect because ignoring the problem altogether does not prevent deadlocks from occurring; it simply means that the system will not attempt to handle them proactively. The information can be found on page 20 of the PDF file. QUIZ says that using a deadlock avoidance technique will make a system not deadlock...\\
|
||||
\textbf{Deadlock prevention and deadlock avoidance are essentially the same approaches for handling a deadlock.} No, deadlock prevention and deadlock avoidance are not the same approaches for handling a deadlock. Deadlock prevention involves designing the system in such a way that at least one of the four necessary conditions for deadlock cannot occur. This can be achieved by using techniques such as resource allocation, ordering of resources, or disabling interrupts. On the other hand, deadlock avoidance involves dynamically allocating resources to processes in a way that avoids the possibility of a deadlock. This is done by using algorithms that predict whether a particular resource allocation will lead to a deadlock or not. If it is predicted that a particular allocation will lead to a deadlock, then the system will avoid making that allocation.
|
||||
|
||||
Therefore, while both techniques aim to prevent deadlocks from occurring, they use different approaches to achieve this goal. The information can be found on page 20 of the PDF file.\\
|
||||
\textbf{A system in an unsafe state will ultimately deadlock.}Yes, a system in an unsafe state can ultimately deadlock. An unsafe state is a state in which a set of processes is holding resources and waiting for others that are also being held by other processes. If the system enters an unsafe state, it may be possible to allocate resources in such a way that a deadlock will not occur. However, if no safe sequence of resource allocations exists, then the system will eventually deadlock. Therefore, it is important to ensure that the system does not enter an unsafe state in the first place by using techniques such as deadlock prevention or avoidance. The information can be found on page 21 of the PDF file. QUIZ says: False. I think that it is not deterministic to get into a deadlock\\
|
||||
|
||||
\rule{\linewidth}{.1pt}
|
||||
\section{Main Memory}
|
||||
\textbf{Ch Main Memory}\\
|
||||
\textbf{Why do Computing Systems need cache? }Computing systems need cache to reduce memory stalls. Main memory access can take many clock cycles, causing a stall. Cache (fast memory) is added between main memory and CPU registers to reduce memory stalls. This helps improve the overall performance of the system by reducing the time it takes for the CPU to access data that is frequently used. (Page 6)\\
|
||||
\textbf{An address generated by a CPU} is referred to as a logical address. The memory management unit (MMU) then translates this logical address to a physical address in memory. (Page 13)\\
|
||||
\textbf{A large page size results in}
|
||||
\\
|
||||
\textbf{Hashed page tables are particularly useful for processes with sparse address spaces}
|
||||
p.59 says clustered page tables (variation of hashed page tables) are good for sparse address spaces, does not mention processes though...\\
|
||||
\textbf{An advantage of virtual memory is that
|
||||
1. you do not have to worry about the amount of physical memory available
|
||||
2. provides a way to execute a program that is only partially loaded in memory
|
||||
3. all of the other answers
|
||||
4. a program can be much larger than the size of physical memory} The correct answer is 3. All of the other answers are advantages of virtual memory. Virtual memory allows for efficient and safe sharing of memory among multiple processes, enables larger programs to run on a system with limited physical memory, and provides a way to execute a program that is only partially loaded in memory. Additionally, virtual memory eliminates the need for programmers to worry about the amount of physical memory available. (Page 5)\\
|
||||
\textbf{Anonymous memory of a process refers to
|
||||
1. the pages not associated with the binary executable file of the process.
|
||||
2. the pages that cannot be swapped out of the physical memory.
|
||||
3. the pages associated with the binary executable file of the process.
|
||||
4. the pages associated with the static data of the process.} The correct answer is 1. Anonymous memory of a process refers to the pages not associated with the binary executable file of the process. (Page 19) \\
|
||||
\textbf{A sign of thrashing is
|
||||
1. CPU utilization decreases as the degree of multiprogramming is increased
|
||||
2. CPU utilization decreases as the number of allocated pages is increased
|
||||
3. CPU utilization increases as the degree of multiprogramming is increased
|
||||
4. CPU utilization increases as the number of allocated pages is increased} The correct answer is 1. A sign of thrashing is that CPU utilization decreases as the degree of multiprogramming is increased. (Page 19)\\
|
||||
\textbf{Current best practice to avoid thrashing is to include enough physical memory} While having enough physical memory is important to avoid thrashing, it is not the only best practice. Other best practices include using efficient memory allocation algorithms, reducing the degree of multiprogramming, and using demand paging. True according to lecture notes page 30 VM part 2.\\
|
||||
\textbf{Larger page size results in less total allocated memory?} No, it increases allocated memory since each process has a higher minimum amount of memory.
|
||||
\textbf{Does a larger page size reduce I/O time?} Yes, since the CPU can get bigger blocks of data off disk into main memory.
|
||||
\textbf{How to improve I/O?} Caching.
|
||||
\section{File System}
|
||||
\textbf{Chapter file Systems a.}
|
||||
\textbf{How is a file identified within a file system?}A file is uniquely identified within a file system by its identifier, which is usually a unique number. This identifier is one of the basic attributes of a file, along with its name, type (if the OS supports different types of files), location, size, timestamps (creation, modification, and latest use), protection information on who has access (read, write, and modify), and other attributes such as type of encoding. You can find more information about this on page 11 of this PDF file.\\
|
||||
\textbf{What steps are necessary to create a file?} To create a file, there are two steps that need to be taken. First, the operating system needs to find a space to store the new file. Second, the operating system needs to make an entry for the new file in a directory. (p 12).
|
||||
\\
|
||||
\textbf{The FAT method does not store information about free blocks.} QUIZ says is correct--> ask in discussion!!!!!!
|
||||
That is correct. The File Allocation Table (FAT) method does not store information about free blocks. Instead, it uses a linked list to keep track of free blocks. This means that the FAT method can be less efficient than other methods when it comes to managing free space on a disk. p17/18 BUT ACTUALL: Actually, the File Allocation Table (FAT) file system does store information about free blocks. The FAT file system is a simple and commonly used file system originally introduced by Microsoft for MS-DOS and later adopted by other operating systems.
|
||||
|
||||
In the FAT file system, the file allocation table is a table that keeps track of the allocation status of each block or cluster on the disk. Each entry in the table corresponds to a specific block or cluster, and it indicates whether the block is free or allocated.
|
||||
|
||||
When a file is stored on a disk using the FAT file system, the file system looks for a sequence of free blocks in the file allocation table to allocate to that file. As the file grows or shrinks, the file system updates the corresponding entries in the table to reflect the changes in the allocation status of the blocks.
|
||||
|
||||
So, the FAT file system does store information about free blocks in the file allocation table, allowing the file system to keep track of available space on the disk and efficiently allocate and manage file storage.\\
|
||||
|
||||
\textbf{Using x-bit pointers limits the size of a file to 4 GB} The use of 32-bit pointers limits the size of a file to 4 GB. This is because a 32-bit pointer can address up to $2^{32}$ bytes of memory, which is equal to 4 GB. Therefore, if a file system uses 32-bit pointers, the maximum size of a file that can be stored on that file system is limited to 4 GB. \\
|
||||
|
||||
\textbf{Mounting a file system means that: }The filesystem is becoming available within the file system namespace. CHATGPT: Mounting a file system means that it is being made accessible and integrated into the operating system's directory hierarchy. When a file system is mounted, the operating system establishes a connection between the file system and a specific directory known as the mount point.
|
||||
|
||||
Here's what happens when a file system is mounted:
|
||||
|
||||
The operating system identifies the device or partition that contains the file system. This can be a hard drive, a solid-state drive, a network storage device, or any other storage medium.
|
||||
|
||||
The operating system reads the file system's metadata, such as the file allocation table, inode table, or other data structures, to understand the organization and structure of the file system.
|
||||
|
||||
A mount point is selected, which is an existing directory in the operating system's file hierarchy. The mount point acts as a reference point for accessing the contents of the mounted file system.
|
||||
|
||||
The operating system establishes a link between the mount point and the file system, enabling access to the files and directories within the file system. The files and directories within the mounted file system become accessible through the mount point.
|
||||
|
||||
Once the file system is mounted, users and applications can read from and write to the files and directories within it, just as they would with any other files on the system. Any changes made to the mounted file system are reflected in the underlying storage device.
|
||||
|
||||
Mounting a file system allows users and programs to interact with the files and directories stored within it, incorporating the contents of the file system seamlessly into the overall file hierarchy of the operating system.\\
|
||||
|
||||
\textbf{A file system may be mounted during the boot time only.} False, see File System Internals.
|
||||
|
||||
\section{Security}
|
||||
\textbf{Phishing is an attack, in which a legitimate-looking e-mail misleads a user into entering confidential information}, true see p8 Security.
|
||||
\\
|
||||
\textbf{A protection domain is a collection of access rights, each of which is} a pair <object-name, rights-set> see p 13 Protections\\
|
||||
\textbf{UNIX operating system associates a protection domain with threads}, false, see p 10 protection.
|
||||
|
||||
\section{Virtualization}
|
||||
\textbf{Hypervisors of Type1 show higher latency than hypervisors of Type2}, false, see p18 Virtualization\\
|
||||
\textbf{Hypervisors of Type2 are designed for endusers rather than service providers} true, see p10 virtualization.\\
|
||||
\textbf{Paravirtualization refers to modifying the OS kernel to avoid traps and calling APIs from the hypervisor (hypercalls)} The PDF mentions that paravirtualization takes an approach of modifying the source code of the guest operating system to avoid causing traps. This is done by calling APIs from the hypervisor, which are known as hypercalls. Therefore, your statement is correct. see p19\\
|
||||
\textbf{Nothing can provide an abstraction to the operating system itself} False, containers are an abstraction of the OS itself. see p. 23\\
|
||||
\end{multicols*}
|
||||
\end{document}
|
||||
BIN
sweng/images/Spiralmodel_nach_Boehm.png
Normal file
|
After Width: | Height: | Size: 42 KiB |
BIN
sweng/images/aa1.png
Normal file
|
After Width: | Height: | Size: 75 KiB |
BIN
sweng/images/aa2.png
Normal file
|
After Width: | Height: | Size: 95 KiB |
BIN
sweng/images/aggregation.png
Normal file
|
After Width: | Height: | Size: 143 KiB |
BIN
sweng/images/aktivitaetsdia.png
Normal file
|
After Width: | Height: | Size: 182 KiB |
BIN
sweng/images/assosiationsklassen.png
Normal file
|
After Width: | Height: | Size: 345 KiB |
BIN
sweng/images/assozation.png
Normal file
|
After Width: | Height: | Size: 98 KiB |
BIN
sweng/images/bedproblems.png
Normal file
|
After Width: | Height: | Size: 116 KiB |
BIN
sweng/images/blkboard.png
Normal file
|
After Width: | Height: | Size: 88 KiB |
BIN
sweng/images/bsplayering.png
Normal file
|
After Width: | Height: | Size: 129 KiB |
BIN
sweng/images/dependencyinversion.png
Normal file
|
After Width: | Height: | Size: 159 KiB |
BIN
sweng/images/diagrammewiewowas.png
Normal file
|
After Width: | Height: | Size: 269 KiB |
BIN
sweng/images/driverstubs.png
Normal file
|
After Width: | Height: | Size: 109 KiB |
BIN
sweng/images/einfachesdesign.png
Normal file
|
After Width: | Height: | Size: 169 KiB |
BIN
sweng/images/einfachesdesign2.png
Normal file
|
After Width: | Height: | Size: 247 KiB |
BIN
sweng/images/entscheidungstabelle.png
Normal file
|
After Width: | Height: | Size: 153 KiB |
BIN
sweng/images/fliessband.png
Normal file
|
After Width: | Height: | Size: 47 KiB |
BIN
sweng/images/fliessbandbsp.png
Normal file
|
After Width: | Height: | Size: 249 KiB |
BIN
sweng/images/kfg2.png
Normal file
|
After Width: | Height: | Size: 198 KiB |
BIN
sweng/images/kfproblems.png
Normal file
|
After Width: | Height: | Size: 314 KiB |
BIN
sweng/images/klassendiagramme.png
Normal file
|
After Width: | Height: | Size: 309 KiB |
BIN
sweng/images/kommunikationsdia.png
Normal file
|
After Width: | Height: | Size: 146 KiB |
BIN
sweng/images/kontrollflussgraph.png
Normal file
|
After Width: | Height: | Size: 73 KiB |
BIN
sweng/images/layering.png
Normal file
|
After Width: | Height: | Size: 178 KiB |
BIN
sweng/images/paketdia.png
Normal file
|
After Width: | Height: | Size: 305 KiB |
BIN
sweng/images/regressionstestdia.png
Normal file
|
After Width: | Height: | Size: 408 KiB |
BIN
sweng/images/rueckkopplung.png
Normal file
|
After Width: | Height: | Size: 170 KiB |
BIN
sweng/images/sequenzdia.png
Normal file
|
After Width: | Height: | Size: 128 KiB |
BIN
sweng/images/spezifikation.png
Normal file
|
After Width: | Height: | Size: 108 KiB |
BIN
sweng/images/sprachschablonen.png
Normal file
|
After Width: | Height: | Size: 291 KiB |
BIN
sweng/images/taxonomieQS.png
Normal file
|
After Width: | Height: | Size: 138 KiB |
BIN
sweng/images/testbigsmall.png
Normal file
|
After Width: | Height: | Size: 222 KiB |
BIN
sweng/images/testenueberblick.png
Normal file
|
After Width: | Height: | Size: 238 KiB |
BIN
sweng/images/umlklassen.png
Normal file
|
After Width: | Height: | Size: 329 KiB |
BIN
sweng/images/umlspezdia.png
Normal file
|
After Width: | Height: | Size: 228 KiB |
BIN
sweng/images/usecasedia.png
Normal file
|
After Width: | Height: | Size: 200 KiB |
BIN
sweng/images/uwdia.png
Normal file
|
After Width: | Height: | Size: 184 KiB |
BIN
sweng/images/vererbung.png
Normal file
|
After Width: | Height: | Size: 204 KiB |
BIN
sweng/images/zustandsdia.png
Normal file
|
After Width: | Height: | Size: 325 KiB |
664
sweng/main.tex
Normal file
@ -0,0 +1,664 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% writeLaTeX Example: A quick guide to LaTeX
|
||||
%
|
||||
% Source: Dave Richeson (divisbyzero.com), Dickinson College
|
||||
%
|
||||
% A one-size-fits-all LaTeX cheat sheet. Kept to two pages, so it
|
||||
% can be printed (double-sided) on one piece of paper
|
||||
%
|
||||
% Feel free to distribute this example, but please keep the referral
|
||||
% to divisbyzero.com
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% How to use writeLaTeX:
|
||||
%
|
||||
% You edit the source code here on the left, and the preview on the
|
||||
% right shows you the result within a few seconds.
|
||||
%
|
||||
% Bookmark this page and share the URL with your co-authors. They can
|
||||
% edit at the same time!
|
||||
%
|
||||
% You can upload figures, bibliographies, custom classes and
|
||||
% styles using the files menu.
|
||||
%
|
||||
% If you're new to LaTeX, the wikibook is a great place to start:
|
||||
% http://en.wikibooks.org/wiki/LaTeX
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
\documentclass[10pt,landscape]{article}
|
||||
\usepackage{amssymb,amsmath,amsthm,amsfonts}
|
||||
\usepackage{multicol,multirow}
|
||||
\usepackage{calc}
|
||||
\usepackage{ifthen}
|
||||
\usepackage[landscape]{geometry}
|
||||
\usepackage[colorlinks=true,citecolor=blue,linkcolor=blue]{hyperref}
|
||||
\usepackage{helvet}
|
||||
\renewcommand{\familydefault}{\sfdefault}
|
||||
%%Packages added by Sebastian Lenzlinger:
|
||||
\usepackage{enumerate} %% Used to change the style of enumerations (see below).
|
||||
|
||||
\usepackage{enumitem}
|
||||
\usepackage{fancyhdr}
|
||||
\newtheorem{definition}{Definition}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
\newtheorem{axiom}{Axiom}
|
||||
\newtheorem{lem}{Lemma}
|
||||
\newtheorem{corr}{Corollary}
|
||||
|
||||
\usepackage{tikz} %% Pagacke to create graphics (graphs, automata, etc.)
|
||||
\usetikzlibrary{automata} %% Tikz library to draw automata
|
||||
\usetikzlibrary{arrows} %% Tikz library for nicer arrow heads
|
||||
%%End
|
||||
\usepackage{wrapfig}
|
||||
|
||||
\ifthenelse{\lengthtest { \paperwidth = 11in}}
|
||||
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
|
||||
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
|
||||
}
|
||||
\pagestyle{empty}
|
||||
\makeatletter
|
||||
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{0.5ex plus .2ex}%x
|
||||
{\normalfont\large\bfseries}}
|
||||
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
|
||||
{-1explus -.5ex minus -.2ex}%
|
||||
{0.5ex plus .2ex}%
|
||||
{\normalfont\normalsize\bfseries}}
|
||||
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{1ex plus .2ex}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\makeatother
|
||||
\setcounter{secnumdepth}{0}
|
||||
\setlength{\parindent}{0pt}
|
||||
\setlength{\parskip}{0pt plus 0.5ex}
|
||||
% -----------------------------------------------------------------------
|
||||
\pagestyle{fancy}
|
||||
\fancyhead{}
|
||||
\fancyfoot{}
|
||||
% Set the right side of the footer to be the page number
|
||||
\fancyfoot[R]{\thepage}
|
||||
\title{Software Engineering HS22 UniBas}
|
||||
|
||||
\begin{document}
|
||||
|
||||
\raggedright
|
||||
\footnotesize
|
||||
|
||||
\begin{center}
|
||||
\Large{\textbf{Sweng HS22}} \\
|
||||
\end{center}
|
||||
\begin{multicols*}{3}
|
||||
\setlength{\premulticols}{1pt}
|
||||
\setlength{\postmulticols}{1pt}
|
||||
\setlength{\multicolsep}{1pt}
|
||||
\setlength{\columnsep}{1pt}
|
||||
|
||||
\section{Intro}
|
||||
\subsection{Warum ist Sweng Wichtig?}
|
||||
\begin{itemize}
|
||||
\item Komplexität geht stetig rauf.
|
||||
\item Technologien sind reif und Infrastruktur vorhanden: 80J Computer, 60J $\mu$Prozessor, 40J Internet, 20 Jahre Smartphones.
|
||||
\item Probleme der Komplexität:
|
||||
\begin{itemize}
|
||||
\item Wie versteht man ein Multi-File Programm?
|
||||
\item Wie ändert man ein Multi-File/riesiges Programm/Code-base?
|
||||
\item Wie findet man in solchen Projekten Fehler?
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
\subsection{Was ist Sweng?}
|
||||
\begin{itemize}
|
||||
\item "The application of a systematic, disciplined, and quantifiable approach to the developement, operation, and maintenance of software"\textsubscript{-IEEE Standard Glossary of S.E.T.}
|
||||
\item "The multi-person construction of multi-version software"\textsubscript{-David Parnas}
|
||||
\item More than coding; At the center of informatics but more than that; Teamwork/Interdisciplinary.
|
||||
\end{itemize}
|
||||
|
||||
\section{Eigenschaften von Software}
|
||||
|
||||
\subsection{Das Produkt "Software"}
|
||||
|
||||
Was ist speziell an Software als Produkt?
|
||||
\begin{itemize}
|
||||
\item Formbarkeit(\textit{Software is malleable}): Software als Produkt lässt sich leicht und ohne teuren Änderungen an den Produktionsanlagen verändern. Software Design ist jedoch starr; Software wird ohne Designänderungen angepasst: \textbf{Produktion \& Design divergieren}.
|
||||
\item Immaterialität: Traditionelles Produkt: Herestellung grösster Kostenfaktor. Fokus auf garantie reibungslose Produktion. QS in Produktion. \textbf{Software:} Herstellung is trivial (kopieren). Fokus liegt auf \textit{Design \& Implementation}. QS geschieht \textit{im Entwicklungsprozess}.
|
||||
\item Keine Naturgesetze: Es gibt meine/wenige vordefinierte Regeln/Strukturen. Der Lösungsraum ist nicht von Vornherein klar. Das logsiche Folgern/Ableiten von Eigenschaften ist schwierig: Entwicklung und Verifikation dadurch erschwert. \textbf{Alle logischen Strukturen und Regeln müssen explizit design und dokumentiert werden.}
|
||||
\item \textbf{Zusammenfassung: }Software leicht änderbar - Design nicht. Qualität der Sw. in der Entwicklung gesichert. Keine verlässlichen Strukturen vorgegeben. Folgerung: \textbf{Sw.entwicklung braucht disziplin und gute Prozesse.} Enter Sweng.
|
||||
\end{itemize}
|
||||
|
||||
\subsection{Qualitätsmerkmale von Software}
|
||||
Intern: Sieht Entwicklerteam. Extern: Sieht Benutzer.
|
||||
\includegraphics[scale=0.2]{images/taxonomieQS.png}\\
|
||||
|
||||
\textbf{Korrektheit: }•Spezifische \textit{Anforderungen} erforderlich. •Korrektheit ist absolut (Code ist NICHT "ein bisschen falsch"). •Formale Spezifikation erlauben Korrektheitsbeweise.
|
||||
\textit{Software ist korrekt, wenn sie die spezifizierten funktionalen Anforderungen erfüllt.}\\
|
||||
\textbf{Zuverlässigkeit: }\textit{W.keit, dass in einem bestimmten Zeitintervall kein Fehler auftritt.}. Aka. Benutzer kann sich auf Sw. verlassen. $\text{Korrekt}\subseteq \text{Zuverlässig}$.\\
|
||||
\textbf{Robustheit: }Software verhält sich auch in unvorhergesehenen Situationen Sinnvoll: Netzwerkausfall, Hardwareausfall, Input ist Quatsch. Diese Eigenschaft ist nicht leicht zu spezifizieren. Korrektheit vs. Robustheit: Anforderung spezifiziert -> Korrektheit. Anforderung nicht spez. -> Robustheit.\\
|
||||
\textbf{Wartbarkeit: }Wie einfach lässt sich die Sw. weiterentwickeln und neuen Gegebenheiten anpassen?
|
||||
Wieso Wartbarkeit: Software Aging. Wieso gibt es Software-Aging?: •Neue Hardware •Neue OS •Neue Anforderungen an SW. (Gesetzte, Marktdruck) •Fehlerbehebung • Verbesserung (Performance/Wartbarkeit etc.). \textbf{Meisten kosten NACH der Entwicklung.}\\
|
||||
\textbf{Verständlichkeit: }Wie gut findet man sich im Code zurecht. Ist er einfach zu verstehen? Für jemanden, der ihn nicht selbst geschrieben hat?\\
|
||||
\textbf{Wiederverwendbarkeit: }Ist es einfach die SW. in einem anderen Kontext wiederzuverwenden? Lsg.ansatz:•Komponentenbasierte Entwicklung: Kosten für Verifikation, Wartung etc. /Komponente und nicht /Produkt.\\
|
||||
\textbf{Portabilität: }Grad der Plattformabhängigkeit. Bsp. Plattformen: •Browser •OS •Hardware (AMD, Apple Silicon, Intel)\\
|
||||
\textbf{ZF: } \\•Korrektheit von SW. wird immer durch Anforderungen bestimmt - Ohne Anforderungen keine Korrektheit, nur Robustheit.\\•Gute Software is Wartbar: Wartung verursacht Grossteil der Kosten. Das Qualitätskriterum "Wartbarkeit" ist durch viele andere Q.krit. beeinflusst.
|
||||
|
||||
\textbf{Prozessmerkmale:}•Transparenz •Verfügbarkeit •Produktivität
|
||||
|
||||
\subsection{No Silver Bullet. Frederick P. Brooks, Jr. ZF. und Analyse}
|
||||
"Fashioning complex conceptual constructss is the \textit{essence; accidental}-tasks arise im representing the constructs in language. Past progress has so reduced the accidental tasks that future progress now depends upon addressing the essence."\textsubscript{-F.P.Brooks,Jr.}\\
|
||||
Weder technologische noch Management-Entwicklungen versprechen eine Verbesserung der Produktivität, der Einfachheit und der Verlässlichkeit um Grössenordnungen. \\
|
||||
•Essence: Difficulties inherent in Sw.\\
|
||||
•Accidents: those that attend but are not inherent\\
|
||||
Hard Part of Building Sw.: Specification, design, testing of the conceptual construct, not the labour of representing it and testing the fidelity thereof.\\
|
||||
Essentielle Eigenschaften von Sw.:
|
||||
•Complexity: Erschwert Kommunikation in und zwischen Teams; führt zu Fehlern im Produkt, zu hohen Kosten und Verspätungen. Erweitern ist nicht repetitiv, sondern produziert neue Komplexitäten. Die Enumerierung und das Verstehen der möglichen Zustände der Sw. führt zu Unzuverlässigkeit. Die Komplexität v. Funktionen führt dazu, dass sie schwierig aufzurufen sind. Die komplexe Struktur führt zur Schwierigkeit, die Sw. zu erweitern, ohne Seiteneffekte einzuführen.\\
|
||||
•Conformity: SW. muss Menschen gerecht werden und verschieden Sprachen, Kulturen, Gewohnheiten gerecht werden. Das ist arbiträre Komplexität die nicht aufgehoben werden kann. Aka: Die interfaces sind unendlich vielfältig und verändern sich nicht weil sie müssen, sondern weil sie von verschieden Personen designt wurden. Die komplexität der Konformität von Sw. zu Interfaces kann nicht durch redesign simplifiziert werden.\\
|
||||
•Changeability: Weil Sw. in einer ewig-verändernder Matrix aus Applikationen, Gesetzten, HW lebt, wird sie ewig zum anpassen und assimilieren gezwungen. \\
|
||||
•Invisibility: Sw. ist unsichtbar und undarstellbar. Die Strukturen der Sw. sind also schwer zu überschauen. Tools etc. können zwar helfen, aber werden nicht eine verbesserung in der Grössenordung versprechen.\\
|
||||
\textbf{Mögliche Silver Bullets, und wieso sie doch nicht solche sind.}\\
|
||||
Die meisten Verbesserungen haben die Accidents aber nicht die Essenz v. SW. verbessert.
|
||||
\textit{HighLvlLang:}Address: Accidental complexity of LowLvl Programming into more abstract realm. why not: Complexities have now been removed and marginal reduciton will not be order of magnitude.\\
|
||||
\textit{Object-Oriented Programming: }Addresses: Removes higher-order accidental complexity of representation in the computer to express the design one wants to achieve. Why not: can only remove accidental problems from the expression of the design. The complexity of the design itself is unchanged.
|
||||
\textit{AI: }problem is knowing what to say, not saying it. Expert systems need experts. \\
|
||||
\textit{Graphical Programming: }sw. is fundamentally not visualizable.\\
|
||||
\textit{Sw. verification:} Sw. verification does not mean error-proof sw. Verification can only establish that sw. meets its specification, but the specification itself may be incomplete or inconsistent. The specification itself must be debugged...\\
|
||||
\textbf{Promising attacks on conceptual essence: }\\
|
||||
Problem w/ techonogical breakthroughs/attacks on the essence is that they're limited by productivity. If no more improvements can be made in that respect, attacks on the essence of software should be done:\\
|
||||
•Buy don't Build: Specialization and tried and tested software is probably better, cheaper\\
|
||||
•Requirements refinement and rapid prototyping: Hardest part of building system is deciding what to build: Iterative extraction and refinement of the requirements. So developeing approaches for rapid prototyping and iterative specification refinement would attack the essence of the problem.: formulating what one wants and knowing how it looks.\\
|
||||
•Incremental dev. - grow dont build: First get it to run and then piece by piece build it up.Then, at every stage of the process, one has a working system.\\
|
||||
•Great designers\\
|
||||
\section{Prinzipien der Softwareentwicklung}
|
||||
$\text{Prinzipien(=stabiler Kern)}\subset\text{Methoden/Techniken(um Prinzipien zu folgen)}\subset\text{Methodologien(Begründen weshalb eine Methode gut ist)}\subset\text{Tools=schnelllebige Werkzeuge}$.
|
||||
\textbf{Was sind gute Prinzipien?} Abstrakt und falsifizierbar. Bsp. schlecht: use JUnit / schreibe qualitativ gute Sw. Bsp. gut: Schreibe Tests immer zuerst.
|
||||
\textbf{Wichtige Prinzipien: }\\
|
||||
•Genauigkeit und formales Vorgehen: (unerreichbares) Ideal: Mathematische Präzision. Erreichbar: klare Regeln; klare Prozesse; eindeutig def. Anforderungen; rigorose Tests.\\
|
||||
•Separation of Concerns: Divide and Conquer; reduziert Komplexität; Aufgaben und Verantwortung können verteilt und parallelisiert werden. Produkt: Anforderungen separat betrachten: Funktionalität; Performance; GUI etc. Prozess: Testing und Entwicklung (versch. Teams?); Phasen im Wasserfallmodell.\\
|
||||
•Modularität: Komplexes System in kleine Teile zerlegt;\textbf{Wichtigstes Prinzip der Sw. Entwicklung}; reduziert Komplexität und somit erlaubt erst Trennung der Verantwortung\\
|
||||
•Abstraktion: Details ignorieren; Spezialfall v. Trennung der Verantwortung: wichtig (Funktion) und unwichtig (Details)\\
|
||||
•Design for Change \\
|
||||
•Allgemeinheit: Gibt es ein generelles Problem hinter meinem Problem? Bsp. Funktion generisch schreiben statt für Strings \\
|
||||
•Incrementality: Grow statt build; Erstes Resultat nur approximierung dafür in jedem Schritt etwas Brauchbares.
|
||||
|
||||
\subsection{Modularität: }
|
||||
Ein Modul ist ein wohldefinierter Teil der Software. z.B. Funktion/Methode, Klasse, Sammlung v. Daten, Packete etc. \\
|
||||
Modularität erlaubt \textbf{Dekomposition und Komposition} und \textbf{Separation of Concerns} in zwei Phasen:\\
|
||||
1. Bottom-up Design: Ausarbeiten der Details eines Moduls ohne andere Module zu beachten. 2. Top-down Design: Integration ins Gesamte unter Berücksichtigung der Beziehungen zw. den Modulen: Vernachlässigung der Details der Module.\\
|
||||
\textbf{Verschiedene Modulbeziehungen: }\\
|
||||
•"Uses": A "uses" b. A nutzt Funktionalität v. B. B stellt Funktionalität zur Verfügung.\\
|
||||
•"is component of"(Komposition): Modul ist aus einfacherern Moduln zusammengesetzt. Beschreibung eines Moduls auf höherer Abstraktionsebene.\\
|
||||
•Hierarchie: gdw. keine Zyklen. Ebene in einer Hierarchie: $M_i$ is auf Ebene 0, falls $\not\exists M_j$, s.t. $M_iRM_j$. Sei k die höchste eben aller Module $M_j$, s.t. $M_i R M_j$. Dann ist $M_i$ auf Ebene k+1.
|
||||
\subsection{Software Aging}
|
||||
Wieso altert Software?:\\
|
||||
•Sie wird nicht neuen gegebenheiten Angepasst\\
|
||||
•Die Anpassungen sind nicht gut(neue Bugs werden eigeführt, Dokumentation nicht erneuert)\\
|
||||
•Nicht selbs software aging: Allocated memory isn't released\\
|
||||
Kosten v. Sw.-Alterung:\\
|
||||
•Kunden wechseln auf neue Produkte, weil Sw. nicht mehr das kann, was Kunden wollen.\\
|
||||
•Leistung nimmt ab: Sw. wird langsamer auch wegen schlechtem Design\\
|
||||
•Maintenance introduces bugs and thus decreases reliability.\\
|
||||
Reducing costs of sw. aging:\\
|
||||
•Focus on maintenance. Focus on good, solid engineering!\\
|
||||
How do we slow down sw. aging and limit its effects?\\
|
||||
Preventing Sw. aging:\\
|
||||
•Design for Success: Design for Change + Focus on what is most likely to change, since it's impossible to make everything equally easy to change. Make it so that future changes only affect a small amount of code.\\
|
||||
•Take documentation seriously\\
|
||||
•Do peer reviews.\\
|
||||
Dealing w/ old software:\\
|
||||
•Stop deterioration: Introduce, or recreate, structure when changes are made.
|
||||
•Retroactive documentation.\\
|
||||
|
||||
\section{Software Design}
|
||||
\subsection{Aufbau von Moduln}
|
||||
\subsubsection{Interface und Implementation}
|
||||
•Trennung Implementation und Interface\\
|
||||
•Ermöglich Trennung der Verantwortlichkeiten\\
|
||||
•Client nutzt Funktionalität, Server sorgt dafür, dass Funktionalität richtig implementiert ist.\\
|
||||
•\textit{Interface } abstrahiert Funktionalität. Nur was Client wissen muss wird beschrieben.$\Rightarrow$ Details der Implementation sind unsichtbar für Client. $\Rightarrow$ \textit{Implementation soll geändert werden könnnen ohne dass Client geändert werden muss.}\\
|
||||
•\textit{Information Hiding:} Komponenten die sich (eher) ändern werden sollten immer hinter einem Interface vor dem Client versteckt werden. In OO durch Klassen.\\
|
||||
•\textbf{Information Hiding is Prinzip, Kapselung die Methode wodurch das Prinzip erwirkt wird.}
|
||||
|
||||
\subsection{Sw. Architecture \& Design}
|
||||
\textbf{Was ist Design? }\textit{Formgebende und funktionale Gestaltung eines Produkts.}
|
||||
•Strukturiert ein Artefakt\\
|
||||
•Zerlegung eines Systems in (einfachere) Komponenten $\Rightarrow$ Trennung der Verantwortlichkeiten.\\
|
||||
•Sicherstellen das Anforderungen erfüllt sind.\\
|
||||
•Ästhetik is nicht primäre Aufgabe.\\
|
||||
•Interaktion muss auch Design werden.
|
||||
\\
|
||||
\textbf{Design in Sweng:}\\
|
||||
•Softwaredesign: Struktur und Entwurf der Module\\
|
||||
Zwei Bedeutungen in Sweng $\rightarrow$\\
|
||||
1. Schritt zwischen Anforderungsanalyse undf Implementation: Erstellen der Softwarearchitektur/Softwaredesigns\\
|
||||
2. Strukturierung eines Artefakts: Klassendesign in einem OO-System; Design des Anforderungsdokuments.\\
|
||||
\textbf{Design vs. Arch.}\\
|
||||
\textit{Softwarearchitektur:} Struktur der Module (Design des gesamten Systems)\\
|
||||
\textit{Software-/Moduldesign:} Entwurf individueller Module (Design der Teile)\\
|
||||
Aber: Module enthalten selbst Module $\Rightarrow$ Keine Strikte Trennung möglich.\\
|
||||
\textbf{Design Ziele:}\\
|
||||
•Struktur so festlegen, dass hohe Sw.-qualität erreicht \textit{werden kann}.\\
|
||||
•Wichtigstes Prinzip: \textit{Design for Change.} aka. Wahrscheinliche Änderungen sollten einfach zu implementierensein.\\
|
||||
• "There are two ways of constructing a software design: One way is to make it so simple that there are obviously no deficiencies, and the other way is to make it so complicated that there are no obvious deficiencies." -Tony Hare.
|
||||
•\textbf{Kopplung: }Wie starks sind Module verknüpft.\\
|
||||
•\textbf{Bindung: }Wie gut bilden die Module eine logische Einheit?\\
|
||||
•\textbf{Ideal: }Schwache Kopplung, starke Bindung.\\
|
||||
\includegraphics[width=\linewidth]{images/einfachesdesign.png}\\
|
||||
\includegraphics[width=\linewidth]{images/einfachesdesign2.png}\\
|
||||
\includegraphics[width=\linewidth]{images/layering.png}\\
|
||||
\includegraphics[width=\linewidth]{images/bsplayering.png}\\
|
||||
\textbf{Pipelining: }\\
|
||||
Virtuelles Fliessband:\\
|
||||
•Hintereinander ausführen der Arbeitschritte\\
|
||||
•Module nutzen nur ein weiteres Modul\\
|
||||
\includegraphics[width=\linewidth]{images/fliessband.png}\\
|
||||
\includegraphics[width=\linewidth]{images/fliessbandbsp.png}\\
|
||||
\textbf{Blckboard/Datarepository}\\
|
||||
•Gute Orga wenn viele Module miteinander kommunizieren müssen\\
|
||||
•Zentrales Modul dient als globaler Speicher\\
|
||||
\includegraphics[width=\linewidth]{images/blkboard.png}\\
|
||||
|
||||
\section{OO-Design}
|
||||
\textbf{Polymorphismus: }Variablen können zur Laufzeit an verschiedene (verwandte) Typen gebunden werden.\\
|
||||
\textbf{Dynamische Bindung: }(late binding)Methode wird zur Laufzeit entsprechend dem Objekttyp verwendet.\\
|
||||
•OO Sprachen helfen Design direkt abzubilden. Nicht OO Sprachen verlangen eienen Mehraufwand. Wichtig sind jedoch die Prinzipien, nicht die Sprache.
|
||||
\textbf{Designtipps: }\\
|
||||
•Fokus auf Konzepten nicht Klassen oder Daten\\
|
||||
•Inkrementel aufbauen/nicht alles auf einmal\\
|
||||
•Prototyping\\
|
||||
•Optimierungskriterien: Trennung der Verantwortlichkeit; Reduziere Modulkopplung; Köhesion verbessern. In der objektorientierten Programmierung beschreibt Kohäsion, wie gut eine Programmeinheit eine logische Aufgabe oder Einheit abbildet(v. Wikipedia).\\
|
||||
|
||||
\subsection{SOLID Prinzipien}
|
||||
Klassiche Sweng Prinzipien auf OO angewandt.
|
||||
•\textbf{S}ingle Responsibility Principle\\
|
||||
Es sollte nie mehr als einen Grund geben, eine Klasse zu ändern. Objekte sollten hohe Kohäsion aufweisen. Jede Klasse soll nur für eine Sache verantwortlich sein. Verwandtes Prinzip: Separation of Concerns. Bsp. schlecht: Klasse führt Konto und erstattet Bericht - gut: Führt nur Konto.\\
|
||||
•\textbf{O}pen Closed Principle\\
|
||||
Module sollten offen für Erweiterungen, aber geschlossen für Modifikationen sein. Ermöglicht neue Features, ohne ursprünglichen Code zu verändern. Minimiert Risiko, dass existierende Funktionen wegen Änderung kaputt gehen. $\Rightarrow$ Verwende Interfaces in Klassen statt spezifische Klassen.
|
||||
Verwandes Prinzip: Design for Change\\
|
||||
•\textbf{L}iskovsches Substitutionsprinzip\\
|
||||
Sei $\phi(x)$ eine beweisbare Eigenschaft v. Objekt $x$ von Typ $T$. Dann gilt $\phi(y)$ für Objekte $y$ von Typ $S$, falls $S$ ein Untertyp von $T$ ist: Jedes Objekt kann durch ein Objekt der Subklasse ersetzt werden, ohne dass sich das Verhalten ändert. Kein überraschendes Verhalten $\rightarrow$ einfacherer Code. Bei nicht gut definierten Schnittstellen und Klassen schwierig zu prüfen und verlangt Spezifikation.\\
|
||||
•\textbf{I}nterface Segregation\\
|
||||
Clients sollten nicht gezwungen werden von Interfaces abzuhängen, die sie nicht verwenden. $\Rightarrow$ Kleine Interfaces mit wohldefinierter Funktionalität; Klassen entkoppeln. Verwandtes Prinzip: Seperation of Concerns/Trennung der Verantwortlichkeiten\\
|
||||
•\textbf{D}ependency Inversion\\
|
||||
Module auf höherer Ebene sollten nicht von tiefer gelegenen abhängen. Abstraktionen sollten nicht v. Details abhängen. Details sollten v. Abstr. abhängen. Verwandte Prinzipien: Design for Change; Modularität.\\
|
||||
\includegraphics[width=\linewidth]{images/dependencyinversion.png}\\
|
||||
\textbf{Gesetz v. Demeter}\\
|
||||
Methode $m$ aus Klasse $K$ soll nur auf folgende Programm-Teile zugreifen dürfen:\\
|
||||
•Methoden v. $K$ selbst\\
|
||||
•Methoden v. Objekten die als Argumente an $m$ übergeben werden\\
|
||||
• " " ", die in Instanzvaariablen von $K$ abgelegt sind\\
|
||||
• " " ", die $m$ erzeugt\\
|
||||
|
||||
\section{Anforderungsanalyse}
|
||||
\subsection{Aufgabe der Aa.}
|
||||
Anforderung an System\\
|
||||
•ermittlen\\
|
||||
•spezifizieren\\
|
||||
•analysieren\\
|
||||
•validieren\\
|
||||
und daraus Lösungen herleiten. Ergebnis: Anforderungsspezifikation (Pflichtenheft)\\
|
||||
Wieso braucht es Spezifikation? Abstimmung m/ Kunde;Entwurf und Implementation; Benutzerhandbuch; Testvorbereitung; Wiederverwendung.
|
||||
|
||||
\subsection{Schematischer Ablauf}
|
||||
\begin{wrapfigure}{l}{0.09\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=0.09\textwidth]{images/aa2.png}
|
||||
\end{wrapfigure}
|
||||
\textbf{Lastenheft:} Gesamtheit der Forderungen an die Lieferungen und Leistungen eines Auftragnehmers innerhalb eines Auftrags aus Auftraggebersicht. Dient als Vertragsgrundlage. Kann vage, lückenhaft, inkonsistent sein. Beantwortet Frage: Was muss gemacht werden? $\Rightarrow$ z.B. Github Issue.
|
||||
|
||||
\textbf{Pflichtenheft: }Erarbeitete Realisierungsvorgaben basierend auf Lastenheft.• Anwendervorgaben werden detailliert; Realisierungsforderungen werden beschrieben; Definiert wie und womit die Anforderungen zu realisieren
|
||||
sind; Muss vollständig klar und konsistent sein. Beantwortet Frage: Wie wird es umgesetzt? $\Rightarrow$ z.B. Unser Pflichtenheft mit Details.
|
||||
|
||||
\underline{Lösung in einem Schritt wird zum Problem des nächsten:}
|
||||
\includegraphics[width=\linewidth]{images/rueckkopplung.png}\\
|
||||
|
||||
\subsection{Anforderungen}
|
||||
Anforderungen müssen
|
||||
Inhaltlich:
|
||||
\begin{itemize}
|
||||
\item Korrekt
|
||||
\item Eindeutig
|
||||
\item Vollständig
|
||||
\item Konsistent
|
||||
\item Klassifizierbar nach Wichtigkeit
|
||||
\item """ Stabilität
|
||||
\item Testbar/überprüfbar
|
||||
\item verfolgbar
|
||||
\end{itemize}
|
||||
sein.\\
|
||||
Formal:\\
|
||||
\begin{itemize}
|
||||
\item verständlich
|
||||
\item präzise
|
||||
\item leicht erstellbar
|
||||
\item leicht verwaltbar
|
||||
\end{itemize}
|
||||
Anforderungen legen fest, was Stakeholder von einem Sw.system als Eigenschaften erwarten.
|
||||
|
||||
|
||||
\textbf{Funktionale Eigenschaften:}\\
|
||||
Legen von Sw. bereitzustellende Funktionen fest. Leitfrage: Was muss die Sw. tun? Z.B. "Sw. muss von Kunde PIN abfragen".\\
|
||||
\textbf{nicht-fkntale Eigenschaften:}\\
|
||||
Alle anderen. Qualitätsmerkmale(Performance, Wartbarkeit..); Sicherheitsanforderungen etc.\\
|
||||
|
||||
\textbf{Individualsoftware:} -Stakeholder einfacher zu identifizieren weil im Kundenauftrag entwickelt und somit Benutzer auch meist bekannt und als Stakeholder eingebunden werden\\
|
||||
- (Kunden/Benutzer) Anforderungen können eher schon zu beginn vollständig ermittelt werden. Aber: sind widersprüchlich.\\
|
||||
- Sw. langlebig und hat Schnittstellen zu anderen Systemen des Kunden.\\
|
||||
- Durch Integration in vorhandene Prozesse und Nachbarsysteme entstehen viele detaillierte Vorgaben.\\
|
||||
- Lange Ermittlung und Priorisierung v. Anf. meist nicht gemacht.\\
|
||||
- PROBLEMATISCH: Konsolidierung der Anforderungen , bei der Inkonsistenzen und Anforderungslücken eliminiert werden.\\
|
||||
- Software meist einmal installiert und selten geupdatet.
|
||||
\textbf{Standardsoftware: }\\
|
||||
- Anforderungen stammen aus vielfältigen Infokanälen.\\
|
||||
- Spätere User zum Entwicklungszeitpunkt nicht bekannt. Sind die Benutzerdaten die erhoben wurden repräsentativ?\\
|
||||
- Spätere Art der Nutzeung und Arbeitsabläufe beim Kunden können sich sehr von den Ideen des Marketings unterscheiden.\\
|
||||
- Customizing sollte daher möglich sein.\\
|
||||
- Wird in vielen verschieden Installationen betrieben, in unterschiedlichen Versionen.\\
|
||||
\includegraphics[width=\linewidth]{images/sprachschablonen.png}
|
||||
|
||||
\section{Sw.spezifikation}
|
||||
\begin{wrapfigure}{l}{0.09\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=0.09\textwidth]{images/spezifikation.png}
|
||||
\end{wrapfigure}
|
||||
\textbf{Arten v. Spez.}\\
|
||||
\begin{itemize}
|
||||
\item Lastenheft: Was User erwartet
|
||||
\item Pflichtenheft: Welche Funktionen System zur verfügung stellt
|
||||
\item Schnittstellenspezifikaiton: Externes Verhalten v. Moduln
|
||||
\item Architekturspez/Design
|
||||
\item Laufzeitverhalten
|
||||
\item Interne Struktur v. Moduln: Hilfreich bei Wartung/Erweitern
|
||||
\end{itemize}
|
||||
Spezifikation is Vereinbarung zw. Produzent/Konsument.\\
|
||||
\textbf{Zweck v. Sw.spez.?}\\
|
||||
\begin{itemize}
|
||||
\item Ohne Spezifikation ist Sw. weder korrekt noch zuverlässig(vgl. jeweilige Def.)
|
||||
\item "W/o requirements and design, programming is the art of adding bugs to an empty text file." - Louis Srygley
|
||||
\item IEEE Glossar Definition: A document that specifies, in a complete, precise, verifiable manner, the requirements, design, behavior, or other characteristics of a system or component and, often, the procedures for determining whether these provisions have been satisfied.
|
||||
\item $\Rightarrow$ Anforderungen festhalten
|
||||
\item $\Rightarrow$ Schnittstellen festhalten
|
||||
\begin{itemize}
|
||||
\item Klare Def./Abgrenzung d. Sw.sys.
|
||||
\item Def. User-/Sensorinputraum
|
||||
\item Mehrdeutigkeiten und Inkonsistenzen eliminieren
|
||||
\end{itemize}
|
||||
\end{itemize}
|
||||
Spez. ist ein WICHTIGER Ref.punkt. während der Entwicklung und Wartung.\\
|
||||
\textbf{Qualitätsmerkmale v. Sw.spez. und wieso sie schwierig zu erreichen sind}\\
|
||||
\begin{itemize}
|
||||
\item Präzise, Korrekt, Eindeutig
|
||||
\item Konsistent (Widerspruchsfrei)
|
||||
\item Vollständig
|
||||
\item Inkrementell: \begin{itemize}
|
||||
\item Prozess: Grob zu fein
|
||||
\item Spez. kann auf versch. Detailebenen verstanden werden
|
||||
\end{itemize}
|
||||
Probleme: Mögliche Interessenskonflikte versch. Stakeholder; Sprachliche Barrieren/Missverständnisse; unklare oder gar unbekannte technische Rahmenbedingungen; Sich ändernde Anforderungen und Prioritäten.\\
|
||||
\end{itemize}
|
||||
\textbf{Konstruktiv vs. Deskriptive Spez. Methoden}\\
|
||||
Generelle Methoden: Sprachlich; Logische/Mathematisch aka. Formal; Grafisch. Normalerweise kombi. Übergeordnetes Ziel: max. Klarheit und Verständlichkeit.\\
|
||||
•Konstruktive Methoden: Verhalten anhand einer "Abstrakten Maschine" definiert. Z.b. Beschreibung wie mit Schnur, bleistift etc. ein Kreis gezeichnet werden kann.\\
|
||||
•Deskriptiv: Verhalten anhand v. Eigenschaften definiert: z.B. Kreis def. mit Radius etc.\\
|
||||
Dann jeweils noch auf Spektrum von Informell bis Formal.\\
|
||||
\textbf{Wie Methoden zur Verifikation eines Systems verwendet werden können}\\
|
||||
•Konstruktiv: Verhalten beobachten; Simulation; Prototyping.\\
|
||||
•Deskriptiv: Analysieren/ Ableiten v. Eigenschaften.
|
||||
\textbf{Wie sieht ein Ideal Sw.spez. Prozess aus, und wieso der in der Praxis nie erreicht werden kann}\\
|
||||
\subsection{A Rational Design Process. D.L. Parnas and P.C. Clemens}
|
||||
\section{UML}
|
||||
\begin{itemize}
|
||||
\item Ist eine grafische Modellierungssprache
|
||||
\item erlaubt somit Sw. auf hoher Abstraktionsebene zu verstehen
|
||||
\item Hilft beim \begin{itemize}
|
||||
\item Modellieren
|
||||
\item Dokumentieren
|
||||
\item Kommunizieren
|
||||
\end{itemize}
|
||||
\item Zwei Diagrammarten: Strukturdiagramme (statisch) und Verhaltensdiagramme (dynamisch)
|
||||
\end{itemize}
|
||||
|
||||
\subsection{Klassendiagramme (statisch)}
|
||||
\includegraphics[width=0.7\linewidth]{images/klassendiagramme.png}
|
||||
\includegraphics[width=0.2\linewidth]{images/umlklassen.png}
|
||||
|
||||
\subsection{Assoziationen(1.) und Aggregation(2.)}
|
||||
\includegraphics[width=0.6\linewidth]{images/assozation.png}
|
||||
\includegraphics[width=0.3\linewidth]{images/aggregation.png}
|
||||
2. Impliziert "is componenet of" Beziehung. Verwandt mit Komposition: Bei Komp. können die Teile NICHT ohne das ganze existieren (gezeichent mit leerer Raute). Von Raute zu ursprung ist es eine "besteht aus" Beziehung. Dabei sind sie gleich.
|
||||
\subsection{Assoziationsklassen}
|
||||
Eine Assoziationsklasse (auch Beziehungsklasse) ist in der objektorientierten Analyse eine Klasse, deren Attribute und Operationen die Assoziation zwischen anderen Klassen beschreibt.
|
||||
\includegraphics[width=0.5\linewidth]{images/assosiationsklassen.png}
|
||||
Assoziationsklassen werden in Analysephasen von Prozessmodellen verwendet und später zum Entwurf aufgelöst. Dazu werden entweder die Attribute (und Operationen) auf die Klassen der Assoziation verteilt, oder die Assoziation wird aufgeteilt, indem die Assoziationsklasse als eigenständige Klasse eingefügt wird.
|
||||
Assoziationsklassen beschreiben in der UML, wie Relationships mit ihren Attributen in Entity-Relationship-Diagrammen, eine Beziehung (Relationship) zwischen zwei Klassen (Entities), nehmen also die Eigenschaften auf, die gerade durch diese Beziehung entstehen (Von Wikipedia Copy Paste).
|
||||
\subsection{Vererbung}
|
||||
Modelliert Varianten: Publikation ist Buch oder Artikel.
|
||||
\includegraphics[width=0.5\linewidth]{images/vererbung.png}
|
||||
\subsection{Use-case Diagramm(dynamisch)/Paketdiagramm(statisch)}
|
||||
Beschreibt in Funktion involvierte Akteure/Zeigt Struktur auf ebene der Pakete\\
|
||||
\includegraphics[width=0.5\linewidth]{images/usecasedia.png}
|
||||
\includegraphics[width=0.3\linewidth]{images/paketdia.png}
|
||||
Eigentlich nur logische Spez/OCL Deskriptiv, da sie Mitteld Formeln der Aussagenlogik Eigenschaften Spezifiziert.
|
||||
\subsection{Sequenziagramme (dynamisch)}
|
||||
Konstruktive Methode; Beschreibt Interaktion/Nachrichtenzw. Objekten; Fokus auf Sequenz der Nachrichten\\
|
||||
\includegraphics[width=\linewidth]{images/sequenzdia.png}
|
||||
\subsection{Kommunikationsdiagramme(dynamsich)}
|
||||
Semantisch äquivalent zu Sequenzdia; Fokus auf Objektinteraktion\\
|
||||
\includegraphics[width=\linewidth]{images/kommunikationsdia.png}
|
||||
\subsection{Aktivitätsdiagramme(dynamisch)}
|
||||
Konstruktive Spez.methode;Modelliert Ablauf v. Anwendungsfall; Sequenzielle und parallele Aktivitäten möglich; Semantik basiert auf Petrinetze.
|
||||
\includegraphics[width=0.5\linewidth]{images/aktivitaetsdia.png}
|
||||
\subsection{Zustandsdiagramme(dynamsich)}
|
||||
Basierend auf Theorie der endlichen Automaten.
|
||||
\includegraphics[width=0.5\linewidth]{images/zustandsdia.png}
|
||||
\subsection{UML für Spezifikation}
|
||||
\includegraphics[width=\linewidth]{images/umlspezdia.png}
|
||||
|
||||
\section{Verifikation/Grundlagen des Testens}
|
||||
\subsection{Validierung und Verifikation}
|
||||
\begin{tabular}{l l}
|
||||
\textbf{Validierung} & \textbf{Verifikation} \\
|
||||
• Bauen wir das & • Bauen wir das \\
|
||||
\emph{richtige Produkt}? & \emph{Produkt richtig}?\\
|
||||
• Bsp. Nachweis & • Nachweis, dass Funktion erfüllt\\
|
||||
medizinischen Nutzens &\\
|
||||
\end{tabular}
|
||||
Validierung Insgesamt schwierig: Kaum formale Dokumente / Spezifikationen auf denen die Anforderungen aufbauen Ansätze:
|
||||
• Frühe Prototypen / Modelle
|
||||
• Inkrementelles Entwickeln
|
||||
• Reviews
|
||||
• Simulationen
|
||||
• Transparente Prozesse mit Stakeholder in Loop
|
||||
\subsection{Wieso muss Software immer Verifiziert werden?}
|
||||
\begin{itemize}
|
||||
\item Fehler lassen sich nie ganz verhindern \begin{itemize}
|
||||
\item Können jeden Prozesschritt betreffen
|
||||
\end{itemize}
|
||||
\item \textbf{Probleme} \begin{itemize}
|
||||
\item Unklarheiten/Mehrdeutigkeiten
|
||||
\item Falsche Annahmen
|
||||
\item Falsche Schlussfolgerungen /Fehler
|
||||
\item Fehler in System Umgebung
|
||||
\end{itemize}
|
||||
\item \textbf{Ansätze:} \begin{tabular}{l l}
|
||||
\emph{Analyse} & \emph{Experimentieren} \\
|
||||
\emph{ (statisch)}& \emph{(dynamisch)} \\
|
||||
• Code Review & • Verhalten des \\
|
||||
& Programms testen\\
|
||||
• Automatisierte & - Manuelle Ausführung \\
|
||||
Codeanalyse & \\
|
||||
• Formale & - Automatisierte Tests \\
|
||||
Korrektheitsbeweise & \\
|
||||
\end{tabular}
|
||||
\end{itemize}
|
||||
|
||||
\subsection{Diverse Methoden zur Verifikaiton}
|
||||
\textbf{Code Review: }Jemand anderes Schaut Programm an. \emph{Ziele: }\\- Missverständnisse und Fehler entdecken\\ - Codequalität verb. \\- Zusammenarbeit stärken \\ - Wissen verbreiten \\
|
||||
\textbf{(statische) Codeanalyse: }Code auf typische Probleme durchgehen \\ - Uninitialisierte Vars\\- Index out of bounds\\- off by one\\- Zugriff auf Null-Werte\\- unused methods etc. \\
|
||||
Oft durch Entwicklertools unterstützt\\
|
||||
\textbf{Korrektheitsbeweise: }\emph{Ziel: } Beweisen, dass Programm Spezifikationen erfüllt.\\- Spezifikationen müssen formal definiert sein.\\
|
||||
- Braucht "automatische Theorembeweiser" für nicht-triviale Programme\\
|
||||
\textbf{Testen: }Verhalten v. Sw. durch Stichproben Eingaben prüfen. \emph{Ziel: }Beispiele von Inkorrektem Verhalten finden.
|
||||
\subsection{Types of Testing}
|
||||
\includegraphics[width=0.9\linewidth]{images/testbigsmall.png}
|
||||
\begin{tabular}{l l}
|
||||
\emph{Whitebox T.} & \emph{Blackbox T.} \\
|
||||
• strukturelles T. & • Funktionales T. \\
|
||||
Partitionierung der T.fälle &\\
|
||||
basierend auf &\\
|
||||
- interne Struktur & - Spezifikation\\
|
||||
Testen was die Sw. &\\
|
||||
- macht & - machen \emph{sollte}\\
|
||||
\end{tabular}
|
||||
\includegraphics[width=0.9\linewidth]{images/testenueberblick.png}
|
||||
•\textbf{Soll-Resultate}\\
|
||||
\emph{Idealfall}\\
|
||||
• Soll Resultat ist eindeutig definiert und entspricht bekanntem Wert\\
|
||||
\emph{Praxis}\\
|
||||
• Mehrere verschiedene richtige Ergebnisse möglich\\
|
||||
• Ergebnis nur vage definiert (Beispiel: Positionierung eines Ausgabefensters)\\
|
||||
• Richtiges Resultat nicht exakt darstellbar (Beispiel: Resultat von $\frac{1}{3}$)\\
|
||||
• Richtiges Resultat nicht bekannt (Beispiel: Berechnung der Zahl $\pi$)\\
|
||||
\emph{Lösung}\\
|
||||
• Eigenschaften der Lösung werden definiert\\
|
||||
Beispiel: assertTrue($3.141\leq \pi \leq 3.142$)\\
|
||||
|
||||
•\textbf{Regressionstests}\\
|
||||
IEEE Std. 610.12: Selective retesting of a system or component to verify that modifications have not caused unintended effects and that the system or component still complies with its specified requirements.\\
|
||||
• Änderung an Programm sollte keine unbeabsichtigten Effekte haben.\\
|
||||
• Reale Daten von früheren Programmläufen können als Soll-Daten verwendet werden.\\
|
||||
\textbf{Typen v. Fehler}\\
|
||||
\begin{tabular}{l l}
|
||||
\emph{Bereichsf.} & \emph{Punktf.}\\
|
||||
- Bestimmter Teilraum & - tritt für genau Einen\\
|
||||
von Eingaberaum & Testfall auf\\
|
||||
reproduziert fehler & \\
|
||||
& - Liegen oft an Bereichsgrenzen\\
|
||||
\end{tabular}
|
||||
$\Rightarrow$ Äquivalenzklassen verwenden.
|
||||
Bereiche wo ähnliches Verhalten erwartet wird bestimmen.\\
|
||||
\emph{Überdeckungskriterium: }\\
|
||||
- Ein Testfall pro Äquivalenzklasse\\
|
||||
- Ein Testfall pro Grenze\\
|
||||
• \textbf{Modultests}\\
|
||||
• \textbf{Integrationstests} more in next Section\\
|
||||
• \textbf{Systemtest} Testen des ganzen Systems gegen die Funktionalen Anforderungen
|
||||
• Wird häufig auf separater Testumgebung durchgeführt • Testumgebung soll Kundenumgebung simulieren\\
|
||||
• \textbf{Akzeptanztest} Test aus Kundensicht - von Kunden durchgeführt
|
||||
Testet die im Pflichtenheft definierten Akzeptanzkriterien Resultat entscheidet ob nachbessert werden muss.\\
|
||||
\subsection{Stubs and Drivers}
|
||||
Modultests benötigen Hilfskonstrukte, falls abhängige Module noch in Entwicklung sind.\\
|
||||
\includegraphics[width=0.9\linewidth]{images/driverstubs.png}\\
|
||||
\textbf{Integrationstest: }
|
||||
Module werden im Verbund getestet\\
|
||||
2 Möglichkeiten\\
|
||||
1. BigBang\\
|
||||
• Alle Module werden nur in Isolation getestet\\
|
||||
• Danach ganzes System testen\\
|
||||
2. Progressives Integrieren und Testen\\
|
||||
• top down: stub ersetzten\\
|
||||
• bottom up: driver ersetzen\\
|
||||
\subsection{Vor-/Nachteile Top-Down/Bottom-up Integration}
|
||||
\emph{Top-level Module werden gut getestet: }Vorteil Top-Down\\
|
||||
\emph{Benötigt viele Driver: }Nachteil Bottom-Up\\
|
||||
\emph{Das Programm als solches existiert erst am Ende: }Nachteil Bottom-Up\\
|
||||
\emph{Benötigt viele Stubs: }Nachteil Top-Down\\
|
||||
\emph{Fehler in Schnittstellen werden früh entdeckt: }Vorteil Top-Down\\
|
||||
\emph{Früher Prototyp, den man Benutzer zeigen kann: }Vorteil Top-Down\\
|
||||
\emph{Testbedingungen sind einfach zu bestimmen: }Vorteil Bottom-Up\\
|
||||
\emph{Testoutput ist schwierig nachzuvollziehen: }Nachteil Top-Down\\
|
||||
\section{Testen im Kleinen}
|
||||
\subsection{Blackbox}
|
||||
Kriterium: Möglichst gute Abdeckung der spezifizierten Anforderungen\\
|
||||
\emph{Testen aus Spezifikationen: }\\
|
||||
Beispielspezifikation\\
|
||||
Das Programm bekommt als Input eine Repräsentation (Datenstruktur) einer Einzahlung. Diese Einzahlung muss in eine Tabelle von Einzahlungen eingeordnet werden, welche nach Datum sortiert ist. Falls andere Einzahlungen mit demselben Datum existieren, wird die neue Einzahlung nach dem letzten Eintrag dieses Datums einsortiert. Es sollen auch verschiedene Konsistenzchecks durchgeführt werden:
|
||||
• Ist der Kunde bereits im Kundenverzeichnis
|
||||
• Stimmt der Name in den Verzeichnissen überein •...\\
|
||||
Mögliche Äquivalenzklassen\\
|
||||
• Das aktuelles Datum liegt in der Vergangenheit/Zukunft
|
||||
• Es gibt keine/mehrere Zahlungen an diesem Tag
|
||||
• Name ist/ist nicht im Kundenverzeichnis
|
||||
• Zahlung ist mit Datenbank konsistent/inkonsistent\\
|
||||
Grenzfälle\\
|
||||
• Heute
|
||||
• 1 Zahlung\\
|
||||
\emph{Ursache-Wirkung Diagramm:}\\
|
||||
\includegraphics[width=0.9\linewidth]{images/uwdia.png}\\
|
||||
Beispielspezifikation\\
|
||||
Ein Kunde kommt in die Bibliothek und will sich ein Buch ausleihen. Die Bibliothekarin prüft als erstes seinen Ausweis und ob das Benutzerkonto aktiv ist. Falls der Ausweis nicht gültig ist und oder falls der Kunde fällige Mahngebühren nicht bezahlt hat, wird das Konto gesperrt. Falls das Konto aktiv ist, wird zusätzlich überprüft ob der Kunde bereits die maximal mögliche Anzahl von Büchern ausgeliehen hat. Falls dies der Fall ist wird der Ausleihwunsch zurückgewiesen. Ansonsten wird dem Kunden das gewünschte Buch ausgeliehen.\\
|
||||
|
||||
|
||||
|
||||
\emph{Entscheidungstabellen:}\\
|
||||
Überdeckungskriterium: Jede Spalte entspricht einem Testfall.\\
|
||||
\includegraphics[width=0.3\linewidth]{images/entscheidungstabelle.png}\\
|
||||
|
||||
\subsection{Whitebox}
|
||||
Kriterium: Möglichst vollständige Überdeckung vom Code\\
|
||||
- Ableiten von Testfällen anhand interner Struktur\\
|
||||
- Spezifikation nicht im Vordergrund um Testfälle zu finden - Testfälle hängen von Implementation ab\\
|
||||
Struktur vom Code definiert Äquivalenzklassen.\\
|
||||
Überdeckungskriterien:\\
|
||||
\emph{Anweisungsüberdeckung: } \\
|
||||
Wähle Testmenge $T$ so, dass jedes Statement in Programm $P$ mindestens einmal ausgeführt wird (für beliebiges $d\in T$). PROBLEM: Testmenge kann kleiner als Äquivalenzklassen sein. Bsp: \texttt{if (x<0) \{x = -x;\} z = x;} -- Test $x=-3$ deckt alle Statements ab, kein Test für $x>0$.\\
|
||||
\emph{Zweigüberdeckung (Edge Coverage):}\\
|
||||
Die Testmenge $T$ wird so gewählt, dass jeder Zweig des Kontrollflusses mindestens einmal durchlaufen wird.
|
||||
Formalisiert durch \emph{Kontrollflussgraphen}. Jede Kante in Graph muss abgedeckt sein:\\
|
||||
\includegraphics[width=0.3\linewidth]{images/kontrollflussgraph.png}
|
||||
\includegraphics[width=0.65\linewidth]{images/kfg2.png}\\
|
||||
\includegraphics[width=0.9\linewidth]{images/kfproblems.png}\\
|
||||
\emph{Bedingungsüberdeckung:}\\
|
||||
|
||||
\begin{wrapfigure}{l}{0.12\textwidth}
|
||||
\centering
|
||||
\includegraphics[width=0.12\textwidth]{images/bedproblems.png}
|
||||
\end{wrapfigure}
|
||||
Die Testmenge $T$ wird so gewählt, dass jeder Zweig des Kontrollflusses mindestens einmal durchlaufen wird und alle möglichen Elemente von zusammengesetzten Bedingungen mindestens einmal aktiv sind.\\
|
||||
|
||||
\emph{Pfadüberdeckung:} \\
|
||||
Die Testmenge $T$ wird so gewählt, dass jeder Pfad zwischen initial und Endknoten durchlaufen wird \\- Bestes Testkriterium, aber nicht praktikabel\\
|
||||
- Explosion der Anzahl Pfade bei langen Programmen oder Loops\\
|
||||
|
||||
\section{Softwareprozesse}
|
||||
- Bestimmen was man tut\\
|
||||
- Legen Ordung und Beziehung der Aktivitäten fest\\
|
||||
- Definieren auch WANN man zur nächsten Aktivität übergeht\\
|
||||
\textbf{Ziele: }\\
|
||||
- Standardisierung \\
|
||||
- Vorhersehbarkeit \\
|
||||
- Produktivität \\
|
||||
- Hohe Produktqualität \\
|
||||
- Zeit und Budget Planung\\
|
||||
\textbf{Blackbox Sicht: }
|
||||
- Interaktion mit User nur Am Anfang/Ende
|
||||
- Nicht für Sw. geeignet: Fehler in Anforderungen erst am ende entdeckt; Qualitätseigenschaften nicht v. Produkt ableitbar.
|
||||
\textbf{Whitebox-Sicht:} Interaktion mit User in jeder Projektphase; Eingehen auf Veränderung möglich.
|
||||
\textbf{1st try: Code and Fix: }Unmöglich vorhersagen zu treffen und zu managen.
|
||||
\subsection{Wasserfall Modell}
|
||||
Machbarkeitsstudie>Anforderungsanalyse>Design>Implementation>Integration und Systemtest>Release/Wartung\\
|
||||
• Stark strukturierter Prozess
|
||||
• Dokumentlastig (Dokument nach jeder Phase)
|
||||
• Prozess strukturiert und planbar
|
||||
• Implementation erst wenn Anforderungen verstanden
|
||||
sind
|
||||
\textbf{Wasserfall Modell: Probleme}
|
||||
|
||||
• Änderung in Anforderungen nicht berücksichtigt
|
||||
• Kein Feedback zwischen den Phasen
|
||||
• Keine Parallelisierung
|
||||
• Fixes Einführungsdatum für ganzes System\\
|
||||
Variante 2: Mit Prototyping während Schritte 1 2 3\\
|
||||
Variante 3: Mit Feedback an vorige Stufe: REICHT OFT NICHT\\
|
||||
Modell war Beispiel im Paper Royce, Winston W. ``Managing the Development of Large Software Systems'', Proceedings of IEEE WESCON 26 (August): 1–9, dafür, wie man es nicht machen sollte.
|
||||
\subsection{V-Modell}
|
||||
Wasserfallmodell mit Feedback über mehrere Stufen
|
||||
Ablauf Prozess:
|
||||
1 Anforderungsanalyse $>$ 2 Systementwurf(Architecture)$>$ 3 Programmentwurf $>$ 4 Implementation $>$ 5 Unit/Integrationstest $>$ 6 Systemtest $>$ 7 Akzeptanztest $>$ 8 Release/Wartung\\
|
||||
Feedback: 1-7;2-6;3-5
|
||||
\subsection{Spiralmodel}
|
||||
\includegraphics[width=\linewidth]{images/Spiralmodel_nach_Boehm.png}
|
||||
Risikoabschätzung in jedem Durchlauf.
|
||||
\end{multicols*}
|
||||
\end{document}
|
||||
BIN
thoc/closure.png
Normal file
|
After Width: | Height: | Size: 8.3 KiB |
BIN
thoc/decable.png
Normal file
|
After Width: | Height: | Size: 8.0 KiB |
475
thoc/main.tex
Normal file
@ -0,0 +1,475 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% writeLaTeX Example: A quick guide to LaTeX
|
||||
%
|
||||
% Source: Dave Richeson (divisbyzero.com), Dickinson College
|
||||
%
|
||||
% A one-size-fits-all LaTeX cheat sheet. Kept to two pages, so it
|
||||
% can be printed (double-sided) on one piece of paper
|
||||
%
|
||||
% Feel free to distribute this example, but please keep the referral
|
||||
% to divisbyzero.com
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% How to use writeLaTeX:
|
||||
%
|
||||
% You edit the source code here on the left, and the preview on the
|
||||
% right shows you the result within a few seconds.
|
||||
%
|
||||
% Bookmark this page and share the URL with your co-authors. They can
|
||||
% edit at the same time!
|
||||
%
|
||||
% You can upload figures, bibliographies, custom classes and
|
||||
% styles using the files menu.
|
||||
%
|
||||
% If you're new to LaTeX, the wikibook is a great place to start:
|
||||
% http://en.wikibooks.org/wiki/LaTeX
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
\documentclass[10pt,landscape]{article}
|
||||
\usepackage{amssymb,amsmath,amsthm,amsfonts}
|
||||
\usepackage{multicol,multirow}
|
||||
\usepackage{calc}
|
||||
\usepackage{ifthen}
|
||||
\usepackage[document]{ragged2e}
|
||||
\usepackage{helvet}
|
||||
\renewcommand{\familydefault}{\sfdefault}
|
||||
\usepackage{wrapfig}
|
||||
\usepackage[fontsize=8pt]{fontsize}
|
||||
|
||||
\usepackage[landscape]{geometry}
|
||||
|
||||
\geometry{a4paper, landscape, margin=0.25cm}
|
||||
\usepackage[colorlinks=true,citecolor=blue,linkcolor=blue]{hyperref}
|
||||
\usepackage[
|
||||
protrusion=true,
|
||||
activate={true,nocompatibility},
|
||||
final,
|
||||
tracking=true,
|
||||
kerning=true,
|
||||
spacing=true,
|
||||
factor=1100]{microtype}
|
||||
\SetTracking{encoding={*}, shape=sc}{40}
|
||||
%%Packages added by Sebastian Lenzlinger:
|
||||
\usepackage{enumerate} %% Used to change the style of enumerations (see below).
|
||||
|
||||
\newtheorem{definition}{Definition}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
\newtheorem{axiom}{Axiom}
|
||||
\newtheorem{lem}{Lemma}
|
||||
\newtheorem{corr}{Corollary}
|
||||
|
||||
\usepackage{tikz} %% Pagacke to create graphics (graphs, automata, etc.)
|
||||
\usetikzlibrary{automata} %% Tikz library to draw automata
|
||||
\usetikzlibrary{arrows} %% Tikz library for nicer arrow heads
|
||||
%%End
|
||||
%\microtypecontext{spacing=nonfrench}
|
||||
|
||||
\ifthenelse{\lengthtest { \paperwidth = 11in}}
|
||||
{ \geometry{top=.5cm,left=.5cm,right=.5cm,bottom=.5cm} }
|
||||
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
|
||||
{\geometry{top=0.3cm,left=0.3cm,right=0.3cm,bottom=0.3cm} }
|
||||
{\geometry{top=0.5cm,left=0.5cm,right=0.5cm,bottom=0.5cm} }
|
||||
}
|
||||
\pagestyle{empty}
|
||||
\makeatletter
|
||||
%% Renew default font
|
||||
|
||||
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
|
||||
{0.1mm}%
|
||||
{0.0001mm}%x
|
||||
{\normalfont\normalsize\bfseries}}
|
||||
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
|
||||
{0.0001mm}%
|
||||
{0.00001mm}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
|
||||
{-1ex plus -.5ex minus -.2ex}%
|
||||
{1ex plus .2ex}%
|
||||
{\normalfont\small\bfseries}}
|
||||
\makeatother
|
||||
\setcounter{secnumdepth}{0}
|
||||
\setlength{\parindent}{0pt}
|
||||
\setlength{\parskip}{0pt plus 0.5ex}
|
||||
|
||||
% -----------------------------------------------------------------------
|
||||
|
||||
\title{Theory of Computer Science}
|
||||
|
||||
\begin{document}
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% Custom Commands
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\renewcommand{\l}[1]{\mathcal{L}(#1)}
|
||||
\newcommand{\s}{\Sigma}
|
||||
\newcommand{\then}{\rightsquigarrow}
|
||||
\renewcommand{\empty}{\varnothing}
|
||||
\newcommand{\any}{$\forall$}
|
||||
\newcommand{\some}{$\exists$}
|
||||
\newcommand{\predux}{$\leq_p$}
|
||||
\newcommand{\tin}{$\in$}
|
||||
\newcommand{\ntin}{$\not\in$}
|
||||
\newcommand{\ffrom}[1]{\stackrel{(#1)}{\Rightarrow}}
|
||||
\raggedright
|
||||
\footnotesize
|
||||
\microtypecontext{spacing=nonfrench}
|
||||
\begin{multicols*}{4}
|
||||
\setlength{\premulticols}{0.1cm}
|
||||
\setlength{\postmulticols}{0.1cm}
|
||||
\setlength{\multicolsep}{0.1cm}
|
||||
\setlength{\columnsep}{0.1cm}
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Finite Automata
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Finite Automata}
|
||||
\textbf{Formal Lang. }over an alphabet $\Sigma$ is a subset of $\Sigma^*$
|
||||
\textbf{DFA} is a 5-tuple $M=\langle Q\text{ the finite set of states}, \Sigma \text{ input alphabet }, \delta: Q \times \Sigma \rightarrow Q\text{ transition function }, q_0 \text{ start state }, F \subseteq Q \text { set of accept states}\rangle$
|
||||
\textbf{Def. } A DFA \emph{accepts} a word $w=a_1...a_n$ if $\exists$ a seq. of states $q'_0,...,q'_n \in Q$, s.t. $q'_0=q_0,\ \delta(q'_{i-1},a_i)=q'_i, \forall i \in \{1,...,n\}$ and $q'_n\in F$.
|
||||
\textbf{Def. } $\mathcal{L}(M)=\{w\in\Sigma^* | w \text { is accepted by } M\}$ is the language \emph{recognized} by $M$
|
||||
\textbf{NFA} is 5-tuple like \emph{DFA}, but $\delta:Q\times (\Sigma \cup \{\epsilon\}) \rightarrow \mathcal{P}(Q)$. Other diffs: $\delta$ can lead to 0 or more succ states for same $a\in\Sigma$. Can take $\epsilon$-transitions w.o. using symbol from input. \emph{NFA accepts} if $\exists$ \emph{at least one} acc. seq. of states.
|
||||
\textbf{$\epsilon$-closure} NFA $M$, $q\in Q$: $p$ is in $\epsilon$-closure $E(q)$ of $q$ \emph{iff.} $\exists$ seq. of states
|
||||
$q'_0,...,q'_n$, s.t. (1) $q'_0 = q$ (2) $q'_i \in \delta(q'_{i-1},\epsilon), \forall i\in\{1,..,n\}$ and (3) $q'_n = p$
|
||||
\textbf{NFA accepts} word $w=a_1..a_n$ if $\exists$ seq. of states $q'_0,...,q'_n$ s.t. (1) $q'_0\in E(q_0)$ (2) $q'_i\in \bigcup_{q\in\delta(q'_{i-1},a_i)} E(q), \forall i \in \{1,..,n\}$ (3) $q'_n\in F$
|
||||
\textbf{Thm. } Every lang. recognized by an NFA is also recognized by a DFA.
|
||||
\textbf{Remark } Specification of an automaton is always finite, while the recognized lang can be infinite.
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Grammars
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Grammars}
|
||||
\textbf{Grammar} is a 4-tuple $G = \langle V \text{ finite set of \emph{variables} }, \Sigma \text{ finite alphabet of \emph{terminal symbols} with } V\cap\Sigma=\empty, R\subseteq(V\cup\Sigma)^*V(V\cup\Sigma)^* \text{ finite set of \emph{rules} }, S\in V \textit{ start variable}\rangle$
|
||||
\textbf{Derivation} ($u\Rightarrow v$) of word $v\in (V\cup \Sigma)^*$ from $u\in (V\cup \Sigma)^+$ if (1) $u=xyz, v=xy'z$ with $x,z\in (V\cup \Sigma)^*$ (2) $\exists$ rule $y\rightarrow y'\in R$. Write $u\Rightarrow^*v$ if $v$ can be derived from $u$ in finitely many steps.
|
||||
\textbf{Lang. generated} by grammar G, $\mathcal{L}(G)=\{w\in\Sigma^* | S \Rightarrow^* w\}$
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%
|
||||
%% Chomsky Hierarchy
|
||||
%%%%%%%%%%%%%%%%%%%%
|
||||
\textbf{G is \emph{type 0}}: all rules allowed
|
||||
\textbf{G is \emph{type 1 (context-sensitive)}}: all rules have form $\alpha B\gamma \rightarrow \alpha\beta\gamma$ with $B\in V, \alpha,\gamma\in (V\cup\Sigma)^*, \beta\in (V\cup\Sigma)^+$
|
||||
\textbf{G is \emph{type 2 (context-free)}}: all rules have form $A\rightarrow w: A\in V, w\in (V\cup\Sigma)^+$
|
||||
\textbf{G is \emph{type 3 (regular)}}: all rules have form $A\rightarrow w: A\in V, w\in \Sigma \cup \Sigma V$
|
||||
\textbf{Regular Grammar: } $S\rightarrow \epsilon$ always allowed if $S$ is start variable and never occurs on right side of any rule
|
||||
\textbf{Type 0-3 Lang} Lang $L\subseteq \Sigma^*$ is type 0/1/2/3 if $\exists$ a type 0/1/2/3 grammar $G$ with $\mathcal{L}(G)=L$
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Regular Languages
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Regular Languages}
|
||||
A language is regular if it is generated by some regular grammar (i.e. $S\rightarrow\epsilon$ allowed iff $S$ never on RHS). Regular grammars restrict usage of $\epsilon$ rules. But it is not necessary for the characterization of reg langs.
|
||||
\textbf{Thm.} $\forall G=\langle V,\Sigma, R, S\rangle \exists G'=\langle V',\Sigma, R', S\rangle, R'\subseteq (V'\cup \Sigma)^*V'(V'\cup\Sigma)^*\times(V'\setminus\{S\}\cup\Sigma)^*$ s.t. $\mathcal{L}(G)=\mathcal{L}(G')$ NOTE: true for all grammars. \textbf{\emph{Proof.}} add new variable $S'\not\in V$ (1) $\forall r\in R$ add a rule $r'$ to $R'$, where $r'$ is result of replacing all occurrences of $S$ in $r$ with $S'$ (2) $\forall\text{rules } S\rightarrow w\in R$ add a rule $S\rightarrow w'$ to $R'$, where $w'$ is result of replacing all occ. of $S$ in $w$ with $S'$. Then $\mathcal{L}(G) = \mathcal{L}(\langle V\cup\{S'\}, \Sigma, R', S\rangle)\ \Box$
|
||||
\textbf{Thm.} $\forall$ grammar $G$ with rules $R\subseteq V \times (\Sigma\cup\Sigma V \cup \{\epsilon\}), \exists$ regular grammar $G'$ with $\mathcal{L}(G)=\mathcal{L}(G')$
|
||||
\textbf{Thm.} Every lang recog by DFA is regular.
|
||||
\textbf{Thm.} For every reg grammar G $\exists$ NFA M with $\l{G}=\l{M}$
|
||||
\textbf{Equivalence}$NFA \rightarrow DFA \rightarrow reg \ gram \rightarrow NFA$
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Closure props and decidablity
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%---- Closure
|
||||
\subsection{Def: Closure Props and Dec.ability}
|
||||
\textbf{Concat:} Langs $L_1, L_2$ over $\s_1,\s_2$, concat is $L_1L_2=\{w_1w_2\in (\s_1\cup\s_2)^* \mid w_1\in L_1, w_2\in L_2\}$
|
||||
\textbf{Star: } Let $L^0=\{\epsilon\}, L^1=L, L^{i+1}=L^iL \text{ for } i\in\mathbb{N}_0$. \emph{star} on $L$: $L^*=\bigcup_{i\geq 0}L^i$
|
||||
\textbf{Closure:} $\mathcal{K}$ class of langs, $\mathcal{K}$ is \emph{closed under $\oplus$} if $L,L'\in\mathcal{K}$ implies $L\oplus L'\in \mathcal{K}$
|
||||
\textbf{Thm. } RL are closed under $\cup,\cap$, complement, concat and star
|
||||
%---- Decidability
|
||||
\textbf{Decidability:} A \emph{decision problem} is a problem where (1) for given \emph{input} (2) an \emph{algo} determines if input has a given \emph{property} (3) then outputs yes/no
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Regular Expressions
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{RL: Regex}
|
||||
\textbf{Regular expressions } over alphabet $\s$ are defined inductively: (1) $\empty, \epsilon$ are regexes (2) If $a \in \s$, then $a$ is a regex (3) If $\alpha, \beta$ are regex, then so are $(\alpha\beta)\text{ concat }, (\alpha|\beta) \text{ alternative }, (\alpha^*)\text{ Kleene closure }$
|
||||
\textbf{Conventions:} omit parentheses; Kleene star binds more than concat, concat binds more than alternative; parentheses for nested concat/alt are omitted, treat as left-associative, but doesn't matter.
|
||||
\textbf{Lang described by a Regex:} Defined inductively: Assume $\alpha,\beta$ are regex. (1) $\l{\empty}=\empty$ (2) $\l{\epsilon}=\{\epsilon\}$ (3) $\gamma=a,a\in\s,\then\l{\gamma}=\{a\}$ (4) $\gamma=(\alpha\beta)\then\l{\gamma}=\l{\alpha}\l{\beta}$ (5) $\l{(\alpha|\beta)}=\l{\alpha}\cup\l{\beta}$ (6) $\l{(\alpha^*)}=\l{\alpha}^*$
|
||||
\textbf{Thms.} (1) Every \emph{finite} language can be described by a regex. (2) \any lang that can be descr. by a regex, \some NFA accepts it (3) \any lang \emph{recognized} by a DFA can be descr. by a regex (4) The set of langs that can be descr. by regexs are exactly the regular languages!
|
||||
%---- Generalized Nondeterministic Finite Automata
|
||||
\textbf{GNFAs} are like NFAs but transition labels can be arbitrary regex over input alphabet. We use special form: (1) Start state has transition to all others, but no incoming (2) one accept state $\not =$ start state (3) accept state has incoming trans from every other state, but no outgoing (4) \any other states, one transition goes from every state to every other state \emph{including} self
|
||||
\textbf{GNFA accepts w} if $w=w_1...w_k$ where each $w_i\in\s^*$ and \some seq of states $q_0,...,q_k\in Q$ with (1) $q_0=q_s$ (2) $\forall i: w_i\in\l{R_i(=\delta(q_{i-1},q_i))}$ and (3) $q_k=q_a$
|
||||
\textbf{DFA to GNFA: }(1) add new start state with $\epsilon$-trans to original start state (2) add new accept state with $\epsilon$-trans from og. acc. states (3) combine parallel transitions into one, labelled with the alternative of og labels (4) if required trans missing, add labelled with $\empty$
|
||||
\textbf{GNFA to regex (Algo)} \emph{Convert($M=\langle Q, \s, \delta, q_s, q_a\rangle)$} (1) If $|Q|=2$ return $\delta(q_s,q_a)$ (2) Select any $q\in Q\setminus\{q_s,q_a\}$ and let $M'=\langle Q\setminus \{q\}, \s, \delta', q_s, q_a\rangle$, where $\forall q_i\not = q_a, q_j\not = q_s$ we define $\delta'(q_i,q_j)=(\gamma_1)(\gamma_2)^*(\gamma_3)|(\gamma_4)$, where $\gamma_1=\delta(q_i,q),\gamma_2=\delta(q,q),\gamma_3=\delta(q,q_j),\gamma_4=\delta(q_i,q_j)$; then return \emph{Convert}($M'$)
|
||||
\textbf{Thm. }Set of langs that can be descr. by regexs is exactly the set of RLs.
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Pumping Lemma
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{RL: Pumping Lemma}
|
||||
If L is an RL then $\exists p\in\mathbb{N}$ (a \emph{pumping number} for L) s.t. $\forall x \in L, |x|\geq p$: $x$ can be split into $x=uvw$ with (1) $|v|\geq1$ (2) $|uv|\leq p$ and (3) $uv^iw\in L\ \forall i=0,1,2,...$
|
||||
\textbf{Application:} If L is regular, then the pumping lemma holds for L. By Contraposition: If PL does not hold for L, then L cannot be regular.
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% CF langs
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Context-Free Languages}
|
||||
\textbf{Thm. }\any context-free grammar G with rules $P\subseteq V\times (V\cup\s)^* \exists \text{ context-free grammar } G', \l{G}=\l{G'}$
|
||||
\textbf{$\epsilon$-Rules: }As with RLs, restriction of $\epsilon$ occurrences in rules is not necessary to characterize the set of context-free languages
|
||||
%------ CNF
|
||||
\textbf{CNF: } A CF grammar G is in \emph{Chomsky Normal Form} if all rules have following three forms (1) $A\rightarrow BC, A,B,C$ variables (2) $A\rightarrow a$, var A and terminal $a$ or (3) $S\rightarrow \epsilon$ start variable $S$, i.e. $P\subseteq (V\times (V'V'\cup \s))\cup \{\langle S,\epsilon\rangle\}, V'=V\setminus\{S\}$
|
||||
\textbf{Thm.} \any CF grammar G \some G' in CNF with $\l{G}=\l{G'}$
|
||||
\textbf{Observation} $G$ grammar in CNF and $w\in\l{G}$ non empty word generated by $G$, $\then$ \any derivations of $w$ have exactly $2|w|-1$ derivation steps! \textbf{\emph{Proof.}} \textbf{Base Case} ($|w| = 1$ since $w\not = \epsilon$): Since by construction $w\in\mathcal{L}(G)$, there is some rule $S\rightarrow w \in R$ from the start variable. Since $|w|=1$ we have $w=a\in\Sigma$ for some terminal $a$. We apply this rule in one step, and since $2\cdot |w| - 1 = 2 \cdot 1 - 1 = 1$, the base case holds.\\
|
||||
\textbf{Induction Hypothesis:} Assume the statement holds for all strings of length $<k$. We show that this implies that the statement holds for strings of length $k$. Let $w$ be such a string where $|w| = k$. Since $w\in \mathcal{L}(G)$ there is some derivation of $w$ starting from the start variable. We consider the first step of this derivation. Notice that by construction $w\not = \epsilon$, so we are left with two cases:\\
|
||||
Case 1: If we apply a rule $S\rightarrow a$ for some terminal $a$, then $w=a$ and we are in the base case. \\
|
||||
Case 2: We apply a rule of the form $S\rightarrow AB$. Then $|w|\geq2$ and we can partition $w$ into substrings $w=xy$. Suppose we first derive $A\ffrom{*}x$ and then $B\ffrom{*}y$. Since $|w|=k$ it follows that $|w|=|x|+|y|=k$ where $1\leq|x| < k$ and $1\leq|y|<k$. Since $|x|<k$ and $|y|<k$, we can apply our induction hypothesis. So we derive
|
||||
Applying our I.H. $\Box$
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% PDAs
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{Push-Down Automata}
|
||||
\textbf{PDA} is a 6-tuple $M=\langle Q \text{ finite set of states }, \s \text{ input alphabet }, \Gamma \text{ stack alphabet }, \delta:Q\times (\s\cup\{\epsilon\})\times(\Gamma\cup\{\epsilon\}) \rightarrow \mathcal{P}(Q\times (\Gamma \cup\{\epsilon\})) \text { transition function }, q_0\in Q \text{ start state }, F\subseteq Q \text{ set of accept states}\rangle$
|
||||
\textbf{Intuition $\delta$} (1) $\langle q',B\rangle \in \delta(q,a,A)$: If M is in state q, reads a and has A as top stack symbol, M \emph{can} trans to q' in next step popping A off stack and pushing B on stack (2) $a=\epsilon$ allowed (spontaneous trans) (3) $A=\epsilon$ allowed (no pop) (4) $B=\epsilon$ allowed (no push)
|
||||
TODO: PDA accepts w b08 p. 13
|
||||
\textbf{Thm.} Lang L is CF iff L is recognized by a PDA.
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Closure and decidability
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\textbf{Deterministic PDA} have restriction $|\delta(q,a,A)| + |\delta(q,\epsilon, A)|\leq 1, \forall q\in Q, a\in \s, A\in \Gamma$.
|
||||
\textbf{Lang recognized by DPDA} are the called \emph{deterministic CF langs} and are \emph{strict} subset of CF langs
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Turing Machines
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Turing Machines I \& II}
|
||||
\textbf{Deterministic Turing Machine} DTM given by 7-tuple $M=\langle Q \text{ set of states }, \s \text{ input alphabet, \emph{not} containing blank symbol } \Box,$
|
||||
\\ $\Gamma \text{ tape alphabet, where } \Box \in \Gamma \land \s\subseteq\Gamma, \delta:(Q\setminus\{q_{acc},q_{rej}\})\times\Gamma \rightarrow Q\times\Gamma\times\{L,R\} \text{ trans func }, q_0 \text{ start state }, q_{acc}, q_{rej}(\not = q_{acc})\rangle$
|
||||
\textbf{Intuition $\delta$:} $\delta(q,a)=\langle q',b, D\rangle$: (1) If $M$ in state $q$, reads $a\then$ (2.1) $M$ trans to state $q'$ in next step (2.2) replacing $a$ with $b$ and (2.3) moving head in direction $D\in\{L,R\}$ \textbf{\emph{Remark: }} If head is on leftmost tape cell, then going left is no movement!
|
||||
\textbf{Config of a TM} given by triple $c\in \Gamma^* \times Q \times \Gamma^+$. I.e. $(w_1,q,w_2)$ means (1) non empty or already visited part contains word $w_1w_2$ (2) R/W head on first symbol of $w_2$ (3) TM is in state $q$
|
||||
\textbf{DTM accepts word} $w$ if terminates in accept state doing only legal steps
|
||||
\textbf{Lang recognized by DTM M} $\l{M}=\{w\in\s^*|w \text{ is accepted by } M\}$
|
||||
\textbf{Turing-recognizable language }if some \emph{DTM} recognizes it
|
||||
\textbf{Decider} TM that halts (in $q_{acc}$ or $q_{rej}$) on all inputs $\then$ \textbf{T-decidable lang} if some DTM decides it
|
||||
\textbf{Remark:} TMs finite states but \emph{unbounded} tape as "memory"
|
||||
%%%%%%%%%%%%
|
||||
%% TM Variants
|
||||
%%%%%%%%%%%%
|
||||
\subsection{TM Variants}
|
||||
\textbf{Neutral Move} Just add extra state and go there on neutral move, then back, but without changing tape
|
||||
\textbf{\any Multi Tape TM} has an equivalent single-tape TM
|
||||
\textbf{Thm.} Lang is T-recog iff \some Multi Tape TM recognizes it.
|
||||
\textbf{Nondeterministic TM} Like DTM but (1) $\delta:(Q\setminus\{q_{acc},q_{rej}\})\times\Gamma\rightarrow \mathcal{P}(Q\times\Gamma\times\{L,R\})$ (2) for given input consider computation tree with branches as different possibilities (3) if \some branch leads to accept, then NTM accepts
|
||||
\textbf{Thm.} \any NTM N \some DTM D: N$\equiv$D
|
||||
$\then$\textbf{Thm. } Lang is turing recog iff \some NTM recogs it
|
||||
\textbf{Remark: } \emph{All variants of TM recognize the same languages}
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% TMs vs. Grammars
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{TMs vs. Grammars}
|
||||
\textbf{Linear Bounded Automata} is an NTM M if $\forall q\in Q\setminus\{q_{acc},q_{rej}\}$ and all trans rules $\langle q',c,y\rangle\in\delta(q,\Box)$, we have $c=\Box$!
|
||||
\textbf{Thm.} LBAs recog exactly the context-sensitive languages
|
||||
\textbf{Thm.} NTMs recog exactly type-0 langs $\then$ \textbf{Corr.} Since DTMs and NTMs recognize same langs, type-0 langs are exactly the Turing-recognizable langs
|
||||
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% TMs and fromal Model of Computation
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Formal Model of Computation}
|
||||
\textbf{Church-Turing Thesis} All functions that can be \emph{computed in the intuitive sense} can be computed by a \emph{TM}.
|
||||
\textbf{Problem} TM have infinite tape. Computers do not. \emph{BUT} A \emph{halting (in particular accepting)} computation of a TM only uses a \emph{finite} part of tape. $\then$ If problem undecidable then not solvable with computer
|
||||
\textbf{Encoding a TM as Word:} Encode a TM as word over $\{0,1,\#\}$ \emph{Idea: } (1) $\s$ should always be $\{0,1\}$ (2) enumerate states in Q and symbols in $\Gamma$ as number 0,1,2,... (3) blank symbol is always number 2 (4) start state always number 0, accept state always 1, reject state always no 2 $\then$ Sufficient to only encode $\delta$ explicitly: (1) Q: all states mentioned in the encoding of $\delta$ (2) $\Gamma=\{0,1,\Box,a_3,a_4,...,a_k\}$ where $k$ is largest symbol number mentioned in the $\delta$-rules
|
||||
\textbf{\emph{Encode the rules}} Let $\delta(q_i,a_j)=\langle q_{i'}, a_{j'}, D\rangle$ be a rule in $\delta$, where indices $i,i',j,j'$ correspond to enumeration of states/symbols and $D\in\{L,R\}$. Encode this rule as: $w_{i,j,i',j',D}=\#\#bin(i)\#bin(j)\#bin(i')\#bin(j')\#bin(m)$ where $m=0(D=L),m=1(D=R)$. Do for each rule, then all these words in (arbitrary) sequence encode the TM.
|
||||
\emph{Last Step: } Transform into word over $\{0,1\}$ with $0\mapsto 00, 1\mapsto 01, \#\mapsto 11$
|
||||
\textbf{TM encoded by a word:} \emph{goal: } function that maps any word in $\{0,1\}^*$ to a TM \emph{problem:} not all words in $\{0,1\}^*$ are encodings of a TM \emph{solution: } Let $\Hat{M}$ be an arbitrary fixed DTM. Then: $\forall w\in\{0,1\}^*: M_w=M' \text{ if } w \text{ is the encoding of some DTM } M', \Hat{M} \text{ otherwise}$
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Halting Problem
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{The Halting Problem}
|
||||
\textbf{Decidable vs. T-recognizable} A language $L$ is decidable iff \emph{both} $L$ and $\Bar{L}$ are T-recognizable \textbf{\emph{Proof.}} $(\Rightarrow)$: obvious because If TM accepts L, then TM' where accept and reject state are switched accepts $\Bar{L}$. $(\Leftarrow)$: Let $M_L$ be a DTM that recognizes $L$, and let $M_{\Bar{L}}$ be a DTM that recognizes $\Bar{L}$. Algo that decides $L$:
|
||||
On given input word $w$ proceed as follows: FOR $s:=1,2,3,...$: IF $M_L$ stops on $w$ in $s$ steps in the accept state: ACCEPT; IF $M_{\Bar{L}}$ stops on $w$ in $s$ steps in the accept state: REJECT $\Box$
|
||||
\textbf{The Halting Problem} is the lang $H=\{w\#x\in\{0,1,\#\}^* | w,x\in\{0,1\}^*, M_w \text{ started on } x \text{ terminates}\}$
|
||||
\textbf{Thm. } $H$ is T-recognizable \textbf{\emph{Proof.}} TM $U$ recogs $H$: On input $w\#x$: (1) If input has more than one \# REJECT (2) Simulate $M_w$ on $x$ (3) If $M_w$ halts, ACCEPT
|
||||
\textbf{Thm. } $H$ is undecidable
|
||||
\textbf{\emph{Proof.}} Assume $H$ is decidable. Let $D$ be a DTM that decides it. Construct new TM $M$ that takes a word $x\in\{0,1\}^*$ as input: (1) Execute D on input $x\#x$ (2) If rejects: ACCEPT (3) Else: enter endless loop.
|
||||
Let $w$ be the encoding of $M\then$ $M$ run on $w$ stops iff $D$ run on $w\#w$ rejects iff $w\#w\not\in H$ iff $M$ run on $w$ does not stop (reminder: $w$ encodes $M$) \emph{CONTRADICTION} DTM M cannot exist $\Rightarrow$ DTM D cannot exist, hence $H$ is not decidable $\Box$
|
||||
\textbf{Corr.} The complement of the halting problem $\Bar{H}$ is \emph{not T-recognizable}, i.e. \emph{not even} type-0
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Turing-Computability
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Turing-Computability}
|
||||
\textbf{Function (on word) Computed by a TM: }A DTM $M$ \emph{computes} the (partial) function $f:\s^*\rightarrow_p\s^*$ for which $\forall x,y\in\s^*$: $f(x)=y \ iff.\ \langle\epsilon,q_0,x\rangle \vdash^* \langle\epsilon,q_{acc},y\Box...\Box\rangle$ (special case: \\ init config is $\langle\epsilon,q_0,\Box\rangle$ if $x=\epsilon$)
|
||||
\textbf{Turing-Computable:} a partial function is T-Compable if \some DTM that computes it.
|
||||
\textbf{Encoded Function: } $f:\mathbf{N}^k_0 \rightarrow_p \mathbb{N}_0$ (partial) functions. $f^{code}:\s^*\rightarrow_p\s^*, \s=\{0,1,\#\}, f^{code}(w)=w'$ iff (1) $\exists n_1,...,n_k,n'\in \mathbb{N}_0$ s.t. (2) $f(n_1,...,n_k)=n'$ (3) $w=bin(n_1)\#...\#bin(n_k)$ and (4) $w'=bin(n')$
|
||||
\textbf{T-Comp for numeric functions: } $f:\mathbb{N}^k_0\rightarrow_p\mathbb{N}_0$ is T-Comp if \some DTM that computes $f^{code}$
|
||||
\textbf{Remark: } If the DTM does not stop in a valid config or does not stop at all, then $f(w)$ is undefined for that $w$.
|
||||
%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Decidable v computable
|
||||
%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection{Decidable vs. Computability}
|
||||
\textbf{Thm.} Lang $L\subseteq\s^*$ is decidable iff $\chi_L:\s^*\rightarrow \{0,1\}$ the \emph{characteristic function of }$L$, is computable, where $\forall w \in \s^*$: $\chi_L(w):= 1(w\in L); 0(w\not\in L)$
|
||||
\textbf{Thm.} Lang $L\subseteq\s^*$ is T-recognizable iff the following function $\chi'_L:\s^*\rightarrow \{0,1\}$ is computable, where $\forall w \in \s^*$: $\chi'_L(w):= 1(w\in L); undefined(w\not\in L)$
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Reductions
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Reductions}
|
||||
\textbf{Reduction} of lang $A\subseteq\s^*$ to $B\subseteq\Gamma^*$ (write $A\leq B$), with \emph{total and computable} func $f:\s^*\rightarrow\Gamma^*$ s.t. $\forall x\in\s^*$: $x\in A \iff f(x)\in B$.
|
||||
\textbf{Thm. } $A, B, A\leq B$. Then: (1) $B$ decidable $\then$ A decidable (2) $B$ T-recable $\then$ $A$ T-recable (3) $A$ not decidable $\then$ B not decidable (4) $A$ not T-recable $\then$ $B$ neither.
|
||||
\section{Post Correspondence Problem}
|
||||
\textbf{PCP} \emph{Given:} Finite sequence of pairs of words $(t_1,b_1),(t_2,b_2),...,(t_k,b_k), t_i,b_i\in\s^+$ for arbitrary $\s$. \emph{Question: }\some sequence $i_1,i_2,...,i_n\in\{1,...,k\}, n\geq 1$, with $t_{i1}t_{i2}...t_{in}=b_{i1}b_{i2}...b_{in}$? Solution is called \emph{match}.
|
||||
\textbf{Thm.} PCP is T-recognizable. \textbf{Thm.} PCP is \emph{not} decidable.
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Rice's Theorem
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Rice's Theorem}
|
||||
Let $\mathcal{R}$ be the class of all \emph{computable partial functions}. Let $\mathcal{S}$ be an \emph{arbitrary}$\subseteq \mathcal{R}, \mathcal{S}\not= \empty,\mathcal{S}\not = \mathcal{R} $. Then the lang $\mathcal{C}(S) = \{ w \in\{0,1\}^* | (\text{ the (partial) function computed by} M_w) \in \mathcal{S} \}$ is \emph{undecidable}. I.e. every non-trivial ($\mathcal{S}\not= \empty,\mathcal{S}\not = \mathcal{R} $) property about \emph{what a given} TM computes is undecidable. \textbf{\emph{Consequence: }} Full automation of software verification is impossible!
|
||||
\emph{Some undecidable Grammar Problems: } given CF grammars $G,G'$: $\l{G}\cap\l{G'}=\empty$?$|\l{G}\cap\l{G'}|=\infty$? is $\l{G}\cap\l{G'}$ CF? $\l{G}\subseteq\l{G'}$? $\l{G}=\l{G'}$? given CS grammar $G$: $\l{G}=\empty$?$|\l{G}|=\infty$?
|
||||
\textbf{Gödel's First Incompleteness Theorem: } Deciding if a given arithmetic formula is true is undecidable. Actually, neither it nor its complement are T-recognizable. So, not \some sound and complete proof system for arithmetic formulas!
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Overview Closure and Decidability
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\textbf{Overview Closure and Decabl:}\emph{Closure:} Intersection: 3,1,0. Union: 3,2,1,0. Comp: 3,1. Concat: 3,2,1,0. Star:3,2,1,0. \emph{Decable:} WordP: 3,2,1. Empti: 3,2. Equiv: 3. Intersec: 3.
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Nondeterminbistic Algos, P and Np
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Nondeterministic Algos, P and NP}
|
||||
\textbf{TIME and NTIME} $t:\mathbf{N}\rightarrow\mathbf{R}$: (1) $TIME(t(n))$ is the collection of all langs that are decidable by an $O(t)$ time single tape TM. (2) $NTIME(t(n))$ like $TIME$ but by a \emph{nondeterministic TM}.
|
||||
\textbf{P and NP} $P$ is class of langs decidable in \emph{polynomial time} by a single tape DTM: $P=\bigcup_k TIME(n^k)$. $NP$ like $P$ but $NP = \bigcup_k NTIME(n^k)$.
|
||||
\emph{Remark: } nondeterministic algos can guess (i.e. perform multiple comps "at same time") corresponds to going through all computation branches at same time.
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Polynomial Reductions and NP-Completeness
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Poly. Red. and NP-Completeness}
|
||||
\textbf{Polynomial Reduction: } Like general reduction, but additionally function $f$ needs to be computed in \emph{polynomial time} by a DTM, i.e. \some polynomial $p$ and DTM $M$ s.t. $M$ computes $f(w)$ in at most $p(|w|)$ steps, given input $w\in\s^*$.
|
||||
Write $A\leq_p B$.
|
||||
\textbf{Properties of poly redux: }Decision probs A,B and C. (1) If A \predux B and B \tin P/NP, then A \tin P/NP. (2) If A \predux B and A \ntin P/NP, then B \ntin P/NP. (3) If A \predux B and B\predux C, then A \predux C.
|
||||
\textbf{NP-Hard, NP-Complete: } Decision problem B. B \emph{NP-Hard} if A \predux B \any A \tin NP. B \emph{NP-Complete} if B \tin NP and B is NP-Hard. \textbf{\emph{Meaning: }} NP-Hard probs \emph{"at least as difficult"} as all probs in NP. NP-Complete Problems are \emph{"the most difficult"} in NP: all probs in NP can be reduced to them. If \some A\tin P and A is NP-Complete, then P = NP.
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Proving NP-Completeness
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Proving NP-Completeness}
|
||||
\textbf{Thm.} A,B problems s.t.: (1) A is NP-Hard, and (2) A\predux B. Then B is also NP-Hard. If also B\tin NP, then B is NP-Complete.
|
||||
\textbf{SAT} \emph{Given:} prop logic formula $\varphi$ \emph{Question: } Is $\varphi$ satisfiable? \textbf{Cook--Levin Thm. } SAT is NP-Complete.
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Proof SAT is NP-Complete
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\subsection*{Proof SAT is NP-Complete}
|
||||
\textbf{SAT \tin NP}: Guess and Check
|
||||
\textbf{SAT is NP-Hard}:
|
||||
We must show A \predux SAT \any A \tin NP. Let A be arb prob in NP. We need to find a poly redux from A to SAT, i.e. a function $f$ \emph{computable} in poly time s.t. for any input w\tin $\s^*$: w\tin A iff f(w) is a satisfiable prop formula. Since A \tin NP, \some NTM M and polynomial p s.t. M \emph{decides} A in time p. \textbf{\emph{IDEA: }} Construct formula that encodes the \emph{possible configurations} which M can reach in time $p(|w|)$ on input w and that \emph{is satisfiable iff an accepting config can be reached} in this time.
|
||||
|
||||
Let M be an NTM for A and let p be a polynomial bounding the computation time for M. W.l.o.g. $p(n)\geq n\ \forall n$. Let $w=w_1..w_n\in\s^*$ be input for $M$. We number the tape positions with natural numbers s.t. the TM head initially is on pos 1. \emph{Observation:} within $p(n)$ comp steps TM head can only reach positions in the set $Pos=\{1,..,p(n)+1\}$. So only need consider these pos.
|
||||
We encode configs of $M$ by specifying: (1) state, head pos, symbols on tape at positions Pos (2) since we want to encode full computation we need copies for each comp step (3) only need to consider $Steps=\{0,1,...,p(n)\}$ because M must acc within $p(n)$ steps!
|
||||
For vars in formula $f(w)(t\in Steps, q\in Q, i\in Pos, a\in\Gamma)$: (1) $state_{t,q}$ state of TM in $t$-th config (2) $head_{t,i}$ head pos (3) $tape_{t,i,a}$ tape content.
|
||||
Construct $f(w)$ such that every sat interpretation (1) descr a seq of NTM configs (2) begins in start config (3) reaches acc config (4) follows NTM rules in $\delta$.
|
||||
\textbf{All together:} Set $f(w):= Valid \land Init \land Accept \land Trans$, (1) $f(w)$ can be constructed in time polynomial in $|w|$ (2) $w\in A $ iff M accepts $w$ in $p(|w|)$ steps iff $f(w)$ is satisfiable iff $f(w)\in SAT \then$ A \predux SAT. Since $A\in NP$ arbitrary, this is true for every $A\in NP$ hence $SAT$ is NP-Hard and also NP-Complete.
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Some More NP-Compelete Problems
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Some NP-Complete Problems I \& II}
|
||||
\textbf{Clique:} \emph{G}: undirected graph $\langle V,E \rangle$, number $K \in \mathbb{N}_0$
|
||||
\emph{Q:} Does $G$ have a clique of size at least $K$, i.e., a set of vertices $C\subseteq V$ with $|C| \geq K$ and $\{u,v\} \in E$ for all $u,v \in C$ with $u \not = v$
|
||||
\textbf{IndSet:} \emph{G}: undirected graph $G = \langle V,E \rangle$, number $K \in \mathbb{N_0}$
|
||||
\emph{Q:} Does $G$ have independent set (i.e. vertices not connected by an edge) of at least size $K$? \emph{Clique \predux IndSet:} Use complement graph $\Bar{G}=\langle V,\Bar{E}\rangle$ of the Clique input.
|
||||
\textbf{VertexCover:}\emph{G:} undirected graph and natural number $K$. \emph{Q:} Does G have a vertex cover C (i.e. a set of vertices such that every edge of G has at least one vertex in C) of AT MOST(!) size $K$? \emph{IndSet \predux VC:} A set of vertices is a cover iff. its complement is an independent set, i.e. $f(\langle G,K\rangle)=\langle G,|V|-K\rangle$.
|
||||
\textbf{(Dir)HamCycle:}\emph{G:} (DIRECTED) graph \emph{Q:}Does G have a Hamilton Cycle (visit each vertex exactly once).
|
||||
\textbf{TSP:}\emph{G:} finite $S\not=\empty$ (Cities), symmetric cost function $cost:S\times S\rightarrow\mathbb{N}_0$, cost bound $K\in\mathbb{N}_0$ \emph{Q:}\some tour with total cost $\leq K$?
|
||||
\textbf{SubsetSum}\emph{G:} numbers $a_1,..,a_k\in\mathbb{N}_0,b\in\mathbb{N}_0$\emph{Q:}
|
||||
\some $J\subseteq\{1,..,k\}:\sum_{i\in J}a_i=b$?
|
||||
\textbf{Partition:}\emph{G:}$a_1,..,a_k\in\mathbb{N}_0$\emph{Q:}\some $J\subseteq\{1,..,k\}:\sum_{i\in J}a_i=\sum_{i\in\{1..k\}\setminus J}a_i$?\emph{SubsetSum\predux Part:} input for Part is the SubsetSum input plus the two numbers $M+1$ and $2b+1$, where $M=\sum_{i=1}^ka_i$. Since $M+(M+1)+(2b+1)=2M+2b+2$, a solution partitions into two subsets each of sum $M+b+1$. If $J\subseteq\{1,..,k\}$ is a SubsetSum sol, then $J$ with (index of) $M+1$ is a Partition sol.$\Box$
|
||||
\textbf{BinPacking:}\emph{G:}bin size $b$,no of bins $k$,$b,k\in\mathbb{N}_0$, objects $a_1,..,a_n\in\mathbb{N}_0$\emph{Q:}Do the objects fit into the bins?
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Beyond NP
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Beyond NP}
|
||||
\textbf{coNP:} is the set of all languages $L$ with $\overline{L} \in NP$.
|
||||
$P \subseteq coNP$; if $X$ is NP-complete, then $\overline{X}$ is coNP-complete; $NP \not = coNP \rightarrow P \not = NP$; if a coNP-complete problem is in $NP$, then $NP = coNP$; if a coNP-complete problem is in $P$, then $P = NP = coNP$
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% LOOP and WHILE Computablility
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{LOOP and WHILE Computability}
|
||||
\textbf{Loop:} $x_i := x_j \pm c | i,j \in \mathbb N_0 $ is a Loop-prog (addit/modif-substract)
|
||||
if $P_1 \& P_2 $ loop-prog, then $P_1;P_2$ (compo), loop-prog nested in loop-prog, is still loop-prog. Non-total functions are never Loop-computable.
|
||||
\textbf{While:} add, subtract, concat and nesting same as in loop. $\forall$ loop-comput func is while-comput,$ \not \forall$ while-comput is loop-comput. While is strictly more powerful than loop. Ex. Ackermann function.
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% GOTO Comp. and Comparison
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %
|
||||
\section{GOTO Computability and Comparison }
|
||||
DTM = While-prog = Goto-prog $\rightarrow$ Equally powerful.
|
||||
Let $f: \mathbb N_0^k \rightarrow _p \mathbb N_0$. f is turing-, while-, goto-compu
|
||||
$\forall$ loop-compu is turing-, while-, goto-compu
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Examples
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section*{Examples}
|
||||
\textbf{RLs}
|
||||
\emph{G:} regular Grammars $G_1,G_2$ \emph{Q:} Is $\mathcal{L}(G_1)=\l{G_2}$ \emph{R:}Generally, for languages $L,L'$ we have $L \subseteq L'$ iff. $L \cap L' = L$. Since $G_1,G_2$ are regular grammars, it follows that $\mathcal{L}(G_1), \mathcal{L}(G_2)$ both are regular languages. Since regular languages are closed under intersection, we can construct the intersection language $L_{\cap}=\mathcal{L}(G_1)\cap \mathcal{L}(G_2)$ and then, using the algorithm for the equivalence problem for regular languages, we check if $\mathcal{L}(G_1) = L_{\cap}$ and propagate the outcome. Since at every step we can use known algorithms, we have found a procedure for our problem and it follows that the problem is decidable $\Box$
|
||||
\textbf{PL:}\emph{Q:} Is $L_1=\{a^nb^mc^{m+n} | n,m\in\mathbb{N}_0\}$ a RL? \emph{R:} Assume $L_1$ is regular. Then let $p$ be a pumping number for $L_1$. The word $a^pb^pc^{p+p}$ is in $L_1$ and has length $\geq p$. Let $x=uvw$ be a split with the properties of the PL. Then $x'=uv^2w$ is also in $L_1$. Since $|uv|\leq p$, we know that $uv$ consists only of $a$s and $x'=a^{|u|}a^{2|v|}a^{p-|uv|}b^{p}c^{2p} = a^{p+|v|}b^pc^{2p}$. Since $|v| \geq 1$ we have that $p+|v| \not = p$ and thus $x'\notin L_1$. This is a contradiction to the PL and we conclude that $L_1$ is not regular.$\Box$
|
||||
\textbf{CFLs Closure and Dec:} \emph{Q:} If $L$ is CF, then $\Bar{L}$ is not CF. \emph{R:}This statement is false. We know that every regular language is also a context free language. Furthermore, we know that the regular languages are closed under complement IE. for any regular language $L_{reg}$, $\overline{L_{reg}}$ is also a regular language. In particular, they are both context free. And the given statement does not hold. $\Box$
|
||||
\emph{Q:} $L=\{a^nb^nb^ma^m|m,n\in\mathbb{N}_0\}$ is CF \emph{R:}From the lecture we know that languages of the form $\{x^zy^z \mid z\in \mathbb{N}_0\}$ are context free. Now we can instantiate: $L' = \{a^nb^n \mid n \in \mathbb{N}_0\}$ and $L'' = \{b^ma^m \mid m \in \mathbb{N}_0\}$ are context free. Then $L'L'' = L$ is the concatenation language and since context free languages are closed under concatenation, $L$ is context free.$\Box$
|
||||
\emph{Q:} Given CFGs $G_1,G_2$, "Is $w\in\l{G_1}\cap\l{G_2}$" is decidable. \emph{R:}Notice that $w\in \mathcal{L}(G_1) \cap \mathcal{L}(G_2) \ $iff.$ \ w\in \mathcal{L}(G_1) \ and \ w\in \mathcal{L}(G_2)$. Furthermore, we know that the word problem is decidable for context-free languages. Thus, we first use the algorithm to solve the word problem separately for $\mathcal{L}(G_1)$ and $\mathcal{L}(G_2)$ and return true if $w$ is in both and $false$ otherwise. This algorithm decides $w\in \mathcal{L}(G_1) \cap \mathcal{L}(G_2)$ for context-free grammars $G_1$ and $G_2$.$\Box$
|
||||
%%%%%%%%%%%%%%%%%%
|
||||
%% Proof exp is t-comp
|
||||
%%%%%%%%%%%%%%%%%
|
||||
\textbf{Proof Exponentiation is T-Compable}Consider the following algorithm to compute $exp(n_1,n_2)=n_1^{n_2}$.
|
||||
(1) Check if input on tape is legal ($n_1,n_2>=0$).
|
||||
(2) Write $n_3=1$ on to tape such that $(n_1,n_2,n_3)$ is on the tape.
|
||||
(3) If $n_2=0$, go to 6.
|
||||
(4) Simulate $M_{pred}(n_2)$ and write in to $n_2$.
|
||||
(5) Simulate $M_{mult}(n_1,n_3)$ and write in to $n_3$. Go to 3.
|
||||
(6) Move $n_3$ to beginning of the tape overwriting whats under it and deleting everything after it. Then $n_3=n_1^{n_2}$ is on the tape.$\Box$
|
||||
%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Comp of comput func is comput
|
||||
%%%%%%%%%%%%%%%%%%%%%
|
||||
\textbf{Composition of Comp Functions}Since $g$ and $f$ are computable, there are TMs that compute them. Call them $C_g$ and $C_f$. The following TM computes $(f \circ g)(x): \Sigma^*_1 \rightarrow_p \Sigma^*_3$:\\
|
||||
$C_{fg}$ = On input $\langle\langle C_f,C_g,x\rangle\rangle$:
|
||||
(1)If the input is malformed, reject. (Can not really happen, we assumed well formed input.)
|
||||
(2) Run $C_g$ on $x$ and store it on tape where $\langle C_g, x\rangle$ was, such the tape contents are $\langle\langle C_f,C_g(x)\rangle\rangle$ If it rejects or is undefined, reject.
|
||||
(3)Run $C_f$ on the above result. Then we have $\langle\langle C_f(C_g(x))\rangle\rangle$ on the tape.
|
||||
(4) Check if what is on the tape represents anything but a correct result of computing. If not, move into a reject state.
|
||||
(5) Format tape content to match the definition of computing a function value of a computable function.
|
||||
(6) Move into an accept state.
|
||||
$C_{fg}$ computes $(f\circ g)(x)$, such that in the end we have $\langle\langle f(g(x))\rangle\rangle$ on the tape, and is undefined when $g(x)$ is undefined or $f(g(x))$ is undefined. $\Box$
|
||||
%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Trans of Redux
|
||||
%%%%%%%%%%%%%%%%%%%%%%
|
||||
\textbf{Transitivity of Redux:}Let $A\subseteq\Sigma_A^*$, $B\subseteq\Sigma_B^*$ and $C\subseteq \Sigma_C^*$ for some arbitrary sets of symbols $\Sigma_A, \Sigma_B$ and $\Sigma_C$. Furthermore assume that $A\leq B$ and $B\leq C$.
|
||||
Since $A\leq B$, there is some total computable function $f:\Sigma_A^*\rightarrow \Sigma_B^*$.
|
||||
And since $B\leq C$, there is some total computable function $g:\Sigma_B^*\rightarrow\Sigma_C^*$. Note that any total function is also a partial function over the same domain.
|
||||
Now consider the following function $g\circ f:\Sigma_A^* \rightarrow \Sigma_C^*$. This function is computable, since the compositon of computable, partial functions is also a computable, partial function. In particular this holds for total functions. Additionally the following must hold: $x\in A\text{ iff. }g(f(x))\in C$. We distinguish two cases:\\
|
||||
\textbf{Case 1:} If $x\not\in A$, then from $A\leq B$, $f(x)\not\in B$. Therefore, since $B\leq C$ we get that $g(f(x))\not\in C$.\\
|
||||
\textbf{Case 2:} If $x\in A$, then, analogous to Case 1, $f(x)\in B$. It follows that since $B\leq C$, $g(f(x))\in C$.\\
|
||||
Both cases are well defined because of the totality of $f$ and $g$ and $x\in A \text{ iff. } g(f(x))\in C$, given $A\leq B$ and $B\leq C$.$\Box$
|
||||
%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Using RT
|
||||
%%%%%%%%%%%%%%%%%%%%%%
|
||||
\textbf{Using Rice's Theorem:}
|
||||
(1)$L=\{w\in\{0,1\}^*|M_w$\\$ \text{ computes unary func over nat nos which is undef on input 0}\}$ \emph{Sol:} Use RT with $\mathbf{S}=\{f\in\mathbf{R}|f:\mathbb{N}_0\rightarrow\mathbb{N}_0, f(0)=undef\}$
|
||||
(2)$L = \{ w \in \{0,1\}^* \mid M_w \text{ halts on all inputs} \} $\emph{Sol:} RT not applicable, but if was asked to halt also in valid config, then use set of all total comp funcs.
|
||||
(3)$L = \{ w \in \{0, 1\}^* \mid M_w \text{ always halts after an even number of steps} \}$ \emph{Sol:} RT not app since even no of steps prop of computation, not computed func.
|
||||
(4) $L = \{ w \in \{0, 1\}^* \mid M_w \text{ computes the binary multiplication function } \text{mul} : \mathbb{N}_0^2 \rightarrow \mathbb{N}_0 \text{ with } \text{mul}(x, y) = x \cdot y \}$\emph{Sol:} RT applicable with $\mathbf{S}=\{mul\}$.
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Undecidability of emptyness problem
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\textbf{Undec of Emptiness problem:}\emph{Def:} Given general (type-0) grammar $G$, is $\l{G}=\empty$? \emph{Proof:} Use $\text{ComputesUndef} = \{ w \in \{0, 1\}^* \mid \text{DTM } M_w \text{ computes the partial function } \Omega \}$ $\Omega$ is part func undef on all input words. It is computable with DTM that always enters infinite loop. With $\mathbf{S}=\{\Omega\}$ and RT it is undec. Now $CompUndef\leq Emptiness$:
|
||||
M:
|
||||
Input $w \in \{0, 1\}^*$:
|
||||
Sim $M_w$ on input $x$.
|
||||
If the sim rejects or does not terminate, $M$ does the same.
|
||||
If the sim of $M_w$ on $x$ accepts, $M$ then checks if the tape content
|
||||
and head position correspond to a config of a valid computation result.
|
||||
If yes, $M$ accepts $x$; otherwise, it rejects.
|
||||
$f(w) = \text{the grammar equivalent to the comp func from hint}$
|
||||
where $f(w)$ computes $\Omega$
|
||||
Redux $f$ is total and computable. And, it holds that
|
||||
$w \in \text{ComputesUndef} \iff M_w$ computes $\Omega$
|
||||
$\iff M$ does not accept any input
|
||||
$\iff L(M) = \emptyset \iff L(f(w)) = \emptyset \iff f(w) \in \text{Emptiness}$
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Nodeterministic Algos
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\textbf{HittingSet}\emph{G:} finite $U\not=\empty$, finite set of sets $S=\{S_1,..,S_n\},S_i\subseteq U \forall S_i\in S$, $k\in\mathbb{N}_0$ \emph{Q:}$\exists H\subseteq U, |H|\leq k, s.t. \ S_i\cap H\not=\empty \forall S_i\in S$?
|
||||
\textbf{HS G\& Check algo:} In: $U, S_i \ for \ i\in\{1,..,n\},k\then$ $rem:= S$; FOR $i=1$ TO $k$:\{ GUESS $e\in U$; $rem:= rem\setminus\{S_i|S_i\in S, e \in S_i\};$\} IF $rem==\empty$:\{ACCEPT;\} REJECT $\Box$
|
||||
\textbf{SetPacking is NP-Complete:}
|
||||
\emph{G:} fin set $M$, Set of Sets $\mathbf{S}=\{S_1,..,S_n\},S_i\subseteq M \forall i\in\{1,..,n\}$, $k\in\mathbb{N}_0$
|
||||
\emph{Q:} \some $\mathbf{S}'\subseteq\mathbf{S}, |\mathbf{S}'|\geq k$ s.t. \any set in $\mathbf{S}' $ are pairwise disjoint.
|
||||
\emph{SOL}(1) SetPacking is in NP: Input $S,k$: $\mathbf{S}' =\empty $; FOR $i=1$ TO $k$: \{ GUESS $S\in \mathbf{S}$; if $S\in\mathbf{S}'$: REJECT;
|
||||
FOR $S'\in \mathbf{S} '$: \{ IF $S\cap S'$: REJECT\}; $\mathbf{S} ':= \mathbf{S} ' \cup \{S\}$ END;\} ACCEPT$\Box$
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
\end{multicols*}
|
||||
\end{document}
|
||||