diff --git a/Systemsicherheit - Cheatsheet.pdf b/Systemsicherheit - Cheatsheet.pdf index 41e1273..dfdf1a7 100644 Binary files a/Systemsicherheit - Cheatsheet.pdf and b/Systemsicherheit - Cheatsheet.pdf differ diff --git a/Systemsicherheit - Cheatsheet.tex b/Systemsicherheit - Cheatsheet.tex index 82be009..a79f921 100644 --- a/Systemsicherheit - Cheatsheet.tex +++ b/Systemsicherheit - Cheatsheet.tex @@ -12,6 +12,7 @@ \usepackage{mdwlist} %less space for lists \usepackage{pdflscape} \usepackage{verbatim} +\usepackage[most]{tcolorbox} \usepackage[hidelinks,pdfencoding=auto]{hyperref} \usepackage{fancyhdr} \usepackage{lastpage} @@ -46,6 +47,30 @@ } \lstset{style=mystyle, upquote=true} +%textmarker style from colorbox doc +\tcbset{textmarker/.style={% + enhanced, + parbox=false,boxrule=0mm,boxsep=0mm,arc=0mm, + outer arc=0mm,left=2mm,right=2mm,top=3pt,bottom=3pt, + toptitle=1mm,bottomtitle=1mm,oversize}} + +% define new colorboxes +\newtcolorbox{hintBox}{textmarker, + borderline west={6pt}{0pt}{yellow}, + colback=yellow!10!white} +\newtcolorbox{importantBox}{textmarker, + borderline west={6pt}{0pt}{red}, + colback=red!10!white} +\newtcolorbox{noteBox}{textmarker, + borderline west={3pt}{0pt}{green}, + colback=green!10!white} + +% define commands for easy access +\renewcommand{\note}[2]{\begin{noteBox} \textbf{#1} #2 \end{noteBox}} +\newcommand{\warning}[1]{\begin{hintBox} \textbf{Warning:} #1 \end{hintBox}} +\newcommand{\important}[1]{\begin{importantBox} \textbf{Important:} #1 \end{importantBox}} + + % This sets page margins to .5 inch if using letter paper, and to 1cm % if using A4 paper. (This probably isn't strictly necessary.) % If using another size paper, use default 1cm margins. @@ -101,87 +126,35 @@ \setlength{\multicolsep}{1pt} \setlength{\columnsep}{2pt} - \section{Introduction} - Critical Properties + Goal of IT Security \textbf{Reduction of Operational Risks of IT Systems} \begin{itemize*} - \item Security + Safety - \item Reliability - \item Correctness - \item Availability - \item Real Time - \item Scalability + \item Reliability \& Correctness + \item Real Time \& Scalability \item Openness - \end{itemize*} - - Responsibility for risks -> guaranteed properties! - - Relevance of Security: Security properties if any IT system are mission-critial - independet of its application domain - - \subsection{Security Goals} - Our Faculty's Education and Examination Management System - \begin{itemize*} - \item Maintains: - \begin{itemize*} - \item Course profiles (examination form/date, credit points) - \item Students records (personal data, registration to examinations, grades) - \end{itemize*} - \item Services: - \begin{itemize*} - \item Enrolment/expulsion of students - \item Registration to examination - \item Registration of examination marks - \item Information and attestations desk - \end{itemize*} - \item Operational Risks - \begin{itemize*} - \item Conditio sine qua non: Provability of information properties - \item Fake registration to examinations: integrity, non-repudiability ("nicht-abstreitbar") - \item Leakage of grades, personal data: confidentiality, integrity - \item Forgery of attestations: authenticity, integrity - \end{itemize*} - \end{itemize*} - - Industry Control Systems - \begin{itemize*} - \item e.g. 
Factorys, energy and water plants (public infrastructure) - \begin{itemize*} - \item "Chinese Hacking Team Caught Takin over decoy water plant" - \item "Internet Attack shuts off the Heat in Finland" - \end{itemize*} - \item Operational risks: Integrity \& Availability of public community support systems - \end{itemize*} - - \subsubsection{Message} - \begin{itemize*} - \item Goal of IT Security: **Reduction of Operational Risks of IT Systems** - \item Elementary: Protection of - \begin{itemize*} - \item Confidentiality - \item Integrity - \item Availability - \item Non-repudiability - \end{itemize*} + \item Conditio sine qua non: Provability of information properties + \item non-repudiability ("nicht-abstreitbar") \end{itemize*} Specific Security Goals (Terms) \begin{itemize*} - \item **Confidentiality**: the property of information to be available only to anauthorized user group - \item **Integrity**: the property of information to be protected against unauthorized modification - \item **Availability**: the property of information to be available in an reasonable time frame - \item **Authenticity**: the property to be able to identify the author of an information - \item **Non-repudiability**: the combination of integrity and authenticity + \item \textbf{Confidentiality} the property of information to be available only to an authorized user group + \item \textbf{Integrity} the property of information to be protected against unauthorized modification + \item \textbf{Availability} the property of information to be available within a reasonable time frame + \item \textbf{Authenticity} the property of being able to identify the author of a piece of information + \item \textbf{Non-repudiability} the combination of integrity and authenticity + \item \textbf{Safety} To protect the environment against hazards caused by system failures + \begin{itemize*} + \item Technical failures: power failure, ageing, dirt + \item Human errors: stupidity, lacking education, carelessness + \item Force majeure: fire, lightning, earthquakes + \end{itemize*} + \item \textbf{Security} To protect IT systems against hazards caused by malicious attacks + \begin{itemize*} + \item Industrial espionage, fraud, blackmailing + \item Terrorism, vandalism + \end{itemize*} \end{itemize*} - \begin{tabular}{c| l | l} - & Safety & Security \\\hline - Goal & To protect environment against hazards caused by system failures & To protect IT systems against hazards caused by malicious attacks \\ - & Technical failures: power failure, ageing, dirt & Industrial espionage, fraud, blackmailing \\ - & Human errors: stupidity, lacking education, carelessness & Terrorism, vandalism \\ - & Force majeure: fire, lightning, earth quakes & \\ - => making sure things work & in the presence of system failures & in the face of an intelligent and maliciousadversary - \end{tabular} - - \subsection{Security Engineering} Security Goals in Practice \begin{itemize*} \item ... are diverse and complex to achieve @@ -189,78 +162,65 @@ \item ... involve cross-domain expertise \end{itemize*} - Security Engineering: + Security Engineering \begin{itemize*} \item Is a methodology that tries to tackle this complexity. - \item Goal: Engineering IT systems that are *secure by design*. - \item Approach: Stepwise increase of guarantees -> formal methods required! + \item Goal: Engineering IT systems that are secure by design. 
+ \item Approach: Stepwise increase of guarantees \end{itemize*} - Steps in Security Engineering: - %![](Assets/Systemsicherheit-engineering-process.png) - - \subsection{Lecture Roadmap} - 1. Security Requirements: Vulnerabilites, Threats, Risks - 2. Security Policies and Models: Access Control, Information Flow, Non-Interference - 3. Practical Security Engineering: Model Engineering, Model, Specification, Model Implementation - 4. Security Mechanisms: FYI Authorization, Authentication, Cryptography - 5. Security Architectures: TCBs and Reference Monitors, Nizza,SELinux, Kerberos - + Steps in Security Engineering + \includegraphics[width=\linewidth]{Assets/Systemsicherheit-engineering-process.png} \section{Security Requirements} - \subsection{Motivation} Goal of Requirements Engineering: - - Methodology for - \begin{itemize*} - \item identifying - \item specifying - \end{itemize*} - - the desired security properties of an IT system. + Methodology for identifying and specifying the desired security properties of an IT system. Result: \begin{itemize*} - \item Security requirements, which definewhatsecurity properties a system should have. - \item These again are the basis of asecurity policy: Defineshowthese properties are achieved + \item Security requirements, which define what security properties a system should have. + \item These in turn are the basis of a security policy, which defines how these properties are achieved \end{itemize*} Influencing Factors \begin{itemize*} \item Codes and acts (depending on applicable law) - \begin{itemize*} - \item EU General Data Protection Regulation (GDPR) - \item US Sarbanes-Oxley Act (SarbOx) - \end{itemize*} + \begin{itemize*} + \item EU General Data Protection Regulation (GDPR) + \item US Sarbanes-Oxley Act (SarbOx) + \end{itemize*} \item Contracts with customers \item Certification - \begin{itemize*} - \item For information security management systems (ISO 27001) - \item Subject to German Digital Signature Act (Signaturgesetz), toCommon - \end{itemize*} + \begin{itemize*} + \item For information security management systems (ISO 27001) + \item Subject to German Digital Signature Act (Signaturgesetz), to Common Criteria + \end{itemize*} \item Company-specific guidelines and regulations - \begin{itemize*} - \item Access to critical data - \item Permission assignment - \end{itemize*} + \begin{itemize*} + \item Access to critical data + \item Permission assignment + \end{itemize*} \item Company-specific infrastructure and technical requirements - \begin{itemize*} - \item System architecture - \item Application systems (such as OSs, Database Information Systems) - \end{itemize*} + \begin{itemize*} + \item System architecture + \item Application systems (OSs, Database Information Systems) + \end{itemize*} \end{itemize*} General Methodology: How to Come up with Security Requirements Specialized steps in regular software requirements engineering: - 1. Identify and classifyvulnerabilities. - 2. Identify and classifythreats. - 3. Match both, where relevant, to yieldrisks. - 4. Analyze and decide which risks should bedealt with. - -> Fine-grained Security Requirements + \begin{enumerate*} + \item Identify and classify vulnerabilities. + \item Identify and classify threats. + \item Match both, where relevant, to yield risks. + \item Analyze and decide which risks should be dealt with. 
+ \end{enumerate*} + $\rightarrow$ Fine-grained Security Requirements + + \includegraphics[width=\linewidth]{Assets/Systemsicherheit-risk.png} - %![](Assets/Systemsicherheit-risk.png) \subsection{Vulnerability Analysis} Goal: Identification of \begin{itemize*} @@ -269,254 +229,139 @@ \item human \end{itemize*} vulnerabilities of IT systems. - > Vulnerability - > - > Feature of hardware and software constituting, an organization running, or a human operating an IT system, which is a necessary precondition for any attack in that system, with the goal to compromise one of its security properties. Set of all vulnerabilities = a system’sattack surface. + + \note{Vulnerability}{Feature of the hardware or software constituting, the organization running, or a human operating an IT system, which is a necessary precondition for any attack on that system with the goal to compromise one of its security properties. The set of all vulnerabilities is a system’s attack surface.} \subsubsection{Human Vulnerabilities} - Examples: \begin{itemize*} \item Laziness - \begin{itemize*} - \item Passwords on Post-It - \item Fast-clicking exercise: Windows UAC pop-up boxes - \end{itemize*} + \begin{itemize*} + \item Passwords on Post-It + \item Fast-clicking exercise: Windows UAC pop-up boxes + \end{itemize*} \item Social Engineering - \begin{itemize*} - \item Pressure from your boss - \item A favor for your friend - \item Blackmailing: The poisoned daughter, ... - \item An important-seeming email - \end{itemize*} + \begin{itemize*} + \item Pressure from your boss + \item A favor for your friend + \item Blackmailing: The poisoned daughter, ... + \end{itemize*} \item Lack of knowledge - \begin{itemize*} - \item Importing and executing malware - \item Indirect, hidden information flowin access control systems - \end{itemize*} + \begin{itemize*} + \item Importing and executing malware + \item Indirect, hidden information flow in access control systems + \end{itemize*} \end{itemize*} - > Social Engineering - > - > Influencing people into acting against their own interest or the interest of an organisation is often a simpler solution than resorting to malware or hacking. - > Both law enforcement and the financial industry indicate that social engineering continues to enable attackers who lack the technical skills, motivation to use them or the resources to purchase or hire them. Additionally, targeted social engineering allows those technically gifted to orchestrate blended attacks bypassing both human and hardware or software lines of defence. [Europol](https://www.europol.europa.eu/crime-areas-and-trends/crime-areas/cybercrime/social-engineering) + \note{Social Engineering}{Influencing people into acting against their own interest or the interest of an organisation is often a simpler solution than resorting to malware or hacking. + %Both law enforcement and the financial industry indicate that social engineering continues to enable attackers who lack the technical skills, motivation to use them or the resources to purchase or hire them. Additionally, targeted social engineering allows those technically gifted to orchestrate blended attacks bypassing both human and hardware or software lines of defence. 
+ } + \subsubsection{Indirect Information Flow in Access Control Systems} - \paragraph{Indirect Information Flow in Access Control Systems} - A More Detailed Scenario - \begin{itemize*} - \item AlphaCompany has two departments: Research \& Development(R\&D) and Sales - \item Ann is project manager and Bob is developer working in R\&D on ProjectX, Chris is a busybody sales manager writing a marketing flyer about ProjectX - \item All R\&D developers communicate via an electronic bulletin board, including any preliminary product features not yet ready for release - \item Bob is responsible for informing sales about release-ready features, using ashared web document - \end{itemize*} - > Security Requirement - > - > No internal information about a project, which is not approved by the project manager, should ever go into the product flyer. + \note{Security Requirement}{No internal information about a project, which is not approved by the project manager, should ever go into the product flyer.} - - Access Control Configuration - \begin{itemize*} - \item 3 users:ann,bob,chris - \item 2 groups: - \begin{itemize*} - \item crewx: ann, bob, ... - \item sales: ann, bob - \end{itemize*} - \item Settings: - \begin{lstlisting}[ - language=Bash, - showspaces=false, - basicstyle=\ttfamily, - commentstyle=\color{gray} - ] - drw\item --\item --\item 1 ann crewx 2020-04-14 15:10 ProjectXFiles - -rw\item r-\item --\item 1 ann crewx 2020-04-14 15:10 ProjectXBoard - -rw\item r-\item --\item 1 bob sales 2020-04-14 14:22 NotesToSales - -rw\item --\item --\item 1 chris sales 2020-04-13 23:58 SalesFlyer.pdf - \end{lstlisting} - \item Result: - \begin{itemize*} - \item all users apparently set their permissions perfectly - from their own point of view - \item all three together createda severe information flow vulnerability... - \end{itemize*} - \item Ann has read access to the folder ProjectX Files - \item Ann legitimately writes news from these files to the ProjectX Board - \item Bob legitimately updates NotesToSales with these news - \item Human vulnerability: Bob’s laziness, friendship with Chris, blackmailing by Chris, ... (see above) make him write about unapproved new features - \item -> Chris misuses this information in the Sales Flyer... - \end{itemize*} - - > Forbidden Information Flow - > - > Internal information about ProjectX goes into the product flyer! 
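+ A minimal sketch of the access control configuration behind this requirement (users, groups, file names and dates as in the lecture example; the exact permission bits are an assumption): Ann reads the ProjectX files and posts news to the ProjectX board, Bob, who is in both groups, reads the board and updates his notes for sales, and Chris reads those notes while writing the flyer.
+ \begin{lstlisting}[language=Bash, showspaces=false, basicstyle=\ttfamily, commentstyle=\color{gray}]
+ drw------- 1 ann   crewx 2020-04-14 15:10 ProjectXFiles
+ -rw-r----- 1 ann   crewx 2020-04-14 15:10 ProjectXBoard
+ -rw-r----- 1 bob   sales 2020-04-14 14:22 NotesToSales
+ -rw------- 1 chris sales 2020-04-13 23:58 SalesFlyer.pdf
+ \end{lstlisting}
+ Each setting looks reasonable from its owner's point of view; only their combination opens the indirect flow stated below.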
+ \note{Forbidden Information Flow}{Internal information about ProjectX goes into the product flyer!} Problem Analysis: \begin{itemize*} \item Limited knowledge of users - \begin{itemize*} - \item limited horizon: knowledge about the rest of a system configuration for making a sound decision about permissions - \item limited problem awareness: see "lack of knowledge" - \item limited skills - \end{itemize*} + \begin{itemize*} + \item limited horizon: knowledge about the rest of a system + \item limited problem awareness: see "lack of knowledge" + \item limited skills + \end{itemize*} - \item Problem complexity -> effects ofindividualpermission assignments by users - (= discretionary) tosystem-widesecurity properties + \item Problem complexity $\rightarrow$ effects of individual permission assignments by users on system-wide security properties \item Limited configuration options and granularity: archaic and inapt security mechanisms in system and application software - \begin{itemize*} - \item no isolation of non-trusted software - \item no enforcement of global security policies - \end{itemize*} - \item -> Effectiveness of discretionary access control (DAC), configured by users? + \begin{itemize*} + \item no isolation of non-trusted software + \item no enforcement of global security policies + \end{itemize*} + \item $\rightarrow$ Effectiveness of discretionary access control (DAC)? \end{itemize*} \subsubsection{Organizational Vulnerabilities} - Examples: \begin{itemize*} \item Access to rooms (servers!) \item Assignment of permissions on the organizational level, e. g. - \begin{itemize*} - \item 4-eyes principle - \item need-to-know principle - \item definition of roles and hierarchies - \end{itemize*} + \begin{itemize*} + \item 4-eyes principle + \item need-to-know principle + \item definition of roles and hierarchies + \end{itemize*} \item Management of cryptographic keys - \begin{itemize*} - \item -> e. g. for issuing certificates - \end{itemize*} - \item -> Master course on "IT-Sicherheitsmanagement" (in German) \end{itemize*} \subsubsection{Technical Vulnerabilities} The Problem: Complexity of IT Systems \begin{itemize*} - \item ... will in foreseeable timenotbe - \item Completely, consistently, unambiguously, correctly specified - \begin{itemize*} - \item -> contain specification errors - \end{itemize*} - \item Correctly implemented - \begin{itemize*} - \item -> contain programming errors - \end{itemize*} - \item Re-designed on a daily basis(many security mechanisms of today’s systems are older than 40 years) - \begin{itemize*} - \item -> contain conceptual weaknesses and vulnerabilities - \end{itemize*} + \item ... will for the foreseeable future not be ... + \item Completely, consistently, unambiguously, correctly specified $\rightarrow$ they contain specification errors + \item Correctly implemented $\rightarrow$ they contain programming errors + \item Re-designed on a daily basis $\rightarrow$ they contain conceptual weaknesses and vulnerabilities \end{itemize*} - \paragraph{Buffer Overflow Attacks} - Example for Exploitation of Implementation Errors - - in privileged system software: + \subsubsection{Buffer Overflow Attacks} + Privileged software can be tricked into executing the attacker’s code. 
+ Approach: Cleverly forged parameters overwrite procedure activation frames in memory \begin{itemize*} - \item Operating Systems (OSs) - \item SSH demons - \item Web servers - \item Database servers + \item $\rightarrow$ exploitation of missing length checks on input buffers + \item $\rightarrow$ buffer overflow \end{itemize*} - - Consequence: Privileged software can be tricked into executing attacker’s code - - Approach: Cleverly forged parameters overwrite procedure activation frames in - memory - \begin{itemize*} - \item -> exploitation of missing length checks on input buffers - \item -> buffer overflow - \end{itemize*} - What an Attacker Needs to Know - - \paragraph{Necessary Knowledge and Skills} \begin{itemize*} - \item Source code of the target program (e. g. a privileged server), obtained by disassembling - \item Better: symbol table, as with an executable not stripped from debugging information - \item Even better: most precise knowledge about the compiler used w.r.t. runtime management - \begin{itemize*} - \item how call conventions affect the stack layout - \item degree to which stack layout is deterministic, which eases experimentation - \end{itemize*} + \item Source code of the target program, obtained by disassembling + \item Better: symbol table, as in an executable not stripped of debugging information + \item Even better: most precise knowledge about the compiler used + \begin{itemize*} + \item how call conventions affect the stack layout + \item degree to which stack layout is deterministic + \end{itemize*} \end{itemize*} - Sketch of the Attack Approach (Observations during program execution) \begin{itemize*} \item Stack grows towards the smaller addresses - \begin{itemize*} - \item -> small whenever a procedure is called, all its information is stored in aprocedure frame = subsequent addresses below those of previously stored procedure frames - \end{itemize*} \item in each procedure frame: address of the next instruction to call after the current procedure returns (ReturnIP) - \item after storing the ReturnIP, compilers reserve stack space for local variables -> these occupy lower addresses + \item after storing the ReturnIP, compilers reserve stack space for local variables $\rightarrow$ these occupy lower addresses \end{itemize*} - - \paragraph{Preparing the Attack} - Attacker carefully prepares an input argument msg:`0 ...0 /bin/shell\#system ` - \begin{lstlisting}[ - language=C++, - showspaces=false, - basicstyle=\ttfamily, - commentstyle=\color{gray} - ] - void processSomeMsg(char *msg, int msgSize){ - char localBuffer[1024]; - int i=0; - while (i ReturnIP - \end{itemize*} - \item After finishing processSomeMsg: victim program executes code at address of ReturnIP =address of a forged call to execute arbitrary programs! - \item Additional parameter to this call: file system location of a shell + \item Attacker makes victim program overwrite runtime-critical parts of its stack + \begin{itemize*} + \item by counting up to the length of msg + \item at the same time writing back over previously saved runtime information $\rightarrow$ ReturnIP + \end{itemize*} + \item After finishing: victim program executes code at the address in ReturnIP (= address of a forged call to execute arbitrary programs) + \item Additional parameter: file system location of a shell \end{itemize*} - > Security Breach - > - > The attacker can remotely communicate, upload, download, and execute anything- with cooperation of the OS, since all of this runs with the original privileges of the victim program! 
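+ A minimal sketch of the vulnerable routine described above (following the lecture's processSomeMsg example): the copy loop checks the input only against msgSize, never against the 1024-byte buffer, so a sufficiently long message runs past the buffer and overwrites the saved ReturnIP.
+ \begin{lstlisting}[language=C++, showspaces=false, basicstyle=\ttfamily, commentstyle=\color{gray}]
+ void processSomeMsg(char *msg, int msgSize){
+   char localBuffer[1024];
+   int i = 0;
+   while (i < msgSize) {        // no check against sizeof(localBuffer)
+     localBuffer[i] = msg[i];   // for msgSize > 1024 this writes beyond the
+     i++;                       // buffer and finally over the saved ReturnIP
+   }
+   // ... process localBuffer ...
+ }
+ \end{lstlisting}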
+ \note{Security Breach}{The attacker can remotely communicate, upload, download, and execute anything- with cooperation of the OS, since all of this runs with the original privileges of the victim program!} - - \subsubsection{Summary} - Vulnerabilities + \subsubsection{Summary - Vulnerabilities} \begin{itemize*} \item Human - \begin{itemize*} - \item Laziness - \item Social engineering - \item Lack of knowledge (e. g. malware execution, DAC shortcoming) - \end{itemize*} + \begin{itemize*} + \item Laziness + \item Social engineering + \item Lack of knowledge (e.g. malware execution) + \end{itemize*} \item Organizational - \begin{itemize*} - \item Key management - \item Physical access to rooms, hardware - \end{itemize*} + \begin{itemize*} + \item Key management + \item Physical access to rooms, hardware + \end{itemize*} \item Technical - \begin{itemize*} - \item Weak security paradigms - \item Specification and implementation errors - \end{itemize*} - \item -> A whole zoo of vulnerabilities! - \end{itemize*} - - How can we identify all during systems design and engineering...? - \begin{itemize*} - \item Vulnerabilities catalogues: ISO 27001, ISO 27002 - \item Vulnerabilities databases, such as CVE - \item Tools (we will see...) + \begin{itemize*} + \item Weak security paradigms + \item Specification and implementation errors + \end{itemize*} \end{itemize*} \subsection{Threat Analysis} Goal: Identification of \begin{itemize*} \item Attack objectives and attackers - \item Attack methods and practices (a.k.a. "Tactics, Techniques, and Procedures (TTPs)") - \item -> know your enemy + \item Attack methods and practices (Tactics, Techniques) + \item $\rightarrow$ know your enemy \end{itemize*} Approach: Compilation of a threat catalog, content: @@ -527,518 +372,264 @@ \item damage potential of attacks \end{itemize*} - \subsubsection{Attack Objectives and Attackers} - Attack Objectives \begin{itemize*} - \item Economical and political power - \item Profit - \item Wreak havoc (energy infrastructure, water plants, air traffic ...) - \item Meet a challenge - \end{itemize*} - - Attackers - \begin{itemize*} - \item Professional organizations (which may be hired by anyone, incl. competitors or governments) - \item Active and former employees ("Remember that IT guy we fired last year ...?") - \item Terrorists - \item Hackers (both good or evil) - \end{itemize*} - - Examples - \begin{itemize*} - \item Economic Espionage - \item Objective: economic and political power, profit - \item Victims: high tech industry(companies that rely on the secrecy of their know-how to successfully compete) - \item Attackers: - \begin{itemize*} - \item Competitors, (foreign) governments -> professional organizations - \item Insiders - \begin{itemize*} - \item regular, often privileged users of IT systems - \item statistically large share(> 40 %) - \item often indirect -> social engineering ("Only amateurs target systems; professional target people.") - \item statistical profile: age 30-40, executive function (department heads, system administrators, lead programmers, ...) - \item weapons: technical and organisational insider knowledge, technical skills - \item -> Your own people. 
- \end{itemize*} - \end{itemize*} + \item Economic Espionage and political power + \begin{itemize*} + \item Victims: high tech industry + \item Attackers: + \begin{itemize*} + \item Competitors, governments, professional organizations + \item Insiders + \item regular, often privileged users of IT systems + \end{itemize*} + \item often indirect $\rightarrow$ social engineering + \item statistical profile: age 30-40, executive function + \item weapons: technical and organisational insider knowledge + \item damage potential: Loss of control over critical knowledge $\rightarrow$ loss of economical or political power + \end{itemize*} \item Personal Profit - \begin{itemize*} - \item Objective: becoming rich(er)(expensive life style, ambitious projects, medical conditions) - \item Attackers: - \begin{itemize*} - \item Competitors - \item Insiders - \begin{itemize*} - \item profile: age 40-50, management function - \item typically: career peak reached, midlife crisis, new boat, new house, new partner, ... - \item weapons: organisational insider knowledge, organisational authority, management and leadership skills - \end{itemize*} - \end{itemize*} - \end{itemize*} + \begin{itemize*} + \item Objective: becoming rich(er) + \item Attackers: Competitors, Insiders + \item damage potential: Economical damage (loss of profit) + \end{itemize*} \item Wreak Havoc - \begin{itemize*} - \item Objective: damaging or destroying things or lives, blackmailing, meeting a challenge (egomania, narcissism, sportive challange) - \item Attackers: - \begin{itemize*} - \item Terrorists: motivated by faith and philosophy, paid by organisations and governments - \item Avengers: see insiders - \item Psychos: all ages, all types, personality disorder (egomania, narcissism, paranoia, ...) - \item -> No regular access to IT systems, no insider knowledge, butskills and tools. - \end{itemize*} - \end{itemize*} + \begin{itemize*} + \item Objective: damaging or destroying things or lives, blackmailing,... + \item Attackers: + \begin{itemize*} + \item Terrorists: motivated by faith and philosophy, paid by organisations and governments + \item Avengers: see insiders + \item Psychos: all ages, all types, personality disorder + \item $\rightarrow$ No regular access to IT systems, no insider knowledge, but skills and tools. + \end{itemize*} + \item damage potential: Loss of critical infrastructures + \end{itemize*} + \item Meet a challenge (Hackers both good or evil) \end{itemize*} \subsubsection{Attack Methods} Exploitation of Vulnerabilities - \begin{itemize*} - \item Human: Social engineering, laziness, lack of knowledge - \item Organizational: Rights management, key management, room access - \item Technical: Weak protection paradigms, specification and implementation errors - \end{itemize*} - \paragraph{Examples} - Scenario 1: Insider Attack + \paragraph{Scenario 1: Insider Attack} \begin{itemize*} - \item Social Engineering, plus - \item Exploitation of conceptual vulnerabilities (DAC),plus + \item Social Engineering + \item Exploitation of conceptual vulnerabilities (DAC) \item Professionally tailored malware \end{itemize*} - Scenario 2: Malware(a family heirloom ...) + \paragraph{Scenario 2: Malware (a family heirloom ...)} \begin{itemize*} - \item Trojan horses: Executable code with hidden functionality. - \item Viruses: Code for self-modification and self-duplication, often coupled with damaging the host. - \item Logical bombs: Code that is activated by some event recognizable from the host (e. g. 
time, date, temperature, pressure, geographic location, ...). + \item Trojan horses: Executable code with hidden functionality + \item Viruses: Code for self-modification and self-duplication + \item Logical bombs: Code that is activated by some event recognizable from the host (e. g. time, date, temperature, ...). \item Backdoors: Code that is activated through undocumented interfaces (mostly remote). - \item Ransomware: Code for encrypting possibly all user data found on the host, used for blackmailing the victims (to pay for decryption). - \item Worms and worm segments: Autonomous, self-duplicating programs. Originally designed for good: to make use of free computing power in local networks. + \item Ransomware: Code for encrypting possibly all user data found on the host, used for blackmailing the victims + \item Worms and worm segments: Autonomous, self-duplicating programs \end{itemize*} - Scenario 3: Outsider Attack + \paragraph{Scenario 3: Outsider Attack} \begin{itemize*} \item Attack Method: Buffer Overflow \item Exploitation of implementation errors \end{itemize*} - Scenario 4: High-end Malware:Root Kits + \paragraph{Scenario 4: High-end Malware (Root Kits)} \begin{itemize*} - \item Goal: Invisible, total, sustainable takeover of a complete IT system + \item Invisible, total, sustainable takeover of a complete IT system \item Method: Comprehensive tool kit for fully automated attacks - 1. automatic analysis of technical vulnerabilities - 2. automated attack execution - 3. automated installation of backdoors - 4. automated installation and activation of stealth mechanisms + \begin{enumerate*} + \item automatic analysis of technical vulnerabilities + \item automated attack execution + \item automated installation of backdoors + \item automated installation and activation of stealth mechanisms + \end{enumerate*} \item Target: Attacks on all levels of the software stack: - \begin{itemize*} - \item firmware - \item bootloader - \item operating system (e. g. drivers, file system, network interface) - \item system applications (e. g. file and process managers) - \item user applications (e. g. web servers, email, office) - \end{itemize*} + \begin{itemize*} + \item firmware \& bootloader + \item operating system (e. g. file system, network interface) + \item system applications (e. g. file and process managers) + \item user applications (e. g. web servers, email, office) + \end{itemize*} \item tailored to specific software and software versions found there! \end{itemize*} - \paragraph{Root Kits} + \subsubsection{Root Kits} Step 1: Vulnerability Analysis \begin{itemize*} \item Tools look for vulnerabilities in - \begin{itemize*} - \item Active privileged services and demons (from inside a network:nmap, from outside: by port scans) -> Discovers:web server, remote access server (sshd), file server (ftpd), time server (ntpd), print server (cupsd),bluetoothd,smbd, ... 
- \item Configuration files -> Discovers: weak passwords, open ports - \item Operating systems -> Discovers: kernel and system tool versions with known implementation errors - \end{itemize*} - \item Using built-in knowledge base: an automatable vulnerability database - \item Result: System-specific collection of vulnerabilities -> choice of attack method andtools to execute + \begin{itemize*} + \item Active privileged services and demons (from inside a network: nmap, from outside: by port scans) + \item Configuration files $\rightarrow$ Discover weak passwords, open ports + \item Operating systems $\rightarrow$ Discover kernel and system tool versions with known implementation errors + \end{itemize*} + \item built-in knowledge base: automatable vulnerability database + \item Result: System-specific collection of vulnerabilities $\rightarrow$ choice of attack method and tools to execute \end{itemize*} - Step 2: Attack Execution \begin{itemize*} - \item Fabrication oftailored softwareto exploit vulnerabilities in - \begin{itemize*} - \item Server processes or system tool processes (demons) - \item OS kernel itself - to execute code of attacker withroot privileges - \end{itemize*} + \item Fabrication of tailored software to exploit vulnerabilities in + \begin{itemize*} + \item Server processes or system tool processes (demons) + \item OS kernel to execute code of attacker with root privileges + \end{itemize*} \item This code - \begin{itemize*} - \item First installs smoke-bombs for obscuring attack - \item Then replaces original system software by pre-fabricated modules - \begin{itemize*} - \item servers and demons - \item utilities and libraries - \item OS modules - \end{itemize*} - \item containing - \begin{itemize*} - \item backdoors (-> step 3) - \item smoke bombs for future attacks (-> step 4) - \end{itemize*} - \end{itemize*} + \begin{itemize*} + \item First installs smoke-bombs for obscuring the attack + \item replaces original system software by pre-fabricated modules (servers, utilities, libraries, OS modules) + \item containing backdoors or smoke bombs for future attacks + \end{itemize*} \item Results: - \begin{itemize*} - \item Backdoors allow forhigh-privilege access within fractions of seconds - \item System modified with attacker’s servers, demons, utilities, OS modules - \item Obfuscation of modifications and future access - \end{itemize*} + \begin{itemize*} + \item Backdoors allow for high-privilege access within a short time + \item System modified with attacker’s servers, demons, utilities... + \item Obfuscation of modifications and future access + \end{itemize*} \end{itemize*} - Step 3: Attack Sustainability \begin{itemize*} - \item Backdoors for any further control \& command in - \begin{itemize*} - \item Servers (e. g.sshdemon) - \item Utilities (e. g.login) - \item Libraries (e. g.PAM, pluggable authentication modules) - \item OS (system calls used by programs likesudo) - \end{itemize*} + \item Backdoors for any further control \& command in Servers, ... 
+ \item Modifications of utilities and OS to prevent + \begin{itemize*} + \item Killing root kit processes and connections (kill, signal) + \item Removal of root kit files (rm, unlink) + \end{itemize*} + \item Results: Unnoticed access for attacker anytime, highly privileged, extremely fast, virtually unpreventable \end{itemize*} - Step 4: Stealth Mechanisms (Smoke Bombs) \begin{itemize*} \item Clean logfiles (entries for root kit processes, network connections), e.g. syslog, kern.log, user.log, daemon.log, auth.log, ... \item Modify system admin utilities - \begin{itemize*} - \item Process management(hide running root kit processes), e.g. ps,top,ksysguard,taskman - \item File system (hide root kit files), e.g. ls,explorer,finder - \item Network (hide active root kit connections), e.g. netstat,ifconfig,ipconfig,iwconfig - \end{itemize*} + \begin{itemize*} + \item Process management (hide running root kit processes) + \item File system (hide root kit files) + \item Network (hide active root kit connections) + \end{itemize*} \item Substitute OS kernel modules and drivers (hide root kit processes, files, network connections), e.g. /proc/..., stat, fstat, pstat \item Result: Processes, files and communication of root kit become invisible \end{itemize*} Risk and Damage Potential: \begin{itemize*} - \item Likeliness of success: extremely highin today’s commodity OSs - \begin{itemize*} - \item High number of vulnerabilities - \item Speed - \item Refined methodology - \item Fully automated - \end{itemize*} - \item Fighting the dark arts:extremely difficult - \begin{itemize*} - \item Number and cause of vulnerabilities - \item number of "security updates" last month? - \item specification/implementation errors, weak security mechanisms - \item Speed - \item Smoke bombs - \end{itemize*} - \item Prospects for recovering the system after successful attack:near zero + \item Likeliness of success: extremely high in today’s commodity OSs (high number of vulnerabilities, speed, refined methodology, fully automated) + \item Fighting the dark arts: extremely difficult (number and cause of vulnerabilities, weak security mechanisms, speed, smoke bombs) + \item Prospects for recovering the system after a successful attack: near zero \end{itemize*} Countermeasures - Options: \begin{itemize*} - \item Reactive: Well ...(even your OS might have become your enemy) - \item Preventive: - \begin{itemize*} - \item Counter with same tools for vulnerability analysis (we do this for years now -> 50 Billions € damage taken...) - \item Write correct software (we try this for years now -> 50 Billions € damage taken...) - \end{itemize*} - \end{itemize*} - - > Security Engineering - \begin{itemize*} - \item New paradigms:policy-controlled systems -> powerful software platforms - \item New provable guarantees: formal security models -> reducing specification errors and faultsby design - \item New security architectures -> limiting bad effectsof implementation errors and faults - \end{itemize*} - - \subsubsection{Damage Potential} - Industrial Espionage: - \begin{itemize*} - \item Loss of control over critical knowledge -> loss of economical or political power(high-risk technologies!) 
- \item Economical damage (contract penalties, loss of profit, image damage) - Quantity: 50 000 000 000 €, 40\% caused by IT - \end{itemize*} - - Personal Profit: Individual loss of money(zero sum game) - - Terrorism, hackers: - \begin{itemize*} - \item Loss of critical infrastructures (energy, water, communication) - \item Loss of sea, air, land transport infrastructure - \item Damage of financial systems - \end{itemize*} - - \subsubsection{Summary} - Know Your Enemy - \begin{itemize*} - \item Attack goals and attackers - \begin{itemize*} - \item Economical and political power, financial gain - \item Professional organizations, insiders - \end{itemize*} - \item Attack methods und techniques: exploiting vulnerabilities - \begin{itemize*} - \item human - \item organizational - \item technical - \end{itemize*} - \item -> A zoo of threats, practical assistance: - \begin{itemize*} - \item National (Germany): BSI IT-Grundschutz standards and catalogues - \item International:Common Criteria - \end{itemize*} \end{itemize*} + \item Reactive: even your OS might have become your enemy + \item Preventive: Counter with same tools for vulnerability analysis + \item Preventive: Write correct software \end{itemize*} + \note{Security Engineering}{ + \begin{itemize*} + \item New paradigms: policy-controlled systems $\rightarrow$ powerful software platforms + \item New provable guarantees: formal security models $\rightarrow$ reducing specification errors and faults by design + \item New security architectures $\rightarrow$ limiting bad effects of implementation errors and faults + \end{itemize*} + } \subsection{Risk Analysis} - Goal: Identification and Classification of scenario-specific risks when designing an IT system - - Approach: + Identification and Classification of scenario-specific risks \begin{itemize*} \item Risks $\subseteq$ Vulnerabilities $\times$ Threats - \item Correlation of vulnerabilities and matching threats - \begin{itemize*} - \item -> Risk catalogue - \end{itemize*} - \item Classification of risks - \begin{itemize*} - \item -> Complexity reduction - \end{itemize*} - \item -> Risk matrix + \item Correlation of vulnerabilities and threats $\rightarrow$ Risk catalogue + \item Classification of risks $\rightarrow$ Complexity reduction + \item $\rightarrow$ Risk matrix + \item $n$ Vulnerabilities, $m$ Threats $\rightarrow$ $x$ Risks + \item Correlation of Vulnerabilities and Threats $\rightarrow$ Risk catalogue: $n:m$ correlation + \item Usually $\max(n,m) \ll x \leq nm$ $\rightarrow$ quite a large risk catalogue (e.g. $n=10$ vulnerabilities and $m=8$ threats may yield up to $nm=80$ risks)! 
\end{itemize*} + Risk Classification: Qualitative risk matrix/dimensions - Correlation of Vulnerabilities and Threats - \begin{itemize*} - \item Goal: Risk catalogue: $n:m$ correlation - \end{itemize*} + \includegraphics[width=.3\linewidth]{Assets/Systemsicherheit-risk-classification.png} - \subsubsection{Examples} - \begin{itemize*} - \item Vulnerability: Implementation error in database access control -> Contents can be accessed by unauthorized users - \item Threat: Professional team of attackers, contracted by competitor - \item -> Risk: Confidentiality breach - \end{itemize*} - - - \begin{itemize*} - \item Vulnerability: Conceptual vulnerability: discretionary access control configuration only - \item Threat: Employee in critical financial situation - \item -> Risk: - \begin{itemize*} - \item Disclosure and sale of corporate secrets - \item Redirection of funds - \end{itemize*} - \end{itemize*} - - \begin{itemize*} - \item n Vulnerabilities - \item m Threats - \item -> x Risks - \end{itemize*} - - Usually: $max(n,m)<< x \leq nm$ -> quite largerisk catalogue! - - - \subsubsection{Risk Classification} - Goal: Catalogue reduction -> major and minor risks - - Approach: Qualitative risk matrix; dimensions: - - %![](Assets/Systemsicherheit-risk-classification.png) - - \subsubsection{Risk Matrix} + \subsubsection{Assessment} Damage Potential Assessment - - Examples for risks: \begin{itemize*} - \item Cloud computing:"Loss of VM integrity" -> contract penalties, loss of confidence/reputation - \item Industrial plant control:"Tampering with frequency converters" -> damage or destruction of facility - \item Critical public infrastructure:"Loss of availability due to DoS attacks" -> interrupted services, possible impact on public safety (cf. Finnish heating plant) - \item Traffic management:"Loss of GPS data integrity" -> maximum credible accident w. r. t. safety + \item Cloud computing $\rightarrow$ loss of confidence/reputation + \item Industrial plant control $\rightarrow$ damage or destruction of facility + \item Critical public infrastructure $\rightarrow$ interrupted services, possible impact on public safety + \item Traffic management $\rightarrow$ maximum credible accident + \end{itemize*} + Occurrence Probability Assessment + \begin{itemize*} + \item Cloud computing $\rightarrow$ depending on client data sensitivity + \item Industrial plant control $\rightarrow$ depending on plant sensitivity + \item Critical public infrastructure $\rightarrow$ depending on terroristic threat level + \item Traffic management $\rightarrow$ depending on terroristic threat level \end{itemize*} - \paragraph{General Fact: Damage potential is highly scenario-specific} - Example: "Confidentiality breach of database contents" - \begin{itemize*} - \item Articles in online newspapers - \begin{itemize*} - \item -> small to mediumdamage due to lost paywall revenues - \end{itemize*} - \item Account data of banks - \begin{itemize*} - \item -> mission-criticalloss of trust - \end{itemize*} - \item Plant control data of industrial production facility - \begin{itemize*} - \item -> mission-criticalloss of market leadership - \end{itemize*} - \end{itemize*} - - Depends on diverse, mostly non-technical side conditions -> advisory board needed for assessment:engineers, managers, users, ... 
- - \paragraph{Occurrence Probability Assessment} - Examples for risks: - \begin{itemize*} - \item Cloud computing:"Loss of VM integrity" - \begin{itemize*} - \item -> depending on client data sensitivity - \end{itemize*} - \item Industrial plant control:"Tampering with frequency converters" - \begin{itemize*} - \item -> depending on plant sensitivity(cf.Stuxnet: nuclear centifuges) - \end{itemize*} - \item Critical public infrastructure:"Loss of availability due to DoS attacks" - \begin{itemize*} - \item -> depending on terroristic threat level - \end{itemize*} - \item Traffic management:"Loss of GPS data integrity" - \begin{itemize*} - \item -> depending on terroristic threat level - \end{itemize*} - \end{itemize*} - - General Fact: Occurrence probability ishighly scenario-specific - - Example: "Confidentiality breach of database contents" - \begin{itemize*} - \item Articles in online newspapers - \begin{itemize*} - \item -> smallfor articles that are publicly available anyway - \end{itemize*} - \item Account data of banks - \begin{itemize*} - \item -> medium, due to high attack costs compared to potential gain - \end{itemize*} - \item Plant control data of industrial production facility - \begin{itemize*} - \item -> high, due to high financial or political gain - \end{itemize*} - \end{itemize*} - - Depends on diverse, mostly non-technical side conditions -> advisory board needed for assessment:engineers, managers, users, ... - + \note{Damage potential \& Occurrence probability}{is highly scenario-specific} + Depends on diverse, mostly non-technical side conditions $\rightarrow$ advisory board needed for assessment \paragraph{Advisory Board Output Example} - \begin{tabular}{l|l|l|l} - Object & Risk & Dmg. Pot. & Rationale \\\hline - Personal Data (PD) & Loss of Confidentiality & medium & (1) Data protection acts, (2) Violation of personal rights \\ - & Loss of Integrity & low & Errors fast and easily detectable and correctable \\ - & Loss of Availability & low & Failures up to one week can be tolerated by manual procedures \\ - Technical Control Data (TCD) & Loss of Confidentiality & high & Loss of market leadership \\ - & Loss of Integrity & high & Production downtime \\ - & Loss of Availability & low & Minimal production delay, since backups are available - \end{tabular} - \begin{tabular}{l|l|l|l} - Object & Risk & Dmg. Pot. & Rationale \\\hline - Personal Data (PD) & Loss of Confidentiality & medium & Certified software \\ - & Loss of Integrity & low & Certified software, small incentive \\ - & Loss of Availability & medium & Certified software \\ - Technical Control Data (TCD) & Loss of Confidentiality & high & Huge financial gain by competitors \\ - & Loss of Integrity & medium & Medium gain by competitors or terroristic attackers \\ - & Loss of Availability & low & Small gain by competitors or terroristic attackers + \begin{tabular}{ l | l | p{.6cm} | p{4cm} } + Object & Risk (Loss of...) & Dmg. Pot. 
& Rationale \\\hline + PD & Confidentiality & med & Data protection acts \\ + PD & Confidentiality & med & Certified software \\ + PD & Integrity & low & Errors fast and easily detectable and correctable \\ + PD & Integrity & low & Certified software, small incentive \\ + PD & Availability & med & Certified software \\ + PD & Availability & low & Failures up to one week can be tolerated by manual procedures \\ + TCD & Confidentiality & high & Huge financial gain by competitors \\ + TCD & Confidentiality & high & Loss of market leadership \\ + TCD & Integrity & high & Production downtime \\ + TCD & Integrity & med & Medium gain by competitors or terroristic attackers \\ + TCD & Availability & low & Minimal production delay, since backups are available \\ + TCD & Availability & low & Small gain by competitors or terroristic attackers \end{tabular} + PD = Personal Data; TCD = Technical Control Data \begin{multicols*}{2} \begin{center} Resulting Risk Matrix \includegraphics[width=.9\linewidth]{Assets/Systemsicherheit-risk-matrix-1.png} \end{center} \begin{center} Identify 3 Regions \includegraphics[width=.9\linewidth]{Assets/Systemsicherheit-Risk-Matrix-2.png} \end{center} \end{multicols*} From Risks to Security Requirements \begin{itemize*} \item avoid: Intolerable risk, no reasonable proportionality of costs and benefits $\rightarrow$ Don’t implement such functionality! \item bear: Acceptable risk $\rightarrow$ Reduce economic damage (insurance) \item deal with: Risks that yield security requirements $\rightarrow$ Prevent or control by system-enforced security policies. \end{itemize*} Additional Criteria: \begin{itemize*} \item Again, non-technical side conditions may apply: \begin{itemize*} \item Expenses for human resources and IT \item Feasibility from organizational and technological viewpoints \end{itemize*} \item $\rightarrow$ Cost-benefit ratio: management and business experts involved \end{itemize*} \section{Security Policies and Models} \subsection{Security Policies} Motivation - A Traditional Scenario: \begin{itemize*} \item Similarity to systems security: protecting valued assets from threats (human life, cargo, ship) \item Difference: thousands of years of experience \item $\rightarrow$ We may learn something here! \end{itemize*} \begin{itemize*} \item What Protects these Assets? 
- \begin{itemize*} - \item Navigation lights:protect against collisions - \item Cannons/Guns:protect against pirates - \item Reefs, drift anchors:protect against bad weather - \end{itemize*} - \item $\rightarrow$ Security Mechanisms - \begin{itemize*} - \item Watch:protect against collisions - \item The art of sailing, regulations:protect against \& comply with special marine conditions(climate, traffic, canal navigation rules) - \end{itemize*} - \item $\rightarrow$ Competent \& coordinated operation of mechanisms - \item $\rightarrow$ Security Policies - \begin{itemize*} - \item Construction of hull - \item Placement of security mechanisms(nav lights in hold) - \end{itemize*} - \item $\rightarrow$ Effectiveness of mechanisms and enforcement of security policies - \item $\rightarrow$ Security Architecture + \item We have risks: Malware attack $\rightarrow$ violation of confidentiality and integrity of patient’s medical records + \item We infer security requirements: Valid information flows + \item We design a security policy: Rules for controlling information flows \end{itemize*} - \subsubsection{Terminology} - Security Policies: A Preliminary Definition - \begin{itemize*} - \item We have risks: - \begin{itemize*} - \item Gales $\rightarrow$ ship capsizes, pirates $\rightarrow$ ship captured - \item Malware attack $\rightarrow$ violation of confidentiality and integrity of patient’s medical records - \end{itemize*} - \item We infer security requirements: - \begin{itemize*} - \item Protect against gale force 12 - \item Valid information flows - \end{itemize*} - \item We design a security policy: - \begin{itemize*} - \item Rules for dealing with storms, pirates - \item Rules for controlling information flows - \end{itemize*} - \end{itemize*} + \note{Security Policy}{a set of rules designed to meet a set of security objectives} - > Security Policy - > - > A set of rules designed to meet a set of security objectives. - - > Security Objective - > - > A statement of intent to counter a given threat or to enforce a given security - policy. - (Common Criteria for Information Technology Security Evaluation, since 1996) + \note{Security Objective}{a statement of intent to counter a given threat or to enforce a given security policy} Policy representations: \begin{itemize*} @@ -1048,276 +639,155 @@ \item executable code \end{itemize*} - \paragraph{Example 1: Excerpt from the Unix Security Policy} + How to Implement Security Policies \begin{itemize*} - \item $\exists$ subjects(humans, processes) and objects(files, sockets, ...) - \item Each object has an owner - \item Owners control access permissions for their objects ($\rightarrow$ DAC) - \item $\exists$ 3 permissions: read, write, execute - \item $\forall$ objects: specific permissions can be granted for 3 subject classes: owner, group, others - \item Example: `\item rw\item r-\item r-\item 1 peter vsbs 2020-04-19 23:59 syssec-03.pdf` - \item Result: - \begin{itemize*} - \item $\rightarrow$ identity based + discretionary access control (IBAC + DAC) - \item $\rightarrow$ high degree of individual freedom - \item $\rightarrow$ global responsibility, limited individual horizon - \end{itemize*} - \end{itemize*} - - \paragraph{Example 2: Excerpt from the AlphaCompany Security Policy} - \begin{itemize*} - \item Authentication: - 1. Each user must be identified based on key certificates issued by Airbus - \item Authorization: - 2. Access to ProjectX files is granted only to the project staff (role-based access control) - 3. 
Changes to files are allowed only if both, the responsible engineer as well as the project leader, approve ("four eyes principle") - 4. No information must flow from ProjectX to sales department - \item Communication: - 5. For protecting integrity, confidentiality and authenticity, every communication is encrypted and digitally signed. - \end{itemize*} - - How to Implement Security Policies - Some Previews - \begin{itemize*} - \item A Integrated insystems software - \begin{itemize*} - \item Operating systems - \item Database systems - \item Middleware platforms - \end{itemize*} - \item B Integrated inapplication systems \end{itemize*} + How to Implement Security Policies \begin{itemize*} + \item (A) Integrated in systems software (operating systems, database systems) + \item (B) Integrated in application systems \end{itemize*} \subsubsection{Implementation Alternative A} - The security policy is handled anOS abstractionon its own $\rightarrow$ implemented inside the kernel - %![](Assets/Systemsicherheit-pos.png) + The security policy is handled as an OS abstraction of its own $\rightarrow$ implemented inside the kernel + \includegraphics[width=.5\linewidth]{Assets/Systemsicherheit-pos.png} Policy Enforcement in SELinux \begin{itemize*} - \item Security Server: Policy runtime environment (protected in kernel space) - \item Interceptors:Total control of critical interactions - \item Policy Compiler: Translates human-readable policy modules in kernel-readable binary modules - \item Security Server: Manages and evaluates these modules + \item \textbf{Security Server} Policy runtime environment + \item \textbf{Interceptors} Total control of critical interactions + \item \textbf{Policy Compiler} Translates human-readable policy modules into kernel-readable binary modules + \item \textbf{Security Server} Manages and evaluates these modules \end{itemize*} \subsubsection{Implementation Alternative B} - Application-embedded Policy: The security policy is only known and enforced by oneuser program $\rightarrow$ implemented in a user-space application - Application-level Security Architecture: The security policy is known and enforced by several collaborating user programs in anapplication systems $\rightarrow$ implemented in a local, user-space security architecture - Policy Server Embedded in Middleware: The security policy is communicated and enforced by several collaborating user programs in adistributed application systems $\rightarrow$ implemented in a distributed, user-space security architecture - %![](Assets/Systemsicherheit-application-embedded-policy.png) + \begin{itemize*} + \item \textbf{Application-embedded Policy} The security policy is only known and enforced by one user program $\rightarrow$ implemented in a user-space application + \item \textbf{Application-level Security Architecture} The security policy is known and enforced by several collaborating user programs in an application system $\rightarrow$ implemented in a local, user-space security architecture + \item \textbf{Policy Server Embedded in Middleware} The security policy is communicated and enforced by several collaborating user programs in a distributed application system $\rightarrow$ implemented in a distributed, user-space security architecture + \end{itemize*} + \includegraphics[width=.5\linewidth]{Assets/Systemsicherheit-application-embedded-policy.png} \subsection{Security Models} - Why We Use Formal Models - Goal of Formal Security Models \begin{itemize*} \item Complete, unambiguous representation of security policies for - 1. 
analyzing and explaining its behavior:
-    \begin{itemize*}
-    \item $\rightarrow$ "This security policy will never allow that ..."
-    \item $\rightarrow$ "This security policy authorizes/denies an access under conditions ... because ..."
-    2. enabling its correct implementation:
-    \item $\rightarrow$ "This rule is enforced by a C++ method ..."
-    \end{itemize*}
+    \item analyzing and explaining its behavior
+    \item enabling its correct implementation
     \end{itemize*}

     How We Use Formal Models: Model-based Methodology
     \begin{itemize*}
-    \item Abstraction from (usually too complex) reality $\rightarrow$ get rid of insignificant details e. g.: allows statements about computability and computation complexity
-    \item Precisionin describing what is significant $\rightarrow$ Model analysis and implementation
+    \item Abstraction from (usually too complex) reality $\rightarrow$ get rid of insignificant details
+    \item Precision in describing what is significant $\rightarrow$ model analysis and implementation
     \end{itemize*}

-    > Security Model
-    >
-    > A security model is a precise, generally formal representation of a security policy.
+    \note{Security Model}{A security model is a precise, generally formal representation of a security policy.}

     Model Spectrum
     \begin{itemize*}
     \item Models for access control policies:
-    \begin{itemize*}
-    \item identity-based access control (IBAC)
-    \item role-based access control (RBAC)
-    \item attribute-based access control (ABAC)
-    \end{itemize*}
-    \item Models for information flow policies
-    \begin{itemize*}
-    \item $\rightarrow$ multilevel security(MLS)
-    \end{itemize*}
-    \item Models for non-interference/domain isolation policies
-    \begin{itemize*}
-    \item $\rightarrow$ non-interference(NI)
-    \end{itemize*}
-    \item In Practice: Most oftenhybrid models
+    \begin{itemize*}
+    \item identity-based access control (IBAC)
+    \item role-based access control (RBAC)
+    \item attribute-based access control (ABAC)
+    \end{itemize*}
+    \item Models for information flow policies $\rightarrow$ multilevel security (MLS)
+    \item Models for non-interference/domain isolation policies $\rightarrow$ non-interference (NI)
+    \item In Practice: Most often hybrid models
     \end{itemize*}

     \subsubsection{Access Control Models}
-    Formal representations of permissions to execute operations on objects, e. g.:
-    \begin{itemize*}
-    \item Reading files
-    \item Issuing payments
-    \item Controlling industrial centrifuges
-    \end{itemize*}
-    Security policies describeaccess rules $\rightarrow$ security models formalize them
+    Formal representations of permissions to execute operations on objects

-    Taxonomy
-    > Identity-based access control models (IBAC)
-    >
-    > Rules based on the identity of individual subjects (users, apps, processes, ...) or objects (files, directories, database tables, ...) $\rightarrow$ "Ann may read ProjectX Files."
+    Security policies describe access rules $\rightarrow$ security models formalize them.
+
+    Taxonomy:
+    \note{Identity-based access control models (IBAC)}{Rules based on the identity of individual subjects (users, apps, processes, ...) or objects (files, directories, database tables, ...)}

-    > Role-based access control models (RBAC)
-    >
-    > Rules based on roles of subjects in an organization $\rightarrow$ "Ward physicians may modify electronic patient records (EPRs) in their ward." 
+
+    \note{Role-based access control models (RBAC)}{Rules based on roles of subjects in an organization}

-    > Attribute-based access control models (ABAC)
-    >
-    > Rules based on attributes of subjects and objects $\rightarrow$ "PEGI 18 rated movies may only be streamed to users aged 18 and over."
-
-    > Discretionary Access Control (DAC)
-    >
-    > Individual users specify access rules to objects within their area of responsibility ("at their discretion").
-
-    Example: Access control in many OS (e. g. Unix(oids), Windows)
+    \note{Attribute-based access control models (ABAC)}{Rules based on attributes of subjects and objects}
+    \note{Discretionary Access Control (DAC)}{Individual users specify access rules to objects within their area of responsibility (at their discretion).}

     Consequence: Individual users
     \begin{itemize*}
-    \item enjoy freedom w. r. t. granting access permissions as individually needed
-    \item need to collectively enforce their organization’s security policy:
-    \begin{itemize*}
-    \item competency problem
-    \item responsibility problem
-    \item malware problem
-    \end{itemize*}
-    \end{itemize*}
-
-    > Mandatory Access Control (MAC)
-    >
-    > System designers and administrators specify system-wide rules, that apply for all users and cannot be sidestepped.
-
-    Examples:
-    \begin{itemize*}
-    \item Organizational: airport security check
-    \item Technical: medical information systems, policy-controlled operating systems(e. g. SELinux)
+    \item may grant access permissions as individually needed
+    \item need to collectively enforce their organization’s security policy
+    \begin{itemize*}
+    \item competency problem
+    \item responsibility problem
+    \item malware problem
+    \end{itemize*}
     \end{itemize*}

+    \note{Mandatory Access Control (MAC)}{System designers and administrators specify system-wide rules that apply to all users and cannot be sidestepped.}
     Consequence:
     \begin{itemize*}
     \item Limited individual freedom
     \item Enforced by central instance:
-    \begin{itemize*}
-    \item clearly identified
-    \item competent (security experts)
-    \item responsible (organizationally \& legally)
-    \end{itemize*}
+    \begin{itemize*}
+    \item clearly identified
+    \item competent (security experts)
+    \item responsible (organizationally \& legally)
+    \end{itemize*}
     \end{itemize*}

     \paragraph{DAC vs. MAC}
-    In Real-world Scenarios: Mostly hybrid models enforced by both discretionary and mandatory components, e. g.:
+    In Real-world Scenarios: Mostly hybrid models enforced by both discretionary and mandatory components
     \begin{itemize*}
-    \item DAC: locally within a project, team members individually define permissions w. r. t. documents (implemented in project management software and workstation OSs) inside this closed scope;
-    \item MAC:globally for the organization, such that e. g. only documents approved for release by organizational policy rules (implemented in servers and their communication middleware) may be accessed from outside a project’s scope.
+    \item \textbf{DAC} locally within a project, team members individually define permissions w. r. t. documents inside this closed scope
+    \item \textbf{MAC} globally for the organization, such that e. g. only documents approved for release by organizational policy rules may be accessed from outside a project’s scope
     \end{itemize*}

     \paragraph{Identity-based Access Control Models (IBAC)}
-    Goal: To precisely specify the rights ofindividual, acting entities. 
-
-    Basic IBAC Paradigm %![](Assets/Systemsicherheit-ibac-basic.png)
-    \begin{itemize*}
-    \item User named s reads file named o
-    \item Client s transfers money to bank account o
-    \item Process with ID s sends over socket with ID o
-    \end{itemize*}
-
+    To precisely specify the rights of individual, acting entities.
+    \begin{center}
+    \includegraphics[width=.5\linewidth]{Assets/Systemsicherheit-ibac-basic.png}
+    \end{center}
     There are
     \begin{itemize*}
-    \item Subjects, i. e. active and identifiable entities, that execute
-    \item operations on
-    \item passive and identifiable objects, requiring
-    \item rights (also: permissions, privileges) which
-    \begin{itemize*}
-    \item control (restrict) execution of operations,
-    \item are checked against identity of subjects and objects.
-    \end{itemize*}
+    \item \textbf{Subjects}, i.e. active and identifiable entities, that execute
+    \item \textbf{Operations} on
+    \item passive and identifiable \textbf{Objects}, requiring
+    \item \textbf{Rights} (also: permissions, privileges) which
+    \begin{itemize*}
+    \item control (restrict) execution of operations,
+    \item are checked against identity of subjects and objects.
+    \end{itemize*}
     \end{itemize*}

     Access Control Functions [Lampson, 1974]
     \begin{itemize*}
     \item A really basic model to define access rights:
-    \begin{itemize*}
-    \item Who (subject) is allowed to do what (operation) on which object
-    \item Fundamental to OS access control since 1965 (Multics OS)
-    \item Formal paradigms: sets and functions
-    \end{itemize*}
+    \begin{itemize*}
+    \item Who (subject) is allowed to do what (operation) on which object
+    \item Fundamental to OS access control since 1965
+    \item Formal paradigms: sets and functions
+    \end{itemize*}
     \item Access Control Function (ACF)
-    \begin{itemize*}
-    \item $f:S \times O \times OP \rightarrow \{true,false\}$ where
-    \item S is a set of subjects (e. g. users, processes),
-    \item O is a set of objects(e. g. files, sockets, EPRs),
-    \item OP is a finite set of operations(e. g. reading, writing, deleting).
-    \end{itemize*}
-    \item Interpretation: Rights to execute operations are modeled by the ACF:
-    \begin{itemize*}
-    \item any $s\in S$ represents an authenticated active entity (e. g. a user or process) which potentially executes operations on objects
-    \item any $o\in O$ represents an authenticated passive entity (e. g. a file or a database table) on which operations are executed
-    \item for any $s\in S$,$o\in O$,$op\in OP$:s is allowed to execute op on o iff f(s,o,op)=true.
+    \begin{itemize*}
+    \item $f:S \times O \times OP \rightarrow \{true,false\}$ where
+    \item S is a set of subjects (e. g. users, processes),
+    \item O is a set of objects (e. g. files, sockets),
+    \item OP is a finite set of operations (e. g. read, write, delete)
+    \end{itemize*}
+    \item Interpretation: Rights to execute operations are modeled by the ACF
+    \begin{itemize*}
+    \item any $s\in S$ represents an authenticated active entity which potentially executes operations on objects
+    \item any $o\in O$ represents an authenticated passive entity on which operations are executed
+    \item for any $s\in S$, $o\in O$, $op\in OP$: $s$ is allowed to execute $op$ on $o$ iff $f(s,o,op)=true$. 
+    \item Model making: finding a tuple $⟨S,O,OP,f⟩$
+    \end{itemize*}
     \end{itemize*}

-    iff = "if and only if"
-
-    Example: Implementation of f in a Unix OS (heavily simplified):
-    \begin{itemize*}
-    \item S: set of identifiers for users who execute processes
-    \item O: set of identifiers for system objects, e. g. files, directories, sockets, ...
-    \item OP: set of system call identifiers
-    \end{itemize*}
-
-    Example for f(caller,file,read):
-
-    \begin{lstlisting}[
-    language=C++,
-    showspaces=false,
-    basicstyle=\ttfamily,
-    commentstyle=\color{gray}
-    ]
-    read ( caller , file ) {
-    if !(caller.uid == 0) {/* is caller == root? */
-    if !(R_MODE in file.inode.othersRWX) {/* check "other"-rights */
-    if !(caller.gid == file.inode.group && R_MODE in file.inode.groupRWX) {/* check "group"-rights */
-    if !(caller.uid == file.inode.owner && R_MODE in file.inode.ownerRWX) {/* check "group"-rights */
-    return ERR_ACCESS_DENIED;/* insufficient rights: deny access */
-    } } }
-    /* execute syscall "read" */
-    }
-    \end{lstlisting}

     \paragraph{Access Control Matrix}
-    Access Control Functions in Practice
-    Lampson [1974] already addresses the questions how to ...
+    Lampson [1974] addresses the question of how to ...
     \begin{itemize*}
     \item store in a well-structured way,
-    \item efficiently evaluate, and
-    \item completely analyze an ACF:
+    \item efficiently evaluate and
+    \item completely analyze an ACF
     \end{itemize*}

-    > Access Control Matrix (ACM)
-    >
-    > An ACM is a matrix $m:S\times O \rightarrow 2^{OP}$, such that $\forall s\in S,\forall o\in O:op\in m(s,o)\Leftrightarrow f(s,o,op)$.
+    \note{Access Control Matrix (ACM)}{An ACM is a matrix $m:S\times O \rightarrow 2^{OP}$, such that $\forall s\in S,\forall o\in O:op\in m(s,o)\Leftrightarrow f(s,o,op)$.}

-    An ACM is a rewriting of the definition of an ACF: nothing is added, nothing is left out ("$\Leftrightarrow$"). Despite a purely theoretical model: paved the way for practically implementing AC meta-informationas
-    \begin{itemize*}
-    \item tables
-    \item 2-dimensional lists
-    \item distributed arrays and lists
-    \end{itemize*}
+    An ACM is a rewriting of the definition of an ACF: nothing is added, nothing is left out ("$\Leftrightarrow$"). Despite being a purely theoretical model, it paved the way for practically implementing AC meta-information as tables, 2-dimensional lists, distributed arrays and lists.
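+    A minimal sketch of how an ACM-backed ACF check might look in C++ (illustrative names and types assumed for this example; not the lecture's code):
+    \begin{lstlisting}[language=C++,showspaces=false]
+    #include <map>
+    #include <set>
+    #include <string>
+    #include <utility>
+
+    // Sparse ACM m: only non-empty cells (subject, object) -> rights are stored.
+    using Cell = std::pair<std::string, std::string>;
+    static std::map<Cell, std::set<std::string>> m;
+
+    // ACF f(s,o,op) = true iff op is an element of the cell m(s,o).
+    bool f(const std::string& s, const std::string& o, const std::string& op) {
+        auto cell = m.find({s, o});
+        return cell != m.end() && cell->second.count(op) > 0;
+    }
+
+    int main() {
+        m[{"sAnn", "oAnn"}] = {"write"};           // one cell of an example m_0
+        return f("sAnn", "oAnn", "write") ? 0 : 1; // access allowed -> true
+    }
+    \end{lstlisting}
+    Slicing this map column-wise yields ACLs, row-wise capability lists (see below).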
Example
     \begin{itemize*}
@@ -1325,68 +795,42 @@
     \item $O=\{o_1 ,...,o_k\}$
     \item $OP=\{read,write\}$
     \item $2^{OP}=\{\varnothing,\{read\},\{write\},\{read,write\}\}^2$
-    %![](Assets/Systemsicherheit-access-control-matrix.png)
+    %![](Assets/Systemsicherheit-access-control-matrix.png)
     \end{itemize*}

     Implementation Notes
     \begin{itemize*}
-    \item ACMs are implemented in most
-    \begin{itemize*}
-    \item Operating systems
-    \item Database information systems
-    \item Middleware platforms(CORBA, Jini/Apache River, Web Services)
-    \item Distributed security architectures (Kerberos)
-    \end{itemize*}
-    \item whose security mechanisms use one of two implementations:
+    \item ACMs are implemented in most operating systems, database systems, and middleware
+    \item whose security mechanisms use one of two implementations
     \end{itemize*}

     Access Control Lists (ACLs)
     \begin{itemize*}
-    \item Columns of the ACM: `char*o3[N] = { "-", "-", "rw", ...};`
+    \item Columns of the ACM: \texttt{char* o3[N] = \{"-", "-", "rw", ...\};}
     \item Found in I-Nodes of Unix(oids), Windows, Mac OS
     \end{itemize*}

     Capability Lists
     \begin{itemize*}
-    \item Rows of the ACM: `char* s1[K] = { "-", "r", "-", ...};`
+    \item Rows of the ACM: \texttt{char* s1[K] = \{"-", "r", "-", ...\};}
     \item Found in distributed OSs, middleware, Kerberos
     \end{itemize*}

-    What we Actually Model:
-    > Protection State
-    >
-    > A fixed-time snapshot of all active entities, passive entities, and any meta-information used for making access decisions is called theprotection state of an access control system.
+    What we actually model:
+    \note{Protection State}{A fixed-time snapshot of all active entities, passive entities, and any meta-information used for making access decisions is called the protection state of an access control system.}

-    > Goal of ACFs/ACMs
-    >
-    > To precisely specify a protection state of an AC system.
+    Goal of ACF/ACM is to precisely specify a protection state of an AC system.

     \paragraph{The Harrison-Ruzzo-Ullman Model (HRU)}
-    Our HIS scenario ... modeled by an ACM:
-    \begin{itemize*}
-    \item $S=\{cox, kelso, carla,...\}$
-    \item $O=\{patId, diag, medic,...\}$
-    \end{itemize*}
-    \begin{tabular}{c|c|c|c}
-    m & parId & diag & medic \\\hline
-    cox & {read, write} & {read, write} & {read, write} \\
-    kelso & {read} & {read} & {read} \\
-    carla & {read} & $\varnothing$ & {read} \\
-    ...
-    \end{tabular}
-
-    We might do it like this, but ... Privilege escalation question:
-    "Can it ever happen that in a given state, some specific subject obtains a specific permission?"
+    Privilege escalation question: "Can it ever happen that in a given state, some specific subject obtains a specific permission?" ($\varnothing \Rightarrow \{r,w\}$)
     \begin{itemize*}
     \item ACM models a single state $⟨S,O,OP,m⟩$
-    \item ACM does not tell us anything about what might happen in the future
+    \item ACM does not tell anything about what might happen in the future
     \item Behavior prediction $\rightarrow$ proliferation of rights $\rightarrow$ HRU safety
     \end{itemize*}

-    Why "safety", not "security"? Well, historical ...
-
     We need a model which allows statements about
     \begin{itemize*}
     \item Dynamic behavior of right assignments
@@ -1399,72 +843,65 @@
     \item Deterministic automata (state machines) $\rightarrow$ for modeling runtime changes of a protection state
     \end{itemize*}

-    This idea was pretty awesome. We need to understand automata, since from then on they were used for most security models. $\rightarrow$ Small excursus
+    This idea was pretty awesome. 
We need to understand automata, since from then on they were used for most security models.

     \paragraph{Deterministic Automata}
-    Mealy Automaton: $⟨Q,\sum,\Omega,\delta,\lambda,q_0⟩$
+    Mealy Automaton $(Q,\sum,\Omega,\delta,\lambda,q_0)$
     \begin{itemize*}
-    \item $Q$ is a finite set of states (state space), e. g. $Q=\{q_0 ,q_1 ,q_2\}$
-    \item $\sum$ is a finite set of input words (input alphabet), e. g. $\sum=\{a,b\}$
-    \item $\Omega$ is a finite set of output words (output alphabet), e. g. $\Omega=\{yes,no\}$
+    \item $Q$ is a finite set of states, e. g. $Q=\{q_0 ,q_1 ,q_2\}$
+    \item $\sum$ is a finite set of input words, e. g. $\sum=\{a,b\}$
+    \item $\Omega$ is a finite set of output words, e. g. $\Omega=\{yes,no\}$
     \item $\delta:Q\times\sum\rightarrow Q$ is the state transition function
     \item $\lambda:Q\times\sum\rightarrow\Omega$ is the output function
     \item $q_0\in Q$ is the initial state
-    \item $\delta(q,\sigma)=q'$ and $\lambda(q,\sigma)=\omega$ can be expressed through thestate diagram: a directed graph $⟨Q,E⟩$, where each edge $e\in E$ is represented by a state transition’s predecessor node $q$, its successor node $q'$, and a string "$\sigma|\omega$" of its input and output, respectively.
-    %![](Assets/Systemsicherheit-mealy-automaton.png)
+    \item $\delta(q,\sigma)=q'$ and $\lambda(q,\sigma)=\omega$ can be expressed through the state diagram
     \end{itemize*}

-    Example: Return "yes" for any input in an unbroken sequence of "a" or "b", "no" otherwise.
-    %![](Assets/Systemsicherheit-mealy-beispiel.png)
-
     \paragraph{HRU Security Model}
     How we use Deterministic Automata
     \begin{itemize*}
-    \item Snapshot of an ACMis the automaton’s state
-    \item Changes of the ACMduring system usage are modeled by state transitions of the automaton
-    \item Effects ofoperationsthat cause such transitions are described by the state transition function
-    \item Analyses ofright proliferation($\rightarrow$ privilege escalation)are enabled by state reachability analysis methods
+    \item Snapshot of an ACM is the automaton’s state
+    \item Changes of the ACM during system usage are modeled by state transitions of the automaton
+    \item Effects of operations that cause such transitions are described by the state transition function
+    \item Analyses of right proliferation ($\rightarrow$ privilege escalation) are enabled by state reachability analysis methods
     \end{itemize*}

     An HRU model is a deterministic automaton $⟨Q,\sum,\delta,q_0 ,R⟩$ where
     \begin{itemize*}
     \item $Q= 2^S\times 2^O\times M$ is the state space where
-    \begin{itemize*}
-    \item S is a (not necessarily finite) set of subjects,
-    \item O is a (not necessarily finite) set of objects,
-    \item $M=\{m|m:S\times O\rightarrow 2^R\}$ is a (not necessarily finite) set of possible ACMs,
-    \end{itemize*}
+    \begin{itemize*}
+    \item S is a (not necessarily finite) set of subjects,
+    \item O is a (not necessarily finite) set of objects,
+    \item $M=\{m|m:S\times O\rightarrow 2^R\}$ is a set of possible ACMs,
+    \end{itemize*}
     \item $\sum=OP\times X$ is the (finite) input alphabet where
-    \begin{itemize*}
-    \item $OP$ is a set of operations,
-    \item $X=(S\cup O)^k$ is a set of k-dimensional vectors of arguments (subjects or objects) of these operations,
-    \end{itemize*}
+    \begin{itemize*}
+    \item $OP$ is a set of operations,
+    \item $X=(S\cup O)^k$ is a set of k-dimensional vectors of arguments (subjects or objects) of these operations,
+    \end{itemize*}
     \item $\delta:Q\times\sum\rightarrow Q$ is the state transition function,
     \item $q_0\in Q$ is the initial state,
     \item 
R is a (finite) set of access rights.
     \end{itemize*}

-    Interpretation
     \begin{itemize*}
-    \item Each $q=S_q,O_q,m_q\in Q$ models a system’s protection state:
-    \begin{itemize*}
-    \item current subjects set $S_q\subseteq S$
-    \item current objects set $O_q\subseteq O$
-    \item current ACM $m_q\in M$ where $m_q:S_q\times O_q\rightarrow 2^R$
-    \end{itemize*}
+    \item Each $q=⟨S_q,O_q,m_q⟩\in Q$ models a system’s protection state:
+    \begin{itemize*}
+    \item current subjects set $S_q\subseteq S$
+    \item current objects set $O_q\subseteq O$
+    \item current ACM $m_q\in M$ where $m_q:S_q\times O_q\rightarrow 2^R$
+    \end{itemize*}
     \item State transitions modeled by $\delta$ based on
-    \begin{itemize*}
-    \item the current automaton state
-    \item an input word $⟨op,(x_1,...,x_k)⟩\in\sum$ where $op$
-    \begin{itemize*}
-    \item may modify $S_q$ (create a user $x_i$, kill a process $x_i$ etc.),
-    \item may modify $O_q$ (create/delete a file $x_i$, open a socket $x_i$ etc.),
-    \item may modify the contents of a matrix cell $m_q(x_i,x_j)$ (enter or remove rights) where $1\leq i,j\leq k$.
-    \end{itemize*}
-    \item $\rightarrow$ We also call $\delta$ the state transition scheme (STS) of a model.
-    \item Historically: "authorization scheme" [Harrison et al., 1976].
-    \end{itemize*}
+    \begin{itemize*}
+    \item the current automaton state
+    \item an input word $⟨op,(x_1,...,x_k)⟩\in\sum$ where $op$
+    \item may modify $S_q$ (create a user $x_i$),
+    \item may modify $O_q$ (create/delete a file $x_i$),
+    \item may modify the contents of a matrix cell $m_q(x_i,x_j)$ (enter or remove rights) where $1\leq i,j\leq k$.
+    \item $\rightarrow$ We also call $\delta$ the state transition scheme (STS) of a model.
+    \item Historically: "authorization scheme" [Harrison et al., 1976].
+    \end{itemize*}
     \end{itemize*}

     \paragraph{State Transition Scheme (STS)}
@@ -1475,22 +912,12 @@
     \item $r_1 ...r_m\in R$
     \item $x_{s1},...,x_{sm}\in S_q$ and $x_{o1},...,x_{om}\in O_q$ where $s_i$ and $o_i$, $1\leq i\leq m$, are vector indices of the input arguments: $1\leq s_i,o_i\leq k$
     \item $p_1,...,p_n$ are HRU primitives
-    \item Note: $\circ$ is the (transitive) function composition operator: $(f\circ g)(x)=g(f(x))$
+    \item $\circ$ is the function composition operator: $(f\circ g)(x)=g(f(x))$
     \end{itemize*}

-    Whenever $q$ is obvious or irrelevant, we use a programming-style notation
-
-    Interpretation: The structure of STS definitions is fixed in HRU:
-    \begin{itemize*}
-    \item "if": A conjunction of condition clauses (or just conditions) with the sole semantics "is some right in some matrix cell".
-    \item "then": A concatenation (sequential execution) of HRU primitives.
-    \end{itemize*}
-
-    Conditions:
-    Expressions that need to evaluate "true" for state q as a necessary precondition for command $op$ to be executable (= can be successfully called).
+    Conditions: Expressions that need to evaluate "true" for state $q$ as a necessary precondition for command $op$ to be executable (= can be successfully called); in HRU, each condition checks whether some right is in some matrix cell.
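+    A minimal sketch (illustrative C++ types assumed for this example, not from the lecture) of evaluating such a conjunction of condition clauses against a protection state; the primitives executed afterwards are defined next:
+    \begin{lstlisting}[language=C++,showspaces=false]
+    #include <map>
+    #include <set>
+    #include <string>
+    #include <utility>
+    #include <vector>
+
+    using Cell = std::pair<std::string, std::string>;   // (subject, object)
+    using ACM  = std::map<Cell, std::set<std::string>>; // cell -> set of rights
+
+    // One condition clause "r_i in m(x_si, x_oi)".
+    struct Clause { std::string r, xs, xo; };
+
+    // Conjunction of all clauses: necessary precondition for executing op.
+    bool conditionsHold(const ACM& m, const std::vector<Clause>& cond) {
+        for (const Clause& c : cond) {
+            auto cell = m.find({c.xs, c.xo});
+            if (cell == m.end() || cell->second.count(c.r) == 0)
+                return false;  // one failing clause blocks the whole command
+        }
+        return true;
+    }
+    \end{lstlisting}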
-
-    Primitives:
-    Short, formal macros that describe differences between $q$ and $a$ successor state $q'=\sigma(q,⟨op,(x_1 ,...,x_k)⟩)$ that result from a complete execution of op:
+    Primitives: Short, formal macros that describe differences between $q$ and a successor state $q'=\delta(q,⟨op,(x_1 ,...,x_k)⟩)$ that result from a complete execution of op:
     \begin{itemize*}
     \item enter r into $m(x_s,x_o)$
     \item delete r from $m(x_s,x_o)$
@@ -1498,81 +925,43 @@
     \item create object $x_o$
     \item destroy subject $x_s$
     \item destroy object $x_o$
-    \item $\rightarrow$ Each of these with the intuitive semantics for manipulating $S_q, O_q$ or $m_q$.
+    \item Each of the above with the intuitive semantics for manipulating $S_q$, $O_q$ or $m_q$.
     \end{itemize*}

     Note the atomic semantics: the HRU model assumes that each command successfully called is always completely executed!

     How to Design an HRU Security Model:
-    1. Model Sets: Subjects, objects, operations, rights $\rightarrow$ define the basic sets $S,O,OP,R$
-    2. STS: Semantics of operations (e. g. the future API of the system to model) that modify the protection state $\rightarrow$ define $\sigma$ using the normalized form/programming syntax of the STS
-    3. Initialization: Define a well-known initial stateq $0 =⟨S_0 ,O_0 ,m_0 ⟩$ of the system to model
+    \begin{enumerate*}
+    \item Model Sets: Subjects, objects, operations, rights $\rightarrow$ define the basic sets $S,O,OP,R$
+    \item STS: Semantics of operations (e. g. the future API of the system to model) that modify the protection state $\rightarrow$ define $\delta$ using the normalized form/programming syntax of the STS
+    \item Initialization: Define a well-known initial state $q_0 =⟨S_0 ,O_0 ,m_0 ⟩$ of the system to model
+    \end{enumerate*}

-    An Open University Information System
-    %![](Assets/Systemsicherheit-university-information-system.png)
-    \begin{itemize*}
-    \item Informal security policy (heavily simplified):2 rules
-    \begin{itemize*}
-    \item "A sample solution for home assignments can be downloaded by students only after submitting their own solution."
-    \begin{itemize*}
-    \item a condition for readSample
-    \item a effect of writeSolution
-    \end{itemize*}
-    \item "Student solutions can be submitted only before downloading any sample solution."
-    \begin{itemize*}
-    \item a condition for writeSolution
-    \item a effect of readSample
-    \end{itemize*}
-    \end{itemize*}
-    \end{itemize*}
-
-
-    Model Making
-    1. Sets
+    1. Model Sets
     \begin{itemize*}
     \item Subjects, objects, operations, rights:
-    \begin{itemize*}
-    \item Subjects: An unlimited number of possible students: $S\cong\mathbb{N}$ (S is isomorphic to $N$)
-    \item Objects: An unlimited number of possible solutions: $O\cong\mathbb{N}$
-    \item Operations:
-    \begin{itemize*}
-    \item (a) Submit own solution: $writeSolution(s_{student},o_{solution})$
-    \item (b) Download sample solution: $readSample(s_{student},o_{sample})$
-    \item $\rightarrow OP=\{writeSolution, readSample\}$
-    \end{itemize*}
-    \item Rights: Exactly one right allows to execute each operation: $R\cong OP$
-    \begin{itemize*}
-    \item $\rightarrow R=\{write, read\}$
-    \end{itemize*}
-    \end{itemize*}
+    \begin{itemize*}
+    \item Subjects: An unlimited number of possible students: $S\cong\mathbb{N}$
+    \item Objects: An unlimited number of possible solutions: $O\cong\mathbb{N}$
+    \item Operations:
+    \begin{itemize*}
+    \item (a) Submit $writeSolution(s_{student},o_{solution})$
+    \item (b) Download $readSample(s_{student},o_{sample})$
+    \item $\rightarrow OP=\{writeSolution, readSample\}$
+    \end{itemize*}
+    \item Rights: Exactly one right allows executing each operation
+    \begin{itemize*}
+    \item $R\cong OP$ $\rightarrow R=\{write, read\}$
+    \end{itemize*}
+    \end{itemize*}
     \end{itemize*}

-    2. State Transition Scheme
-    \begin{itemize*}
-    \item Effects of operations on protection state:
-    \begin{itemize*}
-    \item writeSolution - Informal Policy: "A sample solution (...) can be downloaded by students only after submitting their own solution." $\Leftrightarrow$ "If the automaton receives an input ⟨writeSolution,(s,o)⟩ and the conditions are satisfied, it transitions to a state where s is allowed to download the sample solution."
-    \end{itemize*}
-    \end{itemize*}
-
-    \begin{lstlisting}[
-    language=Bash,
-    showspaces=false
-    ]
-    command writeSolution(s,o) ::= if write $\in$ m(s,o)
+    2. State Transition Scheme: Effects of operations on protection state
+    \begin{lstlisting}[language=Bash,showspaces=false]
+    command writeSolution(s,o) ::= if write in m(s,o)
    then
    enter read into m(s,o);
    fi
-    \end{lstlisting}
-    \begin{itemize*}
-    \item readSample
-    \item Informal Policy: "Student solutions can be submitted only before downloading any sample solution." $\Leftrightarrow$ "If the automaton receives an input⟨readSample,(s,o)⟩and the conditions are satisfied, it transitions to a state wheresis denied to submit a solution."
-    \end{itemize*}
-    \begin{lstlisting}[
-    language=Bash,
-    showspaces=false
-    ]
-    command readSample(s,o) ::= if read$\in$ m(s,o)
+    command readSample(s,o) ::= if read in m(s,o)
    then
    delete write from m(s,o);
    fi

@@ -1581,299 +970,179 @@
     \begin{itemize*}
     \item By model definition: $q_0 =⟨S_0 ,O_0 ,m_0 ⟩$
     \item For a course with (initially) three students:
-    \begin{itemize*}
-    \item $S_0 =\{sAnn, sBob, sChris\}$
-    \item $O_0 =\{oAnn, oBob, oChris\}$
-    \item $m_0$:
-    \begin{itemize*}
-    \item $m_0(sAnn,oAnn)=\{write\}$
-    \item $m_0(sBob,oBob)=\{write\}$
-    \item $m_0(sChris,oChris)=\{write\}$
-    \item $m_0(s,o)=\varnothing \Leftrightarrow s\not= o$
-    \end{itemize*}
-    \item Interpretation: "There is a course with three students, each of whom has their own workspace to which she is allowed to submit (write) a solution." 
-        \end{itemize*}
-    \end{itemize*}
+    \begin{itemize*}
+    \item $S_0 =\{sAnn, sBob, sChris\}$
+    \item $O_0 =\{oAnn, oBob, oChris\}$
+    \item $m_0$:
+    \begin{itemize*}
+    \item $m_0(sAnn,oAnn)=\{write\}$
+    \item $m_0(sBob,oBob)=\{write\}$
+    \item $m_0(sChris,oChris)=\{write\}$
+    \item $m_0(s,o)=\varnothing \Leftrightarrow s\not= o$
+    \end{itemize*}
+    \item Interpretation: "There is a course with three students, each of whom has their own workspace to which she is allowed to submit (write) a solution."
+    \end{itemize*}
     \end{itemize*}

     Model Behavior
     \begin{itemize*}
-    \item Initial Protection State
-    \begin{tabular}{l|l|l|l}
-    m & oAnn & oBob & oChris \\\hline
-    sAnn & {write} & $\varnothing$ & $\varnothing$ \\
-    sBob & $\varnothing$ & {write} & $\varnothing$ \\
-    sChris & $\varnothing$ & $\varnothing$ & {write}
-    \end{tabular}
+    \item Initial Protection State
+    \begin{center}\begin{tabular}{l|l|l|l}
+    m & oAnn & oBob & oChris \\\hline
+    sAnn & {write} & $\varnothing$ & $\varnothing$ \\
+    sBob & $\varnothing$ & {write} & $\varnothing$ \\
+    sChris & $\varnothing$ & $\varnothing$ & {write}
+    \end{tabular}\end{center}
     \item After $writeSolution(sChris, oChris)$
-    \begin{tabular}{l|l|l|l}
-    m & oAnn & oBob & oChris \\\hline
-    sAnn & {write} & $\varnothing$ & $\varnothing$ \\
-    sBob & $\varnothing$ & {write} & $\varnothing$ \\
-    sChris & $\varnothing$ & $\varnothing$ & {write, read}
-    \end{tabular}
+    \begin{center}\begin{tabular}{l|l|l|l}
+    m & oAnn & oBob & oChris \\\hline
+    sAnn & {write} & $\varnothing$ & $\varnothing$ \\
+    sBob & $\varnothing$ & {write} & $\varnothing$ \\
+    sChris & $\varnothing$ & $\varnothing$ & {write, read}
+    \end{tabular}\end{center}
    \item After $readSample(sChris, oChris)$
-    \begin{tabular}{l|l|l|l}
-    m & oAnn & oBob & oChris \\\hline
-    sAnn & {write} & $\varnothing$ & $\varnothing$ \\
-    sBob & $\varnothing$ & {write} & $\varnothing$ \\
-    sChris & $\varnothing$ & $\varnothing$ & {read}
-    \end{tabular}
+    \begin{center}\begin{tabular}{l|l|l|l}
+    m & oAnn & oBob & oChris \\\hline
+    sAnn & {write} & $\varnothing$ & $\varnothing$ \\
+    sBob & $\varnothing$ & {write} & $\varnothing$ \\
+    sChris & $\varnothing$ & $\varnothing$ & {read}
+    \end{tabular}\end{center}
     \end{itemize*}

-    Summary
+    Summary: Model Behavior
     \begin{itemize*}
-    \item Model Behavior
-    \begin{itemize*}
-    \item The model’sinputis a sequence of actions from OP together with their respective arguments.
-    \item The automaton changes its state according to the STS and the semantics of HRU primitives (here: enter and delete).
-    \item In the initial state, each student may (repeatedly) submit her respective solution.
-    \end{itemize*}
-    \item Tricks in this Example
-    \begin{itemize*}
-    \item The sample solution is not represented by a separate object $\rightarrow$ no separate column in the ACM.
-    \item Instead, we smuggled thereadright for it into the cell of each student’s solution ...
-    \end{itemize*}
-    \item Where Do We Stand?
-    \begin{itemize*}
-    \item We can now model a security policy for particular IBAC scenarios
-    \item We can formally express them through an automaton-based framework.
-    \end{itemize*}
-    \item What’s Next? Why all this?
-    \begin{itemize*}
-    \item Correct specification and implementation of the modeled policy
-    \item Analysis of security properties $\rightarrow$ Next ...
-    \end{itemize*}
-    \end{itemize*}
+    \item The model’s input is a sequence of actions from OP together with their respective arguments.
+    \item The automaton changes its state according to the STS and the semantics of HRU primitives. 
+
+    \item In the initial state, each student may (repeatedly) submit her respective solution.
+    \end{itemize*}
+    Tricks in this Example
+    \begin{itemize*}
+    \item The sample solution is not represented by a separate object $\rightarrow$ no separate column in the ACM.
+    \item Instead, we smuggled the read right for it into the cell of each student’s solution ...
+    \end{itemize*}

     \paragraph{HRU Model Analysis}
-    \begin{itemize*}
-    \item Reminder: "For a given security model, is it possible that a subjecteverobtains a specific permission with respect to a specific object?"
-    \item Analysis of Right Proliferation $\rightarrow$ The HRU safety problem.
-    \end{itemize*}
+    Analysis of Right Proliferation $\rightarrow$ The HRU safety problem.

-    InputSequences
+    Input Sequences
     \begin{itemize*}
-    \item "What is the effect of an input in a given state?" $\rightarrow$ asingle state transitionas defined by $\delta$
-    \item "What is the effect of an input sequence in a given state?" $\rightarrow$ a composition ofsequential state transitionsas defined by $\delta*$
+    \item ,,What is the effect of an input in a given state?'' $\rightarrow$ a single state transition as defined by $\delta$
+    \item ,,What is the effect of an input sequence in a given state?'' $\rightarrow$ a composition of sequential state transitions as defined by $\delta^*$
     \end{itemize*}

-    > Transitive State Transition Function $\delta^*$
-    >
-    > Let $\sigma\sigma\in\sum^*$ be a sequence of inputs consisting of a single input $\sigma\in\sum\cup\{\epsilon\}$ followed by a sequence $\sigma\in\sum^*$, where $\epsilon$ denotes an empty input sequence. Then, $\delta^*:Q\times\sum^*\rightarrow Q$ is defined by
+    \note{Transitive State Transition Function $\delta^*$:}{Let $\sigma\sigma^*\in\sum^*$ be a sequence of inputs consisting of a single input $\sigma\in\sum\cup\{\epsilon\}$ followed by a sequence $\sigma^*\in\sum^*$, where $\epsilon$ denotes an empty input sequence. Then, $\delta^*:Q\times\sum^*\rightarrow Q$ is defined by
+    \begin{itemize*}
+    \item $\delta^*(q,\sigma\sigma^*)=\delta^*(\delta(q,\sigma),\sigma^*)$
+    \item $\delta^*(q,\epsilon)=q$.
+    \end{itemize*}
+    }
+
+    \note{HRU Safety}{(also simple-safety) A state q of an HRU model is called HRU safe with respect to a right $r\in R$ iff, beginning with q, there is no sequence of commands that enters r in an ACM cell where it did not exist in q.}
+
+    According to Tripunitara and Li, simple-safety is defined as:
+
+    \note{HRU Safety}{For a state $q=⟨S_q,O_q,m_q⟩\in Q$ and a right $r\in R$ of an HRU model $⟨Q,\sum,\delta,q_0,R⟩$, the predicate $safe(q,r)$ holds iff
+    $\forall q'=⟨S_{q'},O_{q'},m_{q'}⟩ \in \{\delta^*(q,\sigma^*)|\sigma^*\in\sum^*\},\forall s\in S_{q'},\forall o\in O_{q'}: r\in m_{q'}(s,o)\Rightarrow s\in S_q \wedge o\in O_q \wedge r\in m_q(s,o)$.
+    We say that an HRU model is safe w.r.t. 
r iff $safe(q_0 ,r)$.}

+    Example: assume all states in $\{\delta^*(q,\sigma^*)|\sigma^*\in\sum^*\}$ have been validated except for $q'$:
+    \begin{tabular}{l|l|l|l}
+    $m_q$ & $o_1$ & $o_2$ & $o_3$ \\\hline
+    $s_1$ & $\{r_1,r_3\}$ & $\{r_1,r_3\}$ & $\{r_2\}$ \\
+    $s_2$ & $\{r_1\}$ & $\{r_1\}$ & $\{r_2\}$ \\
+    $s_3$ & $\varnothing$ & $\varnothing$ & $\{r_2\}$
+    \end{tabular}
+    \begin{tabular}{l|l|l|l|l}
+    $m_{q'}$ & $o_1$ & $o_2$ & $o_3$ & $o_4$ \\\hline
+    $s_1$ & $\{r_1,r_3\}$ & $\{r_1\}$ & $\{r_2\}$ & $\varnothing$ \\
+    $s_2$ & $\{r_1,r_2\}$ & $\{r_1\}$ & $\{r_2\}$ & $\{r_2\}$ \\
+    $s_3$ & $\varnothing$ & $\varnothing$ & $\varnothing$ & $\varnothing$
+    \end{tabular}
     \begin{itemize*}
+    \item $r_3\not\in m_{q'}(s_1,o_2)\wedge r_3\in m_q(s_1,o_1)\Rightarrow safe(q,r_3)$
+    \item $r_2\in m_{q'}(s_2,o_1)\wedge r_2 \not\in m_q(s_2,o_1)\Rightarrow\lnot safe(q,r_2)$
+    \item $r_2\in m_{q'}(s_2,o_4)\wedge o_4\not\in O_q\Rightarrow\lnot safe(q,r_2)$
     \end{itemize*}

-    HRU Safety
-    A state q of an HRU model is called HRU safe with respect to a right $r\in R$ iff, beginning with q, there is no sequence of commands that enters r in an ACM cell where it did not exist in q.
-    According to Tripunitara and Li [2013], this property (Due to more technical details, it’s called simple-safety there.) is defined as:
-    > HRU Safety
-    >
-    > For a state $q=\{S_q,O_q,m_q\}\in Q$ and a right $r\in R$ of an HRU model $⟨Q,\sum,\delta,q_0,R⟩$, the predicate $safe(q,r)$ holds iff
-    > $\forall q'= S_{q'},O_{q'},m_{q'} \in \{\delta^*(q,\sigma^*)|\sigma^*\in\sum^*\},\forall s\in S_{q'},\forall o\in O_{q'}: r\in m_{q'}(s,o)\Rightarrow s\in S_q \wedge o\in O_q \wedge r\in m_q(s,o)$.
-    >
-    > We say that an HRU model is safe w.r.t. r iff $safe(q_0 ,r)$. 
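+    Since HRU safety is undecidable in general, an implementation can at best search for a counterexample. A bounded-depth C++ sketch (illustrative types assumed for this example, not from the lecture) that falsifies $safe(q_0,r)$ when it finds a leaked right:
+    \begin{lstlisting}[language=C++,showspaces=false]
+    #include <functional>
+    #include <map>
+    #include <set>
+    #include <string>
+    #include <tuple>
+    #include <vector>
+
+    using Cell = std::pair<std::string, std::string>;
+    using ACM  = std::map<Cell, std::set<std::string>>;
+
+    struct State {
+        std::set<std::string> S, O;  // current subjects and objects
+        ACM m;                       // current ACM
+        bool operator<(const State& x) const {
+            return std::tie(S, O, m) < std::tie(x.S, x.O, x.m);
+        }
+    };
+
+    // A command maps a state to all successor states (one per argument vector).
+    using Command = std::function<std::vector<State>(const State&)>;
+
+    // true iff, within `depth` steps from q0, right r shows up in a cell
+    // where it was absent in q0 -- i.e. safe(q0, r) has been falsified.
+    bool violatesSafety(const State& q0, const std::vector<Command>& cmds,
+                        const std::string& r, int depth) {
+        std::set<State> seen{q0};
+        std::vector<State> frontier{q0};
+        for (int d = 0; d < depth; ++d) {
+            std::vector<State> next;
+            for (const State& q : frontier)
+                for (const Command& c : cmds)
+                    for (const State& q2 : c(q)) {
+                        if (!seen.insert(q2).second) continue; // visited
+                        for (const auto& [cell, rights] : q2.m)
+                            if (rights.count(r) &&
+                                !(q0.m.count(cell) && q0.m.at(cell).count(r)))
+                                return true; // r leaked into a new cell
+                        next.push_back(q2);
+                    }
+            frontier = std::move(next);
+        }
+        return false; // nothing found up to this depth -- NOT a safety proof
+    }
+    \end{lstlisting}
+    Termination is guaranteed only by the depth bound; a negative result proves nothing (semi-decidability), which is exactly what the heuristic analysis discussed later exploits.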
-
-    \paragraph{HRU Safety}
-    Examples
-    \begin{itemize*}
-    \item Assume all states in $\{\delta^*(q,\sigma^*)|\sigma^*\in\sum^*\}$ have been validated except for $q'$:
-    \begin{itemize*}
-    \item State transfer 1
-    \begin{tabular}{l|l|l|l}
-    $m_q$ & $o_1$ & $o_2$ & $o_3$ \\\hline
-    $s_1$ & $\{r_1,r_3\}$ & $\{r_1,r_3\}$ & $\{r_2\}$ \\
-    $s_2$ & $\{r_1\}$ & $\{r_1\}$ & $\{r_2\}$ \\
-    $s_3$ & $\varnothing$ & $\varnothing$ & $\{r_2\}$
-    \end{tabular}
-    \begin{itemize*}
-    \item $\Rightarrow \delta^*(q,\sigma^*)$
-    \end{itemize*}
-    \begin{tabular}{l|l|l|l}
-    $m_{q'}$ & $o_1$ & $o_2$ & $o_3$ \\\hline
-    $s_1$ & $\{r_1,r_3\}$ & $\{r_1\}$ & $\{r_2\}$ \\
-    $s_2$ & $\{r_1,r_2\}$ & $\{r_1\}$ & $\{r_2\}$ \\
-    $s_3$ & $\varnothing$ & $\varnothing$ & $\varnothing$
-    \end{tabular}
-    \begin{itemize*}
-    \item $r_3\not\in m_{q'}(s_1,o_2)\wedge r_3\in m_q(s_1,o_1)\Rightarrow safe(q,r_3)$
-    \item $r_2\in m_{q'}(s_2,o_1)\wedge r_2 \not\in m_q(s_2,o_1)\Rightarrow\lnot safe(q,r_2)$
-    \end{itemize*}
-    \item State transfer 2
-    \begin{tabular}{l|l|l|l}
-    $m_q$ & $o_1$ & $o_2$ & $o_3$ \\\hline
-    $s_1$ & $\{r_1,r_3\}$ & $\{r_1,r_3\}$ & $\{r_2\}$ \\
-    $s_2$ & $\{r_1\}$ & $\{r_1\}$ & $\{r_2\}$ \\
-    $s_3$ & $\varnothing$ & $\varnothing$ & $\{r_2\}$
-    \end{tabular}
-    \begin{itemize*}
-    \item $\Rightarrow \delta^*(q,\sigma^*)$
-    \end{itemize*}
-    \begin{tabular}{l|l|l|l|l}
-    $m_{q'}$ & $o_1$ & $o_2$ & $o_3$ & $o_4$ \\\hline
-    $s_1$ & $\{r_1,r_3\}$ & $\{r_1,r_3\}$ & $\{r_2\}$ & $\varnothing$ \\
-    $s_2$ & $\{r_1\}$ & $\{r_1\}$ & $\{r_2\}$ & $\{r_2\}$ \\
-    $s_3$ & $\varnothing$ & $\varnothing$ & $\{r_2\}$ & $\varnothing$
-    \end{tabular}
-    \begin{itemize*}
-    \item $\forall s\in S_{q'}:r_3\not\in m_{q'}(s,o_4)\wedge r_3\in m_q(s_1,o_1)\wedge r_3\in m_q(s_1,o_2)\Rightarrow safe(q,r_3)$
-    \item $r_2\in m_{q'}(s_2,o_4)\wedge o_4\not\in O_q\Rightarrow\lnot safe(q,r_2)$
-    \end{itemize*}
-    \end{itemize*}
-    \end{itemize*}
-
-    Let’s dissect the previous definitions: from a practical perspective, showing that an HRU model is safe w.r.t. r means to
-    1. Search for any possible (reachable) successor state $q'$ of $q_0$ ("$\{\delta(q_0,\sigma)|\sigma\in\sum\}$")
-    2. Visit all cells in $m_{q'}$ ("$\forall s\in S_{q'},\forall o\in O_{q'}:...$")
-    3. If r is found in one of these cells ("$r\in m_{q'}(s,o)$"), check if
-    \begin{itemize*}
-    \item $m_q$ is defined for this very cell ("$s\in S_q\wedge o\in O_q$"),
-    \item $r$ was already contained in this very cell in $m_q$ ("$r\in m_q(s,o)$").
-    \end{itemize*}
-    4. Recursively proceed with 2. for any possible successor state $q''$ of $q'$ ("$\{\delta^*(q_0,\sigma^*)|\sigma^*\in\sum^*\}$")
+    From a practical perspective, showing that an HRU model is safe w.r.t. r means to
+    \begin{enumerate*}
+    \item Search for any possible (reachable) successor state $q'$ of $q_0$
+    \item Visit all cells in $m_{q'}$ ($\forall s\in S_{q'},\forall o\in O_{q'}:...$)
+    \item If r is found in one of these cells ($r\in m_{q'}(s,o)$), check if
+    \begin{itemize*}
+    \item $m_q$ is defined for this very cell ($s\in S_q\wedge o\in O_q$),
+    \item $r$ was already contained in this very cell in $m_q$ ($r\in m_q(s,o)$).
+    \end{itemize*}
+    \item Recursively proceed with 2. for any possible successor state $q''$ of $q'$
+    \end{enumerate*}

     Safety Decidability
-    > Theorem 1 [Harrison et al., 1976]
-    >
-    > Ingeneral, HRU safety is not decidable.
+    \note{Theorem 1 [Harrison]}{In general, HRU safety is not decidable.}

-    > Theorem 2 (also Harrison et al. [1976])
-    >
-    > For mono-operational models, HRU safety is decidable.
+    \note{Theorem 2 [Harrison]}{For mono-operational models, HRU safety is decidable.}

-    "So ... 
what is amono-operational HRU model?" $\rightarrow$ exactly one primitive for each operation in the STS:
-
-    \begin{lstlisting}[
-    language=C++,
-    showspaces=false
-    ]
-    command op(x_1 , ...,x_k) ::= if r_1 \in m(x_s1 ,x_o1 ) \wedge
-    ... \wedge
-    r_m \in m(x_sm,x_om)
-    then
-    p_1;
-    fi
-    \end{lstlisting}

     \begin{itemize*}
-    \item Theorem 1: See Harrison et al. [1976], reduction to the Halteproblem.
-    \item Theorem 2: We’ll have a closer look at this one ...
-    \begin{itemize*}
-    \item Insights into the operational principles modeled by HRU models
-    \item Demonstrates a method to prove safety property for a particular, given model
-    \item $\rightarrow$ "Proofs teach us how to build things so nothing more needs to be proven." (W. E. Kühnhauser)
-    \end{itemize*}
+    \item Insights into the operational principles modeled by HRU models
+    \item Demonstrates a method to prove the safety property for a particular, given model
+    \item $\rightarrow$ ,,Proofs teach us how to build things so nothing more needs to be proven.'' (W. E. Kühnhauser)
     \end{itemize*}

-    \paragraph{Proof of Theorem}
-    \begin{itemize*}
-    \item Proof Sketch
-    1. Find an upper bound for the length of all input sequences with different effects on the protection state w.r.t. safety
-    If such can be found: $\exists$ a finite number of input sequences with different effects
-    2. All these inputs can be tested whether they violate safety. This test terminates because:
-    \begin{itemize*}
-    \item each input sequence is finite
-    \item there is only a finite number of relevant sequences
-    \end{itemize*}
-    \item $\rightarrow$ safety is decidable
-    \end{itemize*}
-
-    Given a mono-operational HRU model.
-    Let $\sigma_1...\sigma_n$ be any sequence of inputs in $\sum^*$ that violates $safe(q,r)$, and let $p_1...p_n$ be the corresponding sequence of primitives (same length, since mono-operational).
-
-    Proposition: For each such sequence, there is a corresponding finite sequence that
-    \begin{itemize*}
-    \item Still violates $safe(q,r)$
-    \item Consists only of enter and two initial create primitives
-    \end{itemize*}
-
-    In other words: For any input sequence,$\exists$ a finite sequence with the same effect.
+    A mono-operational HRU model has exactly one primitive for each operation in the STS.
+
+    \paragraph{Proof of Theorem - Proof Sketch}
+    \begin{enumerate*}
+    \item Find an upper bound for the length of all input sequences with different effects on the protection state w.r.t. safety. If such a bound can be found: $\exists$ a finite number of input sequences with different effects.
+    \item All these inputs can be tested whether they violate safety. This test terminates because:
+    \begin{itemize*}
+    \item each input sequence is finite
+    \item there is only a finite number of relevant sequences
+    \end{itemize*}
+    \item $\rightarrow$ safety is decidable
+    \end{enumerate*}

     Proof:
     \begin{itemize*}
-    \item We construct these finite sequences ...$\rightarrow$
-    \item Transform $\sigma_1...\sigma_n$ into shorter sequences with the same effect:
-    1. Remove all input operations that contain delete or destroy primitives. The sequence still violates $safe(q,r)$, because conditions of successive commands must still be satisfied (no absence, only presence of rights is checked).
-    2. Prepend the sequence with an initial create subject $s_{init}$ operation. This won’t change its netto effect, because the new subject isn’t used anywhere.
-    3. 
Prune the last create subject s operation and substitute each following reference to s with $s_{init}$. Repeat until allcreate subjectoperations are removed, except from the initialcreate subject sinit. - 4. Same as steps 2 and 3 for objects. - 5. Remove all redundant enter operations (remember: each matrix cell is a set $\rightarrow$ unique elements). + \item construct finite sequences ...$\rightarrow$ + \item Transform $\sigma_1...\sigma_n$ into shorter sequences + \begin{enumerate*} + \item Remove all input operations that contain delete or destroy primitives (no absence, only presence of rights is checked). + \item Prepend the sequence with an initial create subject $s_{init}$ operation. + \item Prune the last create subject s operation and substitute each following reference to s with $s_{init}$. Repeat until all create subject operations are removed, except from the initial create subject $s_{init}$. + \item Same as steps 2 and 3 for objects. + \item Remove all redundant enter operations. + \end{enumerate*} \end{itemize*} - Example: - \begin{tabular}{l|l|l|l|l|l} - init & 1. & 2. & 3. & 4. & 5. \\\hline - ... & ... & create subject $s_{init}$; & create subject $s_{init}$; & create subject $s_{init}$; & create subject $s_{init}$; \\ - ... & ... & ... & ... & create object $o_{init}$ & create object $o_{init}$ \\ - create subject x2; & create subject x2; & create subject x2; & - & - & - \\ - create object x5; & create object x5; & create object x5; & create object x5; & - & - \\ - enter r1 into m(x2,x5); & enter r1 into m(x2,x5); & enter r1 into m(x2,x5); & enter r1 into $m(s_{init},x5)$; & enter r1 into $m(s_{init},o_{init})$; & enter r1 into $m(s_{init},o_{init})$; \\ - enter r2 into m(x2,x5); & enter r2 into m(x2,x5); & enter r2 into m(x2,x5); & enter r2 into $m(s_{init},x5)$; & enter r2 into $m(s_{init},o_{init})$; & enter r2 into $m(s_{init},o_{init})$; \\ - create subject x7; & create subject x7; & create subject x7; & - & - & - \\ - delete r1 from m(x2,x5); & - & - & - & - & - \\ - destroy subject x2; & - & - & - & - & - \\ - enter r1 into m(x7,x5); & enter r1 into m(x7,x5); & enter r1 into m(x7,x5); & enter r1 into $m(s_{init},x5)$; & enter r1 into $m(s_{init},o_{init})$; & - \\ - ... & ... & ... & ... & ... & ... + \begin{tabular}{l|l} + init & 5. \\\hline + ... & create subject $s_{init}$; \\ + ... 
& create object $o_{init}$ \\ + create subject $x2;$ & - \\ + create object $x5;$ & - \\ + enter r1 into $m(x2,x5);$ & enter r1 into $m(s_{init},o_{init})$; \\ + enter r2 into $m(x2,x5);$ & enter r2 into $m(s_{init},o_{init})$; \\ + create subject $x7;$ & - \\ + delete r1 from $m(x2,x5)$; & - \\ + destroy subject $x2;$ & - \\ + enter r1 into $m(x7,x5);$ & - \end{tabular} - Observations + Conclusions from these Theorems: Dilemma: \begin{itemize*} - \item after step 3: - \begin{itemize*} - \item Except for $s_{init}$, the sequence creates no more subjects - \item All rights of the formerly created subjects are accumulated in $s_{init}\rightarrow$ for the evaluation of $safe(q,r)$, nothing has changed: - \begin{itemize*} - \item generally: $\forall s\in S_{q'},\forall o\in O_{q'}:r\in m_{q'}(s,o)\Rightarrow s\in S_q\wedge o\in O_q\wedge r\in m_q(s,o)$ - \item in this case: $\forall s\in S_{q'},\forall o\in O_{q'}:r\in m_{q'}(s,o)\Rightarrow s\not=s_{init}\wedge o\in O_q\wedge r\in m_q(s,o)$ - \end{itemize*} - \item The sequence is generally shorter (never longer) than before - \end{itemize*} - \item Final Observations - \begin{itemize*} - \item Except for $s_{init}$ and $o_{init}$, the sequence creates no subjects or objects - \item All entered rights are accumulated in $m_{q'}(s_{init},o_{init})$: - \begin{itemize*} - \item generally: $\forall s\in S_{q'},\forall o\in O_{q'}:r\in m_{q'}(s,o)\Rightarrow s\in S_q\wedge o\in O_q\wedge r\in m_q(s,o)$ - \item here: $\forall s\in S_{q'},\forall o\in O_{q'}:r\in m_{q'}(s,o)\Rightarrow s\not=s_{init}\wedge o\not=o_{init}\wedge r\in m_q(s,o)$ - \end{itemize*} - \item This sequence still violates $safe(q,r)$, but its length is restricted to $(|S_q| + 1)(|O_q|+1)|R|+2$ because - \begin{itemize*} - \item Each enter must enter a new right into a cell - \item The number of cells is restricted to $(|S_q| + 1)(|O_q|+1)$ - \end{itemize*} - \end{itemize*} + \item General (unrestricted) HRU models + \begin{itemize*} + \item have strong expressiveness $\rightarrow$ can model a broad range of AC policies + \item are hard to analyze: algorithms and tools for safety analysis + \end{itemize*} + \item Mono-operational HRU models + \begin{itemize*} + \item have weak expressiveness $\rightarrow$ goes as far as uselessness (only create files) + \item are efficient to analyze: algorithms and tools for safety analysis + \item $\rightarrow$ are always guaranteed to terminate + \item $\rightarrow$ are straight-forward to design + \end{itemize*} \end{itemize*} - Conclusions from these Theorems - \begin{itemize*} - \item Dilemma: - \begin{itemize*} - \item General (unrestricted) HRU models - \begin{itemize*} - \item have strong expressiveness $\rightarrow$ can model a broad range of AC policies - \item are hard to analyze: algorithms and tools for safety analysis - \begin{itemize*} - \item $\rightarrow$ cannot certainly produce accurate results - \item $\rightarrow$ are hard to design for approximative results - \end{itemize*} - \end{itemize*} - \item Mono-operational HRU models - \begin{itemize*} - \item have weak expressiveness $\rightarrow$ goes as far as uselessness: e. g. for modeling Unix creat(can only create files, sockets, IPC, ... that no user process can access!) 
-    \item are efficient to analyze: algorithms and tools for safety analysis
-    \item $\rightarrow$ are always guaranteed to terminate
-    \item $\rightarrow$ are straight-forward to design
-    \end{itemize*}
-    \end{itemize*}
-    \end{itemize*}
-
-    Consequences:
-    \begin{itemize*}
-    \item Model variants with restricted yet usable expressiveness have been proposed
-    \item Heuristic analysis methods try to provide educated guesses about safety of unrestricted HRU
-    \end{itemize*}
-
-    \paragraph{(A) Restricted Model Variants}

     Static HRU Models
     \begin{itemize*}
@@ -1926,16 +1195,16 @@
     1. Static phase: Infer knowledge from the model that helps the heuristic make "good" decisions.
     \begin{itemize*}
     \item $\rightarrow$ Runtime: polynomial in model size ($q_0 + STS$)
-    2. Simulation phase: The automaton is implemented and, starting with $q_0$, fed with inputs $\sigma=⟨op,x⟩$
-    \begin{itemize*}
-    \item $\rightarrow$ For each $\sigma$, the heuristic has to decide:
-    \begin{itemize*}
-    \item which operation op to use
-    \item which vector of arguments x to pass
-    \item which $q_i$ to use from the states in $Q$ known so far
-    \end{itemize*}
-    \item Termination: As soon as $\sigma(q_i,\sigma)$ violates $safe(q_0,r)$.
-    \end{itemize*}
+    2. Simulation phase: The automaton is implemented and, starting with $q_0$, fed with inputs $\sigma=⟨op,x⟩$
+    \begin{itemize*}
+    \item $\rightarrow$ For each $\sigma$, the heuristic has to decide:
+    \begin{itemize*}
+    \item which operation op to use
+    \item which vector of arguments x to pass
+    \item which $q_i$ to use from the states in $Q$ known so far
+    \end{itemize*}
+    \item Termination: As soon as $\delta(q_i,\sigma)$ violates $safe(q_0,r)$.
+    \end{itemize*}
     \end{itemize*}

     Goal: Iteratively build up the (possibly infinite!) $Q$ for a model to falsify safety by example (finding a violating, but possible protection state).
@@ -1944,11 +1213,11 @@
     \begin{itemize*}
     \item Termination: Well ... we only have a semi-decidable problem here: It can be guaranteed that a model is unsafe if we terminate. We cannot ever prove the opposite, however! ($\rightarrow$ safety undecidability)
     \item Performance: A few results
-    \begin{itemize*}
-    \item 2013:Model size 10 000 $\approx 2215$ s
-    \item 2018:Model size 10 000 $\approx 0,36$ s
-    \item 2018:Model size 10 000 000 $\approx 417$ s
-    \end{itemize*}
+    \begin{itemize*}
+    \item 2013: Model size 10 000 $\approx 2215$ s
+    \item 2018: Model size 10 000 $\approx 0.36$ s
+    \item 2018: Model size 10 000 000 $\approx 417$ s
+    \end{itemize*}
     \end{itemize*}

     Achievements:
@@ -1996,30 +1265,29 @@
     \begin{itemize*}
     \item Foundation of a TAM model is an HRU model $⟨Q,\sum,\delta,q_0 ,R⟩$, where $Q= 2^S\times 2^O\times M$
     \item However: $S\subseteq O$, i. e.:
-    \begin{itemize*}
-    \item all subjects can also act as objects (=targets of an access)
-    \item $\rightarrow$ useful for modeling e. g. delegation ("s has the right to grant s' her read-right")
-    \item objects in $O\backslash S$: pure objects
-    \end{itemize*}
+    \begin{itemize*}
+    \item all subjects can also act as objects (= targets of an access)
+    \item $\rightarrow$ useful for modeling e. g. 
delegation ("s has the right to grant s' her read-right") + \item objects in $O\backslash S$: pure objects + \end{itemize*} \item Each $o\in O$ has a type from a type set $T$ assigned through a mapping $type:O\rightarrow T$ \item An HRU model is a special case of a TAM model: - \begin{itemize*} - \item $T=\{tSubject,tObject\}$ - \item $\forall s\in S:type(s)=tSubject; \forall o\in O\backslash S:type(o)=tObject$ - \end{itemize*} + \begin{itemize*} + \item $T=\{tSubject,tObject\}$ + \item $\forall s\in S:type(s)=tSubject; \forall o\in O\backslash S:type(o)=tObject$ + \end{itemize*} \end{itemize*} - > TAM Security Model - > - > A TAM model is a deterministic automaton $⟨Q,\sum,\delta,q_0 ,T,R⟩$ where - \begin{itemize*} - \item $Q= 2^S\times 2^O\times TYPE\times M$ is the state space where $S$ and $O$ are subjects set and objects set as in HRU, where $S\subseteq O$, $TYPE=\{type|type:O\rightarrow T\}$ is a set of possible type functions, $M$ is the set of possible $ACMs$ as in HRU, - \item $\sum=OP\times X$ is the (finite) input alphabet where $OP$ is a set of operations as in HRU, $X=O^k$ is a set of $k$-dimensional vectors of arguments (objects) of these operations, - \item $\delta:Q\times\sum\rightarrow Q$ is the state transition function, - \item $q_0\in Q$ is the initial state, - \item $T$ is a static (finite) set of types, - \item $R$ is a (finite) set of access rights. - \end{itemize*} + \note{TAM Security Model}{A TAM model is a deterministic automaton $⟨Q,\sum,\delta,q_0 ,T,R⟩$ where + \begin{itemize*} + \item $Q= 2^S\times 2^O\times TYPE\times M$ is the state space where $S$ and $O$ are subjects set and objects set as in HRU, where $S\subseteq O$, $TYPE=\{type|type:O\rightarrow T\}$ is a set of possible type functions, $M$ is the set of possible $ACMs$ as in HRU, + \item $\sum=OP\times X$ is the (finite) input alphabet where $OP$ is a set of operations as in HRU, $X=O^k$ is a set of $k$-dimensional vectors of arguments (objects) of these operations, + \item $\delta:Q\times\sum\rightarrow Q$ is the state transition function, + \item $q_0\in Q$ is the initial state, + \item $T$ is a static (finite) set of types, + \item $R$ is a (finite) set of access rights. + \end{itemize*} + } State Transition Scheme (STS) $\delta:Q\times\sum\rightarrow Q$ is defined by a set of specifications: @@ -2051,14 +1319,14 @@ TAM-specific \begin{itemize*} \item Primitives: - \begin{itemize*} - \item enter r into m($x_s$,$x_o$) - \item delete r from m($x_s$,$x_o$) - \item create subject $x_s$ of type $t_s$ - \item create object $x_o$ of type $t_o$ - \item destroy subject $x_s$ - \item destroy object $x_o$ - \end{itemize*} + \begin{itemize*} + \item enter r into m($x_s$,$x_o$) + \item delete r from m($x_s$,$x_o$) + \item create subject $x_s$ of type $t_s$ + \item create object $x_o$ of type $t_o$ + \item destroy subject $x_s$ + \item destroy object $x_o$ + \end{itemize*} \item Observation: $S$ and $O$ are dynamic (as in HRU), thus $type:O\rightarrow T$ must be dynamic too (cf. definition of $Q$ in TAM). 
\end{itemize*}

@@ -2066,41 +1334,41 @@
     \begin{itemize*}
     \item Example Scenario: Originator Controlled Access Rights (ORCON Policy)
     \item Goal: To illustrate usefulness/convenience of type system
-    \begin{itemize*}
-    \item ORCON describes sub-problem of larger policies
-    \item Information flow confinement required by ORCON is tricky to do in HRU ("This information may not flow beyond ...")
-    \end{itemize*}
+    \begin{itemize*}
+    \item ORCON describes a sub-problem of larger policies
+    \item Information flow confinement required by ORCON is tricky to do in HRU ("This information may not flow beyond ...")
+    \end{itemize*}
     \item The Problem
-    \begin{itemize*}
-    \item Creator/owner of a document shouldpermanently retain controlover its accesses
-    \item Neither direct nor indirect (by copying) right proliferation
-    \item Application scenarios: Digital rights management, confidential sharing (online social networks!)
-    \item %![](Assets/Systemsicherheit-orcon-problem.png)
-    \end{itemize*}
+    \begin{itemize*}
+    \item Creator/owner of a document should permanently retain control over its accesses
+    \item Neither direct nor indirect (by copying) right proliferation
+    \item Application scenarios: Digital rights management, confidential sharing (online social networks!)
+    \item %![](Assets/Systemsicherheit-orcon-problem.png)
+    \end{itemize*}
     \item Solution with TAM
-    \begin{itemize*}
-    \item Idea: A confined subject type that can never execute any operation other than reading
-    \item Model Initialization:
-    \begin{itemize*}
-    \item Subjects: $S_0=\{ann,bob,chris\}$
-    \item Objects: $O_0 =S_0\cup\{projectX\}$
-    \item Operations: $\rightarrow$ next ...
-    \item Rights: $R=\{read,write,cread,own,parent\}$
-    \item Types: $T=\{s,cs,co\}$ (regular subject,confined subject/object)
-    \item $type_0$:
-    \begin{itemize*}
-    \item $type_0(ann)=s$
-    \item $type_0(bob)=s$
-    \item $type_0(projectX)=co$
-    \end{itemize*}
-    \end{itemize*}
-    \end{itemize*}
+    \begin{itemize*}
+    \item Idea: A confined subject type that can never execute any operation other than reading
+    \item Model Initialization:
+    \begin{itemize*}
+    \item Subjects: $S_0=\{ann,bob,chris\}$
+    \item Objects: $O_0 =S_0\cup\{projectX\}$
+    \item Operations: $\rightarrow$ next ... 
+ \item Rights: $R=\{read,write,cread,own,parent\}$
+ \item Types: $T=\{s,cs,co\}$ (regular subject, confined subject/object)
+ \item $type_0$:
+ \begin{itemize*}
+ \item $type_0(ann)=s$
+ \item $type_0(bob)=s$
+ \item $type_0(projectX)=co$
+ \end{itemize*}
+ \end{itemize*}
+ \end{itemize*}
\item Model Behavior (Example)
- \begin{itemize*}
- \item ann creates ORCON object projectX (STS command createOrconObject)
- \item ann grants cread ("confined read") right for projectX to bob (STS command grantCRead)
- \item bob uses cread to create confined subject chris with permission to read projectX (STS command useCRead)
- \end{itemize*}
+ \begin{itemize*}
+ \item ann creates ORCON object projectX (STS command createOrconObject)
+ \item ann grants cread ("confined read") right for projectX to bob (STS command grantCRead)
+ \item bob uses cread to create confined subject chris with permission to read projectX (STS command useCRead)
+ \end{itemize*}
\end{itemize*}

\begin{tabular}{l|l|l|l|l}
@@ -2113,7 +1381,7 @@
Model Behavior (STS): The State Transition Scheme
\begin{itemize*}
\item createOrconObject
- \begin{lstlisting}[
+ \begin{lstlisting}[
language=Bash,
showspaces=false
]
@@ -2128,7 +1396,7 @@
\end{lstlisting}

\item grantCRead
- \begin{lstlisting}[
+ \begin{lstlisting}[
language=Bash,
showspaces=false
]
@@ -2140,7 +1408,7 @@
\end{lstlisting}

\item useCRead
- \begin{lstlisting}[
+ \begin{lstlisting}[
language=Bash,
showspaces=false
]
@@ -2154,7 +1422,7 @@
\end{lstlisting}

\item Enable ann to revoke cread from bob:
- \begin{lstlisting}[
+ \begin{lstlisting}[
language=Bash,
showspaces=false
]
@@ -2165,7 +1433,7 @@
fi
\end{lstlisting}
\item Enable ann to destroy conf. object projectX:
- \begin{lstlisting}[
+ \begin{lstlisting}[
language=Bash,
showspaces=false
]
@@ -2176,7 +1444,7 @@
fi
\end{lstlisting}
\item Enable ann to destroy conf. subject chris:
- \begin{lstlisting}[
+ \begin{lstlisting}[
language=Bash,
showspaces=false
]
@@ -2187,7 +1455,7 @@
fi
\end{lstlisting}
\item Enable bob to destroy conf. subject chris:
- \begin{lstlisting}[
+ \begin{lstlisting}[
language=Bash,
showspaces=false
]
@@ -2201,23 +1469,23 @@

\begin{itemize*}
\item Commands 1.-3.:
- \begin{itemize*}
- \item Authorize the steps in the example above
- \item Are monotonic
- \end{itemize*}
+ \begin{itemize*}
+ \item Authorize the steps in the example above
+ \item Are monotonic
+ \end{itemize*}
\item Commands 4.-7.:
- \begin{itemize*}
- \item Will control right revocation $\rightarrow$ essence of originator control
- \item Are not monotonic (consequences ...)
+ \end{itemize*}
\item Summary
- \begin{itemize*}
- \item Contributions of ORCON Example
- \item Owner ("originator") retains full control over
- \item Use of her confined objects by third parties $\rightarrow$ transitive right revocation
- \item Subjects using (or misusing) these objects $\rightarrow$ destruction of these subjects
- \item Subjects using such objects are confined: cannot forward read information
- \end{itemize*}
+ \begin{itemize*}
+ \item Contributions of ORCON Example
+ \item Owner ("originator") retains full control over
+ \item Use of her confined objects by third parties $\rightarrow$ transitive right revocation
+ \item Subjects using (or misusing) these objects $\rightarrow$ destruction of these subjects
+ \item Subjects using such objects are confined: cannot forward read information
+ \end{itemize*}
\end{itemize*}

\paragraph{TAM Safety Decidability}
@@ -2232,17 +1500,14 @@

\paragraph{Acyclic TAM Models}
Auxiliary analysis tools for TAM models:
- > Parent- and Child-Types
- >
- > For any operation $op$ with arguments $⟨x_1,t_1⟩,⟨x_2,t_2⟩,...,⟨x_k,t_k⟩$ in an STS of a TAM model, it holds that $t_i, 1\leq i\leq k$
- \begin{itemize*}
- \item is a child type in op if one of its primitives creates a subject or object $x_i$ of type $t_i$,
- \item is a parent type in op if none of its primitives creates a subject or object $x_i$ of type $t_i$.
- \end{itemize*}
+ \note{Parent- and Child-Types}{For any operation $op$ with arguments $⟨x_1,t_1⟩,⟨x_2,t_2⟩,...,⟨x_k,t_k⟩$ in an STS of a TAM model, it holds that $t_i, 1\leq i\leq k$
+ \begin{itemize*}
+ \item is a child type in $op$ if one of its primitives creates a subject or object $x_i$ of type $t_i$,
+ \item is a parent type in $op$ if none of its primitives creates a subject or object $x_i$ of type $t_i$.
+ \end{itemize*}
+ }

- > Type Creation Graph
- >
- > The type creation graph $TCG=⟨T,E=T\times T⟩$ for the STS of a TAM model is a directed graph with vertex set $T$ and an $edge⟨u,v⟩\in E$ iff $\exists op\in OP:u$ is a parent type in $op\wedge v$ is a child type in op.
+ \note{Type Creation Graph}{The type creation graph $TCG=⟨T,E\subseteq T\times T⟩$ for the STS of a TAM model is a directed graph with vertex set $T$ and an edge $⟨u,v⟩\in E$ iff $\exists op\in OP:u$ is a parent type in $op\wedge v$ is a child type in $op$.}

Example STS:
\begin{lstlisting}[
@@ -2268,32 +1533,30 @@

Safety Decidability: We call a TAM model acyclic iff its TCG is acyclic.

- > Theorem [Sandhu, 1992, Theorem 5]
- >
- > Safety of a ternary, acyclic, monotonous TAM model (TAMTAM) is decidable in polynomial time in the size of $m_0$.
+ \note{Theorem [Sandhu, 1992, Theorem 5]}{Safety of a ternary, acyclic, monotonic TAM model (TAMTAM) is decidable in polynomial time in the size of $m_0$.}

\begin{itemize*}
\item Crucial property acyclic, intuitively:
- \begin{itemize*}
- \item Evolution of the system (protection state transitions) checks both rights in the ACMas well as argument types
- \item TCG is acyclic $\Rightarrow\exists$ a finite sequence of possible state transitions after which no input tuple with argument types, that were not already considered before, can be found
- \item One may prove that an algorithm, which tries to expandall possible different follow-up states from $q_0$, may terminate after this finite sequence
- \item Proof details: SeeSandhu [1992].
- \end{itemize*}
+ \begin{itemize*}
+ \item Evolution of the system (protection state transitions) checks both rights in the ACM as well as argument types
+ \item TCG is acyclic $\Rightarrow\exists$ a finite sequence of possible state transitions after which no input tuple with argument types not already considered before can be found
+ \item One may prove that an algorithm, which tries to expand all possible different follow-up states from $q_0$, may terminate after this finite sequence
+ \item Proof details: See Sandhu [1992].
+ \end{itemize*}
\end{itemize*}

Expressive Power of TAMTAM
\begin{itemize*}
\item MTAM: obviously same expressive power as monotonic HRU (MHRU) $\rightarrow$ cannot model:
- \begin{itemize*}
- \item transfer of rights: "take r from ... and in turn grant r to ..."
- \item countdown rights: "r can only be used n times"
- \end{itemize*}
+ \begin{itemize*}
+ \item transfer of rights: "take r from ... and in turn grant r to ..."
+ \item countdown rights: "r can only be used n times"
+ \end{itemize*}
\item ORCON example (and many others): allow us to ignore the non-monotonic commands from the STS, e.g. 4.-7., since they
- \begin{itemize*}
- \item only remove rights
- \item are reversible (e. g.: undo 4. by 2.; compensate 7. by 3. where the new subject takes roles of the destroyed one)
- \end{itemize*}
+ \begin{itemize*}
+ \item only remove rights
+ \item are reversible (e. g.: undo 4. by 2.; compensate 7. by 3. where the new subject takes the role of the destroyed one)
+ \end{itemize*}
\item AMTAM: most MTAM STS may be re-written as acyclic (cf. ORCON example)
\item TAMTAM: expressive power equivalent to AMTAM
\end{itemize*}
@@ -2307,55 +1570,55 @@

IBAC Summary
\begin{itemize*}
\item We May Now
- \begin{itemize*}
- \item Model identity-based AC policies (IBAC)
- \item Analyze them w. r. t. basic security properties (right proliferation)
- \item $\rightarrow$ Minimize specification errors
- \item $\rightarrow$ Minimize implementation errors
- \end{itemize*}
+ \begin{itemize*}
+ \item Model identity-based AC policies (IBAC)
+ \item Analyze them w. r. t. basic security properties (right proliferation)
+ \item $\rightarrow$ Minimize specification errors
+ \item $\rightarrow$ Minimize implementation errors
+ \end{itemize*}
\item Approach
- \begin{itemize*}
- \item Unambiguous policy representation through formal notation
- \item Prediction and/or verification of mission-critical properties
- \item Derivation of implementation concepts
- \end{itemize*}
+ \begin{itemize*}
+ \item Unambiguous policy representation through formal notation
+ \item Prediction and/or verification of mission-critical properties
+ \item Derivation of implementation concepts
+ \end{itemize*}
\item Model Range
- \begin{itemize*}
- \item Static models:
- \begin{itemize*}
- \item Access control function (ACF): $f:S\times O\times OP\rightarrow \{true,false\}$
- \item Access control matrix (ACM): $m:S\times O\rightarrow 2^{OP}$
- \item $\rightarrow$ Static analysis: Which rights are assigned to whom, which (indirect) information flows are possible
- \item $\rightarrow$ Implementation: Access control lists (ACLs), e.g. in OS, (DB)IS
- \end{itemize*}
- \item Dynamic models:
- \begin{itemize*}
- \item ACM plus deterministic automaton $\rightarrow$ Analysis of dynamic behavior: HRU safety
- \begin{itemize*}
- \item generally undecidable
- \item decidable under specific restrictions: monotonous mono-conditional, static, typed, etc.
- \item identifying and explaining safety-violations, in case such (are assumed to) exists: heuristic analysis algorithms
- \end{itemize*}
- \end{itemize*}
- \end{itemize*}
+ \begin{itemize*}
+ \item Static models:
+ \begin{itemize*}
+ \item Access control function (ACF): $f:S\times O\times OP\rightarrow \{true,false\}$
+ \item Access control matrix (ACM): $m:S\times O\rightarrow 2^{OP}$
+ \item $\rightarrow$ Static analysis: Which rights are assigned to whom, which (indirect) information flows are possible
+ \item $\rightarrow$ Implementation: Access control lists (ACLs), e.g. in OS, (DB)IS
+ \end{itemize*}
+ \item Dynamic models:
+ \begin{itemize*}
+ \item ACM plus deterministic automaton $\rightarrow$ Analysis of dynamic behavior: HRU safety
+ \begin{itemize*}
+ \item generally undecidable
+ \item decidable under specific restrictions: monotonic, mono-conditional, static, typed, etc.
+ \item identifying and explaining safety violations, in case they (are assumed to) exist: heuristic analysis algorithms
+ \end{itemize*}
+ \end{itemize*}
+ \end{itemize*}
\item Limitations
- \begin{itemize*}
- \item IBAC models are fundamental: KISS
- \item IBAC models provide basic expressiveness only:
- \begin{itemize*}
- \item Comparable to "assembler programs for writing AC policies"
- \item Imagine writing a sophisticated end-user application in assembler:
- \begin{itemize*}
- \item reserve and keep track of memory layout and addresses $\approx$ create and maintain individual rights for thousands of subjects, billions of objects
- \item display comfortable GUI by writing to the video card framebuffer $\approx$ specify sophisticated workflows through an HRU STS
- \end{itemize*}
- \end{itemize*}
- \item For more application-oriented policy semantics:
- \begin{itemize*}
- \item Large information systems: many users, many databases, files, ... $\rightarrow$ Scalability problem
- \item Access decisions not just based on subjects, objects, and operations $\rightarrow$ Abstraction problem
- \end{itemize*}
- \end{itemize*}
+ \begin{itemize*}
+ \item IBAC models are fundamental: KISS
+ \item IBAC models provide basic expressiveness only:
+ \begin{itemize*}
+ \item Comparable to "assembler programs for writing AC policies"
+ \item Imagine writing a sophisticated end-user application in assembler:
+ \begin{itemize*}
+ \item reserve and keep track of memory layout and addresses $\approx$ create and maintain individual rights for thousands of subjects, billions of objects
+ \item display a comfortable GUI by writing to the video card framebuffer $\approx$ specify sophisticated workflows through an HRU STS
+ \end{itemize*}
+ \end{itemize*}
+ \item For more application-oriented policy semantics:
+ \begin{itemize*}
+ \item Large information systems: many users, many databases, files, ...
$\rightarrow$ Scalability problem
+ \item Access decisions not just based on subjects, objects, and operations $\rightarrow$ Abstraction problem
+ \end{itemize*}
+ \end{itemize*}
\end{itemize*}

$\rightarrow$ "New" paradigm (early-mid 90s): Role-based Access Control
@@ -2371,56 +1634,54 @@

Goals of RBAC:
\begin{itemize*}
\item Solving these problems results in smaller modeling effort and thus a smaller chance of human errors made in the process:
- \begin{itemize*}
- \item Improved scalability and manageability
- \item Improved, application-oriented semantics: roles$\approx$functions in organizations
- \end{itemize*}
+ \begin{itemize*}
+ \item Improved scalability and manageability
+ \item Improved, application-oriented semantics: roles $\approx$ functions in organizations
+ \end{itemize*}
\end{itemize*}

RBAC Application Domains
\begin{itemize*}
\item Public health care systems
- \begin{itemize*}
- \item Roles: Patient, physician, therapist, pharmacist, insurer, legislator, ...
- \end{itemize*}
+ \begin{itemize*}
+ \item Roles: Patient, physician, therapist, pharmacist, insurer, legislator, ...
+ \end{itemize*}
\item Financial services
- \begin{itemize*}
- \item Roles: Client, consultant, analyst, product manager, ...
- \end{itemize*}
+ \begin{itemize*}
+ \item Roles: Client, consultant, analyst, product manager, ...
+ \end{itemize*}
\item Operating systems
- \begin{itemize*}
- \item Roles: System admin, webserver admin, database admin, key account user, user, ...
- \end{itemize*}
+ \begin{itemize*}
+ \item Roles: System admin, webserver admin, database admin, key account user, user, ...
+ \end{itemize*}
\end{itemize*}

RBAC Idea
\begin{itemize*}
\item Models include smart abstraction: roles
\item Access control rules are specified based on roles instead of identities:
- \begin{itemize*}
- \item "All ward physiciansare allowed to read EPRs."
- \item "Allnursesare allowed to log body temperature."
- \end{itemize*}
+ \begin{itemize*}
+ \item "All ward physicians are allowed to read EPRs."
+ \item "All nurses are allowed to log body temperature."
+ \end{itemize*}
\item Compared to IBAC
- \begin{itemize*}
- \item IBAC Semantics:
- \begin{itemize*}
- \item Subjects, objects, and rights for executing operations
- \item Access rules are based onidentity of individualsubjects and objects
- \end{itemize*}
- \item RBAC Semantics:
- \begin{itemize*}
- \item Users, roles, and rights for executing operations
- \item Access rules are based onrolesof users $\rightarrow$ on assignments:
- \end{itemize*}
- \end{itemize*}
+ \begin{itemize*}
+ \item IBAC Semantics:
+ \begin{itemize*}
+ \item Subjects, objects, and rights for executing operations
+ \item Access rules are based on the identity of individual subjects and objects
+ \end{itemize*}
+ \item RBAC Semantics:
+ \begin{itemize*}
+ \item Users, roles, and rights for executing operations
+ \item Access rules are based on the roles of users $\rightarrow$ on assignments:
+ \end{itemize*}
+ \end{itemize*}
\end{itemize*}

RBAC Security Model Definition
- > Basic RBAC model: "$RBAC_0$" [Sandhu, 1994]:
- >
- > An RBAC 0 model is a tuple $⟨U,R,P,S,UA,PA,user,roles⟩$ where
+ \note{Basic RBAC model: "$RBAC_0$" [Sandhu, 1994]}{An $RBAC_0$ model is a tuple $⟨U,R,P,S,UA,PA,user,roles⟩$ where
\begin{itemize*}
\item U is a set of user identifiers,
\item R is a set of role identifiers,
@@ -2431,6 +1692,7 @@
\item $user:S\rightarrow U$ is a total function mapping sessions to users,
\item $roles:S\rightarrow 2^R$ is a total function mapping sessions to sets of roles such that $\forall s\in S:r\in roles(s)\Rightarrow ⟨user(s),r⟩\in UA$.
\end{itemize*}
+ }

Interpretation
\begin{itemize*}
@@ -2440,15 +1702,15 @@
\item The user-role-relation $UA\subseteq U\times R$ defines which roles are available to users at any given time $\rightarrow$ must be assumed during runtime first, before they are usable!
\item The permission-role-relation $PA\subseteq P\times R$ defines which permissions are associated with roles
\item $UA$ and $PA$ describe static policy rules: Roles available to a user are not considered to possibly change, same with permissions associated with a role. Examples:
- \begin{itemize*}
- \item "Bob may assume the role of a developer; Ann may assume the role of a developer or a project manager; ..."
- \item "A developer may read and write the project documentation; a project manager may create branches of a source code repository; ..."
- \end{itemize*}
+ \begin{itemize*}
+ \item "Bob may assume the role of a developer; Ann may assume the role of a developer or a project manager; ..."
+ \item "A developer may read and write the project documentation; a project manager may create branches of a source code repository; ..."
+ \end{itemize*}
\item Sessions $S$ describe dynamic assignments of roles $\rightarrow$ a session $s\in S$ models when a user is logged in (where she may use some role(s) available to her as per $UA$):
- \begin{itemize*}
- \item The session-user-mapping user: $S\rightarrow U$ associates a session with its ("owning") user
- \item The session-roles-mapping roles: $S\rightarrow 2^R$ associates a session with the set of roles currently assumed by that user (active roles)
- \end{itemize*}
+ \begin{itemize*}
+ \item The session-user-mapping user: $S\rightarrow U$ associates a session with its ("owning") user
+ \item The session-roles-mapping roles: $S\rightarrow 2^R$ associates a session with the set of roles currently assumed by that user (active roles)
+ \end{itemize*}
\end{itemize*}
%![](Assets/Systemsicherheit-rbac-0.png)
@@ -2461,17 +1723,19 @@
\item Authorization in practice: access rules have to be defined for operations on objects (cf. IBAC)
\item IBAC approach: access control function $f:S\times O\times OP\rightarrow \{true,false\}$
\item RBAC approach: implicitly defined through $P\rightarrow$ made explicit: $P\subseteq O\times OP$ is a set of permission tuples $⟨o,op⟩$ where
- \begin{itemize*}
- \item $o\in O$ is an object from a set of object identifiers,
- \item $op\in OP$ is an operation from a set of operation identifiers.
- \end{itemize*}
+ \begin{itemize*}
+ \item $o\in O$ is an object from a set of object identifiers,
+ \item $op\in OP$ is an operation from a set of operation identifiers.
+ \end{itemize*}
\item We may now define the $ACF$ for $RBAC_0$:
\end{itemize*}

- > $RBAC_0$ ACF
- >
- > $f_{RBAC_0}:U \times O\times OP\rightarrow\{true,false\}$ where
- > $f_{RBAC_0} (u,o,op)= \begin{cases} true, \quad \exists r\in R,s\in S:u=user(s)\wedge r\in roles(s)\wedge ⟨⟨o,op⟩,r⟩ \in PA \\ false, \quad\text{ otherwise } \end{cases}$.
+ \note{$RBAC_0$ ACF}{
+ \begin{itemize*}
+ \item $f_{RBAC_0}:U \times O\times OP\rightarrow\{true,false\}$ where
+ \item $f_{RBAC_0} (u,o,op)= \begin{cases} true, \quad \exists r\in R,s\in S:u=user(s)\wedge r\in roles(s)\wedge ⟨⟨o,op⟩,r⟩ \in PA \\ false, \quad\text{ otherwise } \end{cases}$.
+ \end{itemize*}
+ }

\paragraph{RBAC96 Model Family}
Sandhu et al. [1996]
@@ -2486,37 +1750,36 @@

$RBAC_1$: Role Hierarchies
\begin{itemize*}
\item Observation: Roles in organizations often overlap:
- \begin{itemize*}
- \item Users in different roles havecommon permissions: "Any project manager must have the same permissions as any developer in the same project."
- \item Approach 1: disjoint permissions for roles proManager and proDev $\rightarrow$ any proManager user must always have proDev assigned and activated for any of her workflows $\rightarrow$ role assignment redundancy
- \item Approach 2: overlapping permissions: $\forall p\in P:⟨p,proDev⟩ \in PA\Rightarrow ⟨p,proManager⟩ \in PA\rightarrow$ any permission for project developers must be assigned to two different roles $\rightarrow$ role definition redundancy
- \item Two types of redundancy $\rightarrow$ undermines scalability goal of RBAC!
- \end{itemize*}
+ \begin{itemize*}
+ \item Users in different roles have common permissions: "Any project manager must have the same permissions as any developer in the same project."
+ \item Approach 1: disjoint permissions for roles proManager and proDev $\rightarrow$ any proManager user must always have proDev assigned and activated for any of her workflows $\rightarrow$ role assignment redundancy
+ \item Approach 2: overlapping permissions: $\forall p\in P:⟨p,proDev⟩ \in PA\Rightarrow ⟨p,proManager⟩ \in PA\rightarrow$ any permission for project developers must be assigned to two different roles $\rightarrow$ role definition redundancy
+ \item Two types of redundancy $\rightarrow$ undermines scalability goal of RBAC!
+ \end{itemize*}
\item Solution
- \begin{itemize*}
- \item Role hierarchy: Eliminates role definition redundancy through permissions inheritance
- \end{itemize*}
+ \begin{itemize*}
+ \item Role hierarchy: Eliminates role definition redundancy through permissions inheritance
+ \end{itemize*}
\item Modeling Role Hierarchies
- \begin{itemize*}
- \item Lattice here: $⟨R,\leq⟩$
- \item Hierarchy expressed through dominance relation: $r_1\leq r_2 \Leftrightarrow r_2$ inherits any permissions from $r_1$
- \item Interpretation
- \begin{itemize*}
- \item Reflexivity: any role consists of ("inherits") its own permissions $\forall r\in R:r\leq r$
- \item Antisymmetry: no two different roles may mutually inherit their respective permissions $\forall r_1 ,r_2\in R:r_1\leq r_2\wedge r_2\leq r_1\Rightarrow r_1=r_2$
- \item Transitivity: permissions may be inherited indirectly $\forall r_1,r_2,r_3\in R:r_1\leq r_2 \wedge r_2\leq r_3\Rightarrow r_1\leq r_3$
- \end{itemize*}
- \end{itemize*}
+ \begin{itemize*}
+ \item Lattice here: $⟨R,\leq⟩$
+ \item Hierarchy expressed through dominance relation: $r_1\leq r_2 \Leftrightarrow r_2$ inherits any permissions from $r_1$
+ \item Interpretation
+ \begin{itemize*}
+ \item Reflexivity: any role consists of ("inherits") its own permissions $\forall r\in R:r\leq r$
+ \item Antisymmetry: no two different roles may mutually inherit their respective permissions $\forall r_1,r_2\in R:r_1\leq r_2\wedge r_2\leq r_1\Rightarrow r_1=r_2$
+ \item Transitivity: permissions may be inherited indirectly $\forall r_1,r_2,r_3\in R:r_1\leq r_2 \wedge r_2\leq r_3\Rightarrow r_1\leq r_3$
+ \end{itemize*}
+ \end{itemize*}
\end{itemize*}

- > $RBAC_1$ Security Model
- >
- > An $RBAC_1$ model is a tuple $⟨U,R,P,S,UA,PA,user,roles,RH⟩$ where
- \begin{itemize*}
- \item $U,R,P,S,UA,PA$ and $user$ are defined as for $RBAC_0$,
- \item $RH\subseteq R\times R$ is a partial order that represents a role hierarchy where $⟨r,r'⟩\in RH\Leftrightarrow r\leq r'$ such that $⟨R,\leq⟩$ is a lattice,
- \item roles is defined as for $RBAC_0$, while additionally holds: $\forall r,r'\in R,\exists s\in S:r\leq r'\wedge r'\in roles(s)\Rightarrow r\in roles(s)$.
- \end{itemize*}
+ \note{$RBAC_1$ Security Model}{An $RBAC_1$ model is a tuple $⟨U,R,P,S,UA,PA,user,roles,RH⟩$ where
+ \begin{itemize*}
+ \item $U,R,P,S,UA,PA$ and $user$ are defined as for $RBAC_0$,
+ \item $RH\subseteq R\times R$ is a partial order that represents a role hierarchy where $⟨r,r'⟩\in RH\Leftrightarrow r\leq r'$ such that $⟨R,\leq⟩$ is a lattice,
+ \item roles is defined as for $RBAC_0$, while additionally the following holds: $\forall r,r'\in R,\forall s\in S:r\leq r'\wedge r'\in roles(s)\Rightarrow r\in roles(s)$.
+ \end{itemize*}
+ }

In prose: When activating any role that inherits permissions from another role, this other role is automatically (by definition) active as well.
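+ A minimal sketch of this inheritance semantics in Python (all names are hypothetical, chosen only for illustration):
+ \begin{lstlisting}[language=Python]
+ # RH as a set of direct dominance pairs (r, r'), meaning r <= r'.
+ RH = {("proDev", "proManager")}
+
+ def dominated(r):
+     """All r0 with r0 <= r: reflexive-transitive closure of RH."""
+     result, frontier = {r}, [r]
+     while frontier:
+         cur = frontier.pop()
+         for lo, hi in RH:
+             if hi == cur and lo not in result:
+                 result.add(lo); frontier.append(lo)
+     return result
+
+ def active_roles(session_roles):
+     """RBAC_1: an active role activates every role it dominates."""
+     return set().union(*(dominated(r) for r in session_roles))
+
+ print(active_roles({"proManager"}))  # both roles are active: proManager, proDev
+ \end{lstlisting}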
\begin{itemize*}
@@ -2528,25 +1791,25 @@

$RBAC_2$: Constraints
\begin{itemize*}
\item Observation: Assuming and activating roles in organizations is often more restricted:
- \begin{itemize*}
- \item Certain roles may not beactive at the same time(same session)for any user: "A payment initiator may not be a payment authorizer at the same time (in the same session)."
- \item Certain roles may not be together assigned to any user: "A purchasing manager never be the same person as the head of internal auditing."
- \item $\rightarrow$ separation of duty (SoD)
- \item While SoD constraints are a more fine-grained type of security requirements to avoid mission-critical risks, there are other types represented by RBAC constraints.
- \end{itemize*}
+ \begin{itemize*}
+ \item Certain roles may not be active at the same time (same session) for any user: "A payment initiator may not be a payment authorizer at the same time (in the same session)."
+ \item Certain roles may never be assigned together to any user: "A purchasing manager may never be the same person as the head of internal auditing."
+ \item $\rightarrow$ separation of duty (SoD)
+ \item While SoD constraints are a more fine-grained type of security requirements to avoid mission-critical risks, there are other types represented by RBAC constraints.
+ \end{itemize*}
\item Constraint Types
- \begin{itemize*}
- \item Separation of duty: mutually exclusive roles
- \item Quantitative constraints: maximum number of roles per user
- \item Temporal constraints: time/date/week/... of role activation (advanced RBAC models, e.g. Bertino et al. [2001])
- \item Factual constraints: assigning or activating roles for specific permissions causally depends on any roles for a certain, other permissions (e.g. only allow user $u$ to activate auditingDelegator role if audit payments permission is usable by $u$)
- \end{itemize*}
+ \begin{itemize*}
+ \item Separation of duty: mutually exclusive roles
+ \item Quantitative constraints: maximum number of roles per user
+ \item Temporal constraints: time/date/week/... of role activation (advanced RBAC models, e.g. Bertino et al. [2001])
+ \item Factual constraints: assigning or activating roles for specific permissions causally depends on roles for certain other permissions (e.g. only allow user $u$ to activate the auditingDelegator role if the audit payments permission is usable by $u$)
+ \end{itemize*}
\item Modeling Constraints: (idea only)
- \begin{itemize*}
- \item $RBAC_2 : ⟨U,R,P,S,UA,PA,user,roles,RE⟩$
- \item $RBAC_3 : ⟨U,R,P,S,UA,PA,user,roles,RH,RE⟩$
- \item where $RE$ is aset of logical expressions over the other model components (such as $UA,PA,user,roles$).
- \end{itemize*}
+ \begin{itemize*}
+ \item $RBAC_2 : ⟨U,R,P,S,UA,PA,user,roles,RE⟩$
+ \item $RBAC_3 : ⟨U,R,P,S,UA,PA,user,roles,RH,RE⟩$
+ \item where $RE$ is a set of logical expressions over the other model components (such as $UA,PA,user,roles$); see the SoD sketch below.
+ \end{itemize*}
+ \end{itemize*}
\end{itemize*}
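+ One constraint from $RE$, rendered as an executable sketch in Python (a toy encoding; the formalism above does not prescribe any concrete syntax, and all names are invented):
+ \begin{lstlisting}[language=Python]
+ # Separation of duty: mutually exclusive roles, checked per session.
+ MUTEX = [{"paymentInitiator", "paymentAuthorizer"}]
+
+ def sod_ok(roles):
+     """roles: session -> set of active roles (the roles mapping)."""
+     return all(not pair <= active   # a mutex pair may never be fully active
+                for active in roles.values() for pair in MUTEX)
+
+ roles = {"s1": {"paymentInitiator"}, "s2": {"paymentAuthorizer"}}
+ print(sod_ok(roles))   # True: the duties stay separated across sessions
+ roles["s1"].add("paymentAuthorizer")
+ print(sod_ok(roles))   # False: SoD violated within session s1
+ \end{lstlisting}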
\paragraph{RBAC Summary}
\begin{itemize*}
\item Scalability
\item Application-oriented model abstractions
\item Standardization (RBAC96) $\rightarrow$ tool-support for:
- \begin{itemize*}
- \item role engineering (identifying and modeling roles)
- \item model engineering (specifying and validating a model configuration)
- \item static model checking (verifying consistency and plausibility of a model configuration)
- \end{itemize*}
+ \begin{itemize*}
+ \item role engineering (identifying and modeling roles)
+ \item model engineering (specifying and validating a model configuration)
+ \item static model checking (verifying consistency and plausibility of a model configuration)
+ \end{itemize*}
\item Still weak OS-support
- \begin{itemize*}
- \item $\rightarrow$ application-level integrations (e. g. hospital IS, DBIS, ERP systems)
- \item $\rightarrow$ middleware integrations (e. g. XACML, NGAC[Ferraiolo et al., 2016])
- \end{itemize*}
+ \begin{itemize*}
+ \item $\rightarrow$ application-level integrations (e. g. hospital IS, DBIS, ERP systems)
+ \item $\rightarrow$ middleware integrations (e. g. XACML, NGAC [Ferraiolo et al., 2016])
+ \end{itemize*}
\item Limited dynamic analyses w.r.t. automaton-based models
- \begin{itemize*}
- \item cf. HRU:safety properties?
- \item solution approach: automaton-based RBAC96 model
- \item $\rightarrow$ DRBAC 0 ... 3 [Schlegel and Amthor, 2020]
- \end{itemize*}
+ \begin{itemize*}
+ \item cf. HRU: safety properties?
+ \item solution approach: automaton-based RBAC96 model
+ \item $\rightarrow$ DRBAC$_0$ ... DRBAC$_3$ [Schlegel and Amthor, 2020]
+ \end{itemize*}
\end{itemize*}

@@ -2577,16 +1840,16 @@
Goals of ABAC:
\begin{itemize*}
\item Providing a more versatile solution than RBAC for these problems, especially for open and distributed systems.
- \begin{itemize*}
- \item Scalability and manageability
- \item Application-oriented model abstractions
- \item Model semantics meet functional requirements of open systems:
- \begin{itemize*}
- \item user IDs, INode IDs, ... only available locally, scaling bad
- \item roles that gather permissions model functions limited to specific organizational structure; only assignable to users
- \end{itemize*}
- \item $\rightarrow$ Consider application-specific context of an access: attributes of subjects and objects(e. g. age, location, trust level, ...)
- \end{itemize*}
+ \begin{itemize*}
+ \item Scalability and manageability
+ \item Application-oriented model abstractions
+ \item Model semantics meet functional requirements of open systems:
+ \begin{itemize*}
+ \item user IDs, INode IDs, ... only available locally, scale badly
+ \item roles that gather permissions model functions limited to a specific organizational structure; only assignable to users
+ \end{itemize*}
+ \item $\rightarrow$ Consider the application-specific context of an access: attributes of subjects and objects (e. g. age, location, trust level, ...)
+ \end{itemize*}
\end{itemize*}

Idea: Generalizing the principle of indirection already known from RBAC
@@ -2595,11 +1858,11 @@
\item RBAC: indirection via roles assigned to subjects
\item ABAC: indirection via arbitrary attributes assigned to subjects or objects
\item Attributes model application-specific properties of the system entities involved in any access, e. g.:
- \begin{itemize*}
- \item Age, location, trustworthiness of a application/user/device/...
- \item Size, creation time, premium-access classification of web resource/multimedia content/document/...
- \item Risk quantification involved with these subjects and objects (e. g. access from an IP address/proxy domain reportedly belonging to a TOR network)
- \end{itemize*}
+ \begin{itemize*}
+ \item Age, location, trustworthiness of an application/user/device/...
+ \item Size, creation time, premium-access classification of web resource/multimedia content/document/...
+ \item Risk quantification involved with these subjects and objects (e. g. access from an IP address/proxy domain reportedly belonging to a TOR network)
+ \end{itemize*}
\end{itemize*}

\paragraph{ABAC Access Control Function}
@@ -2617,18 +1880,17 @@
\item Here: minimal common formalism, based on Servos and Osborn [2017]
\end{itemize*}

- > ABAC Security Model
- >
- > An ABAC security model is a tuple $⟨S,O,AS,AO,attS,attO,OP,AAR⟩$ where
- \begin{itemize*}
- \item $S$ is a set of subject identifiers and $O$ is a set of object identifiers,
- \item $A_S=V_S^1 \times...\times V_S^n$ is a set of subject attributes, where each attribute is an n-tuple of values from arbitrary domains $V_S^i$, $1\leq i \leq n$,
- \item $A_O=V_O^1\times...\times V_O^m$ is a corresponding set of object attributes, based on values from arbitrary domains $V_O^j$, $1\leq j \leq m$,
- \item $att_S:S\rightarrow A_S$ is the subject attribute assignment function,
- \item $att_O:O\rightarrow A_O$ is the object attribute assignment function,
- \item $OP$ is a set of operation identifiers,
- \item $AAR\subseteq \Phi\times OP$ is the authorization relation.
- \end{itemize*}
+ \note{ABAC Security Model}{An ABAC security model is a tuple $⟨S,O,A_S,A_O,att_S,att_O,OP,AAR⟩$ where
+ \begin{itemize*}
+ \item $S$ is a set of subject identifiers and $O$ is a set of object identifiers,
+ \item $A_S=V_S^1 \times...\times V_S^n$ is a set of subject attributes, where each attribute is an n-tuple of values from arbitrary domains $V_S^i$, $1\leq i \leq n$,
+ \item $A_O=V_O^1\times...\times V_O^m$ is a corresponding set of object attributes, based on values from arbitrary domains $V_O^j$, $1\leq j \leq m$,
+ \item $att_S:S\rightarrow A_S$ is the subject attribute assignment function,
+ \item $att_O:O\rightarrow A_O$ is the object attribute assignment function,
+ \item $OP$ is a set of operation identifiers,
+ \item $AAR\subseteq \Phi\times OP$ is the authorization relation.
+ \end{itemize*}
+ }

Interpretation
\begin{itemize*}
@@ -2643,11 +1905,13 @@

\paragraph{ABAC Access Control Function}
With conditions from $\Phi$ for executing operations in $OP$, $AAR$ determines the ACF of the model:
- > ABAC ACF
- >
- > $f_{ABAC}:S\times O\times OP\rightarrow\{true,false\}$ where
- > $f_{ABAC}(s,o,op)= \begin{cases} true, \quad\exists ⟨\phi,op⟩\in AAR:\phi(s,o)=true\\ false, \quad\text{ otherwise } \end{cases}$.
- > We call $\phi$ an authorization predicate for $op$.
+ \note{ABAC ACF}{
+ \begin{itemize*}
+ \item $f_{ABAC}:S\times O\times OP\rightarrow\{true,false\}$ where
+ \item $f_{ABAC}(s,o,op)= \begin{cases} true, \quad\exists ⟨\phi,op⟩\in AAR:\phi(s,o)=true\\ false, \quad\text{ otherwise } \end{cases}$.
+ \item We call $\phi$ an authorization predicate for $op$ (sketched below).
+ \end{itemize*}
+ }
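+ A toy rendering of $f_{ABAC}$ in Python (the attribute values and the predicate are invented for illustration, loosely in the spirit of the game store example that follows):
+ \begin{lstlisting}[language=Python]
+ # Attribute assignments att_S, att_O as dicts; AAR as (predicate, op) pairs.
+ att_S = {"u1": {"age": 17}, "u2": {"age": 23}}
+ att_O = {"game1": {"rating": 18}}
+
+ def phi_age(s, o):   # one authorization predicate from Phi
+     return att_S[s]["age"] >= att_O[o]["rating"]
+
+ AAR = [(phi_age, "download")]
+
+ def f_abac(s, o, op):
+     """true iff some <phi, op> in AAR authorizes the access."""
+     return any(phi(s, o) for phi, op2 in AAR if op2 == op)
+
+ print(f_abac("u1", "game1", "download"))  # False: under age
+ print(f_abac("u2", "game1", "download"))  # True
+ \end{lstlisting}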
Example 1: Online Game Store
\begin{itemize*}
@@ -2680,18 +1944,18 @@
\item Scalability
\item Application-oriented model abstractions
\item Universality: ABAC can conveniently express
- \begin{itemize*}
- \item IBAC (attributes: IDs)
- \item RBAC (attributes: roles)
- \item MLS (attributes: sensitivity levels $\rightarrow$ next topic)
- \end{itemize*}
+ \begin{itemize*}
+ \item IBAC (attributes: IDs)
+ \item RBAC (attributes: roles)
+ \item MLS (attributes: sensitivity levels $\rightarrow$ next topic)
+ \end{itemize*}
\item Still weak OS-support $\rightarrow$ application-level integrations (increasingly replacing RBAC)
\item Attribute semantics highly diverse, not normalizable $\rightarrow$ no common "standard ABAC" to expect (all too soon ...)
\item Limited dynamic analyses w.r.t. automaton-based models
- \begin{itemize*}
- \item cf. HRU:safety properties?
- \item solution approach: automaton-based ABAC model ...
- \end{itemize*}
+ \begin{itemize*}
+ \item cf. HRU: safety properties?
+ \item solution approach: automaton-based ABAC model ...
+ \end{itemize*}
\end{itemize*}

\subsubsection{Information Flow Models}
@@ -2709,10 +1973,10 @@

Lattices (refreshment)
\begin{itemize*}
\item Terms:
- \begin{itemize*}
- \item $inf_C$: "systemlow"
- \item $sup_C$: "systemhigh"
- \end{itemize*}
+ \begin{itemize*}
+ \item $inf_C$: "system low"
+ \item $sup_C$: "system high"
+ \end{itemize*}
\item $\rightarrow$ notably, a graph described by a lattice
\item is connected
\item has a source: $deg^-(inf_C)= 0$
@@ -2722,42 +1986,41 @@

Implementation of Information Flow Models
\begin{itemize*}
\item Background: Information flows and read/write operations are isomorphic
- \begin{itemize*}
- \item s has read permission w.r.t. o $\Leftrightarrow$ information may flow from o to s
- \item s has write permission w.r.t. o $\Leftrightarrow$ information may flow from s to o
- \end{itemize*}
+ \begin{itemize*}
+ \item s has read permission w.r.t. o $\Leftrightarrow$ information may flow from o to s
+ \item s has write permission w.r.t. o $\Leftrightarrow$ information may flow from s to o
+ \end{itemize*}
\item $\rightarrow$ Implementation by standard AC mechanisms!
\end{itemize*}

Analysis of Information Flow Models
\begin{itemize*}
\item IF Transitivity $\rightarrow$ analysis goal: covert information flows
- \begin{itemize*}
- \item Question: "Is there a possible, sequential usage of read\item and write-permissions that ultimately leads to an unintended information flow?"
- \end{itemize*}
+ \begin{itemize*}
+ \item Question: "Is there a possible, sequential usage of read- and write-permissions that ultimately leads to an unintended information flow?"
+ \end{itemize*}
\item IF Antisymmetry $\rightarrow$ analysis goal: redundancy
- \begin{itemize*}
- \item Question: "Which subjects/object share the same possible information flows and are therefore redundant?"
- \end{itemize*}
+ \begin{itemize*}
+ \item Question: "Which subjects/objects share the same possible information flows and are therefore redundant?" (see the sketch below)
+ \end{itemize*}
\end{itemize*}
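+ Both analysis questions reduce to reachability in the information flow graph; a small Python sketch (edge set and entity names invented for illustration):
+ \begin{lstlisting}[language=Python]
+ # Directed IF edges (a, b): information may flow from a to b.
+ E = {("o1", "s1"), ("s1", "o2"), ("o2", "s1")}
+
+ def reachable(start):
+     """Transitive closure from start: all (covert) flow targets."""
+     seen, stack = set(), [start]
+     while stack:
+         cur = stack.pop()
+         for a, b in E:
+             if a == cur and b not in seen:
+                 seen.add(b); stack.append(b)
+     return seen
+
+ # Transitivity: unintended covert flow o1 -> o2 via s1?
+ print("o2" in reachable("o1"))   # True
+ # Antisymmetry: mutual reachability => equivalent information contents
+ print("s1" in reachable("o2") and "o2" in reachable("s1"))  # True: redundant
+ \end{lstlisting}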
\paragraph{The Denning Model}
One of the first information flow models [Denning, 1976]:
- > Denning Security Model
- >
- > A Denning information flow model is a tuple $⟨S,O,L,cl,\bigoplus⟩$ where
- \begin{itemize*}
- \item S is a set of subjects,
- \item O is a set of objects,
- \item $L=⟨C,\leq⟩$ is a lattice where
- \begin{itemize*}
- \item C is a set of classes,
- \item $\leq$ is a dominance relation wherec $\leq d \Leftrightarrow$ information may flow from c to d,
- \end{itemize*}
- \item $cl:S\cup O\rightarrow C$ is a classification function, and
- \item $\bigoplus:C\times C\rightarrow C$ is a reclassification function.
- \end{itemize*}
+ \note{Denning Security Model}{A Denning information flow model is a tuple $⟨S,O,L,cl,\bigoplus⟩$ where
+ \begin{itemize*}
+ \item S is a set of subjects,
+ \item O is a set of objects,
+ \item $L=⟨C,\leq⟩$ is a lattice where
+ \begin{itemize*}
+ \item C is a set of classes,
+ \item $\leq$ is a dominance relation where $c\leq d \Leftrightarrow$ information may flow from $c$ to $d$,
+ \end{itemize*}
+ \item $cl:S\cup O\rightarrow C$ is a classification function, and
+ \item $\bigoplus:C\times C\rightarrow C$ is a reclassification function.
+ \end{itemize*}
+ }

Interpretation
\begin{itemize*}
@@ -2773,25 +2036,25 @@
\item $S=O=\{cox,kelso,carla,...\}$
\item $C=\{Physician, Anamnesis, Pharmacy, Medication,...\}$
\item dominance relation $\leq$:
- \begin{itemize*}
- \item rule "information may flow from any ward physician to an anamnesis record" $\Leftrightarrow$ Physician $\leq$ Anamnesis
- \item rule "information may flow from a medication record to the pharmacy" $\Leftrightarrow$ Medication $\leq$ Pharmacy
- \end{itemize*}
+ \begin{itemize*}
+ \item rule "information may flow from any ward physician to an anamnesis record" $\Leftrightarrow$ Physician $\leq$ Anamnesis
+ \item rule "information may flow from a medication record to the pharmacy" $\Leftrightarrow$ Medication $\leq$ Pharmacy
+ \end{itemize*}
\item classification cl:
- \begin{itemize*}
- \item $cox=Physician$
- \item $carla=Medication$
- \end{itemize*}
+ \begin{itemize*}
+ \item $cl(cox)=Physician$
+ \item $cl(carla)=Medication$
+ \end{itemize*}
\end{itemize*}

We can now ...
\begin{itemize*}
\item precisely define all information flows valid for a given policy
\item define analysis goals for an IF model w.r.t.
- \begin{itemize*}
- \item Correctness: $\exists$ covert information flows? (transitivity of $\leq$, automation: graph analysis tools)
- \item Redundancy: $\exists$ sets of subjects and objects with (transitively) equivalent information contents? (antisymmetry of $\leq$, automation: graph analysis tools)
- \end{itemize*}
+ \begin{itemize*}
+ \item Correctness: $\exists$ covert information flows? (transitivity of $\leq$, automation: graph analysis tools)
+ \item Redundancy: $\exists$ sets of subjects and objects with (transitively) equivalent information contents? (antisymmetry of $\leq$, automation: graph analysis tools)
+ \end{itemize*}
\item implement a model: through an automatically generated, isomorphic ACM (using already-present ACLs!)
\end{itemize*}

@@ -2801,10 +2064,10 @@
\begin{itemize*}
\item Introducing a hierarchy of information flow classes: levels of trust
\item Subjects and objects are classified:
- \begin{itemize*}
- \item Subjects w.r.t. their trust worthiness
- \item Objects w.r.t. their criticality
- \end{itemize*}
+ \begin{itemize*}
+ \item Subjects w.r.t. their trustworthiness
+ \item Objects w.r.t.
their criticality
+ \end{itemize*}
\item Within this hierarchy, information may flow only in one direction $\rightarrow$ "secure" according to these levels!
\item $\rightarrow \exists$ MLS models for different security goals!
\end{itemize*}

@@ -2823,27 +2086,27 @@
\item Objects $O=\{ProjectXFiles, Timetable, BulletinBoard\}$
\item Subjects $S=\{Ann, Bob\}$
\item Classification of objects (classification level):
- \begin{itemize*}
- \item $cl(ProjectXFiles)=secret$
- \item $cl(Timetable)=confidential$
- \item $cl(BulletinBoard)=pulic$
- \end{itemize*}
+ \begin{itemize*}
+ \item $cl(ProjectXFiles)=secret$
+ \item $cl(Timetable)=confidential$
+ \item $cl(BulletinBoard)=public$
+ \end{itemize*}
\item Classification of subjects (clearance level):
- \begin{itemize*}
- \item $cl(Ann)=confidential$
- \item $cl(Bob)=public$
- \end{itemize*}
+ \begin{itemize*}
+ \item $cl(Ann)=confidential$
+ \item $cl(Bob)=public$
+ \end{itemize*}
\item Neither Ann nor Bob can read ProjectXFiles
\item Ann can
- \begin{itemize*}
- \item write to ProjectXFiles and Timetable
- \item read from Timetable and BulletinBoard
- \end{itemize*}
+ \begin{itemize*}
+ \item write to ProjectXFiles and Timetable
+ \item read from Timetable and BulletinBoard
+ \end{itemize*}
\item Bob can
- \begin{itemize*}
- \item write to all objects
- \item read from BulletinBoard
- \end{itemize*}
+ \begin{itemize*}
+ \item write to all objects
+ \item read from BulletinBoard
+ \end{itemize*}
\end{itemize*}

@@ -2856,10 +2119,10 @@
\item from the Denning model: information flow and lattices
\item from the MLS models: information flow hierarchy
\item from the HRU model:
- \begin{itemize*}
- \item Modeling dynamic behavior: state machine and STS
- \item Model implementation: ACM
- \end{itemize*}
+ \begin{itemize*}
+ \item Modeling dynamic behavior: state machine and STS
+ \item Model implementation: ACM
+ \end{itemize*}
\item $\rightarrow$ application-oriented model engineering by composition of known abstractions
\end{itemize*}

\begin{itemize*}
\item entity sets S,O
\item lattice $⟨C,\leq⟩$ defines information flows by
- \begin{itemize*}
- \item C: classification/clearance levels
- \item $\leq$: hierarchy of trust
- \end{itemize*}
+ \begin{itemize*}
+ \item C: classification/clearance levels
+ \item $\leq$: hierarchy of trust
+ \end{itemize*}
\item classification function $cl$ assigns
- \begin{itemize*}
- \item clearance level from C to subjects
- \item classification level from C to objects
- \end{itemize*}
+ \begin{itemize*}
+ \item clearance level from C to subjects
+ \item classification level from C to objects
+ \end{itemize*}
\item Model’s runtime behavior is specified by a deterministic automaton
\end{itemize*}

- > BLP Security Model
- >
- > A BLP model is a deterministic automaton $⟨S,O,L,Q,\sum,\sigma,q_0,R⟩$ where
- \begin{itemize*}
- \item S and O are (static) subject and object sets,
- \item $L=⟨C,\leq⟩$ is a (static) lattice consisting of
- \begin{itemize*}
- \item the classes set C,
- \item the dominance relation $\leq$,
- \end{itemize*}
- \item $Q=M\times CL$ is the state space where
- \begin{itemize*}
- \item $M=\{m|m:S\times O\rightarrow 2^R\}$ is the set ofpossible ACMs,
- \item $CL=\{cl|cl:S\cup O\rightarrow C\}$ is a set offunctions that classify entities in $S\cup O$,
- \end{itemize*}
- \item $\sum$ is the input alphabet,
- \item $\sigma:Q\times \sum\rightarrow Q$ is the state transition function,
- \item $q_0\in Q$ is the initial state,
- \item $R=\{read,write\}$ is the set of access rights.
- \end{itemize*}
+ \note{BLP Security Model}{A BLP model is a deterministic automaton $⟨S,O,L,Q,\sum,\sigma,q_0,R⟩$ where
+ \begin{itemize*}
+ \item S and O are (static) subject and object sets,
+ \item $L=⟨C,\leq⟩$ is a (static) lattice consisting of
+ \begin{itemize*}
+ \item the classes set C,
+ \item the dominance relation $\leq$,
+ \end{itemize*}
+ \item $Q=M\times CL$ is the state space where
+ \begin{itemize*}
+ \item $M=\{m|m:S\times O\rightarrow 2^R\}$ is the set of possible ACMs,
+ \item $CL=\{cl|cl:S\cup O\rightarrow C\}$ is a set of functions that classify entities in $S\cup O$,
+ \end{itemize*}
+ \item $\sum$ is the input alphabet,
+ \item $\sigma:Q\times \sum\rightarrow Q$ is the state transition function,
+ \item $q_0\in Q$ is the initial state,
+ \item $R=\{read,write\}$ is the set of access rights.
+ \end{itemize*}
+ }

Interpretation
\begin{itemize*}
@@ -2906,16 +2168,16 @@
\item L: models confidentiality hierarchy
\item cl: models classification meta-information about subjects and objects
\item $Q=M\times CL$ models dynamic protection states; includes
- \begin{itemize*}
- \item rights in the ACM,
- \item classification of subjects/objects,
- \item not: S and O (different to HRU $\rightarrow$ consequences for safety analysis?)
- \end{itemize*}
+ \begin{itemize*}
+ \item rights in the ACM,
+ \item classification of subjects/objects,
+ \item not: S and O (different to HRU $\rightarrow$ consequences for safety analysis?)
+ \end{itemize*}
\item Commands in the STS may therefore
- \begin{itemize*}
- \item change rights in the ACM,
- \item reclassify subjects and objects.
- \end{itemize*}
+ \begin{itemize*}
+ \item change rights in the ACM,
+ \item reclassify subjects and objects.
+ \end{itemize*}
\end{itemize*}

\paragraph{Lattice vs. ACM}
@@ -2933,16 +2195,16 @@
Rationale
\begin{itemize*}
\item L is an application-oriented abstraction
- \begin{itemize*}
- \item Supports convenient for model specification
- \item Supports easy model correctness analysis ($\rightarrow$ reachability analyses in graphs)
- \item $\rightarrow$ easy to specify and to analyze
- \end{itemize*}
+ \begin{itemize*}
+ \item Supports convenient model specification
+ \item Supports easy model correctness analysis ($\rightarrow$ reachability analyses in graphs)
+ \item $\rightarrow$ easy to specify and to analyze
+ \end{itemize*}
\item m can be directly implemented by standard OS/DBIS access control mechanisms (ACLs, Capabilities) $\rightarrow$ easy to implement
\item m is determined (= restricted) by L and cl, not vice-versa!
\end{itemize*}

- > Rationale for L and m
+ Rationale for L and m
\begin{itemize*}
\item L and cl control m
\item m provides an easy specification for model implementation
@@ -2959,21 +2221,20 @@

\paragraph{BLP Security}
Help Definitions
- > Read-Security Rule
- > A BLP model state $⟨m,cl⟩$ is called read-secure iff $\forall s\in S,o\in O:read\in m(s,o)\Rightarrow cl(o) \leq cl(s)$.
+ \note{Read-Security Rule}{A BLP model state $⟨m,cl⟩$ is called read-secure iff $\forall s\in S,o\in O:read\in m(s,o)\Rightarrow cl(o) \leq cl(s)$.}

- > Write-Security Rule
- > A BLP model state $⟨m,cl⟩$ is called write-secure iff $\forall s\in S,o\in O:write\in m(s,o)\Rightarrow cl(s)\leq cl(o)$.
+ \note{Write-Security Rule}{A BLP model state $⟨m,cl⟩$ is called write-secure iff $\forall s\in S,o\in O:write\in m(s,o)\Rightarrow cl(s)\leq cl(o)$.}

Note: In some literature, read-security is called "simple security", while write-security is called "$^*$-property". The reasons are historical and somewhat obscure.
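+ The two rules above are directly executable; a minimal Python check (levels encoded as integers, entity names taken from the MLS example above):
+ \begin{lstlisting}[language=Python]
+ # public=0 < confidential=1 < secret=2 (a linear lattice)
+ cl = {"Ann": 1, "Bob": 0,
+       "ProjectXFiles": 2, "Timetable": 1, "BulletinBoard": 0}
+
+ def read_secure(m):
+     return all(cl[o] <= cl[s] for (s, o), r in m.items() if "read" in r)
+
+ def write_secure(m):
+     return all(cl[s] <= cl[o] for (s, o), r in m.items() if "write" in r)
+
+ m = {("Ann", "Timetable"): {"read", "write"},
+      ("Ann", "ProjectXFiles"): {"write"},
+      ("Bob", "BulletinBoard"): {"read", "write"}}
+ print(read_secure(m) and write_secure(m))  # True: state <m, cl> is secure
+
+ m[("Bob", "ProjectXFiles")] = {"read"}     # "read up" violates read-security
+ print(read_secure(m))                      # False
+ \end{lstlisting}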
- > State Security
- > A BLP model state is called secure iff it is both read- and write-secure.
+ \note{State Security}{A BLP model state is called secure iff it is both read- and write-secure.}

- > Model Security
- > A BLP model with initial state $q_0$ is called secure iff
- > 1. $q_0$ is secure and
- > 2. each state reachable from $q_0$ by a finite input sequence is secure.
+ \note{Model Security}{A BLP model with initial state $q_0$ is called secure iff
+ \begin{enumerate*}
+ \item $q_0$ is secure and
+ \item each state reachable from $q_0$ by a finite input sequence is secure.
+ \end{enumerate*}
+ }

The above definition is
\begin{itemize*}
@@ -2987,43 +2248,43 @@
\item Idea: let’s look at properties of the finite and small model components $\rightarrow\sigma\rightarrow$ STS
\end{itemize*}

- 
- > The BLP Basic Security Theorem
- >
- > A BLP model $⟨S,O,L,Q,\sum,\sigma,q_0,R⟩$ is secure iff both of the following holds:
- > 1. $q_0$ is secure
- > 2. $\sigma$ is build such that for each state q reachable from $q_0$ by a finite input sequence, where $q=⟨m,cl⟩$ and $q'=\sigma(q,\delta)=m',cl',\forall s\in S, o\in O,\delta\in\sum$ the following holds:
- \begin{itemize*}
- \item Read-security conformity:
- \begin{itemize*}
- \item read $\not\in m(s,o)\wedge read\in m'(s,o)\Rightarrow cl'(o)\leq cl'(s)$
- \item read $\in m(s,o) \wedge\lnot (cl'(o)\leq cl'(s)) \Rightarrow read \not\in m'(s,o)$
- \end{itemize*}
- \item Write-security conformity:
- \begin{itemize*}
- \item write $\not\in m(s,o)\wedge write \in m'(s,o)\Rightarrow cl'(s)\leq cl'(o)$
- \item write $\in m(s,o)\wedge\lnot(cl'(s)\leq cl'(o)) \Rightarrow write \not\in m'(s,o)$
- \end{itemize*}
- \end{itemize*}
+ \note{The BLP Basic Security Theorem}{A BLP model $⟨S,O,L,Q,\sum,\sigma,q_0,R⟩$ is secure iff both of the following holds:
+ \begin{enumerate*}
+ \item $q_0$ is secure
+ \item $\sigma$ is built such that for each state q reachable from $q_0$ by a finite input sequence, where $q=⟨m,cl⟩$ and $q'=\sigma(q,\delta)=⟨m',cl'⟩,\forall s\in S, o\in O,\delta\in\sum$ the following holds:
+ \end{enumerate*}
+ \begin{itemize*}
+ \item Read-security conformity:
+ \begin{itemize*}
+ \item read $\not\in m(s,o)\wedge read\in m'(s,o)\Rightarrow cl'(o)\leq cl'(s)$
+ \item read $\in m(s,o) \wedge\lnot (cl'(o)\leq cl'(s)) \Rightarrow read \not\in m'(s,o)$
+ \end{itemize*}
+ \item Write-security conformity:
+ \begin{itemize*}
+ \item write $\not\in m(s,o)\wedge write \in m'(s,o)\Rightarrow cl'(s)\leq cl'(o)$
+ \item write $\in m(s,o)\wedge\lnot(cl'(s)\leq cl'(o)) \Rightarrow write \not\in m'(s,o)$
+ \end{itemize*}
+ \end{itemize*}
+ }

Proof of Read Security
\begin{itemize*}
\item Technique: Term rewriting
\item Let $q=\sigma^*(q_0,\sigma^+), \sigma^+\in\sum^+, q'=\sigma(q,\delta), \delta\in\sum, s\in S, o\in O$. With $q=⟨m,cl⟩$ and $q'=⟨m',cl'⟩$, the BLP BST for read-security is
- \begin{itemize*}
- \item (a1) $read \not\in m(s,o) \wedge read\in m'(s,o) \Rightarrow cl'(o) \leq cl'(s)$
- \item (a2) $read \in m(s,o) \wedge\lnot (cl'(o)\leq cl'(s)) \Rightarrow read \not\in m'(s,o)$
- \item Let’s first introduce some convenient abbreviations for this:
- \begin{itemize*}
- \item $R:=read\in m(s,o)$
- \item $R':=read\in m'(s,o)$
- \item $C':=cl'(o) \leq cl'(s)$
- \item $\sigma^+$ is the set of finite, non-empty input sequences.
- \end{itemize*}
- \item Proposition: $(a1) \wedge (a2)\equiv read-security$
- \item Proof: $(a1) \wedge (a2)= R' \Rightarrow C'\equiv read\in m'(s,o) \Rightarrow cl'(o)\leq cl'(s)$, which exactly matches the definition of read-security for $q'$.
- \item Write-security: Same steps for $(b1)\wedge (b2)$.
- \end{itemize*}
+ \begin{itemize*}
+ \item (a1) $read \not\in m(s,o) \wedge read\in m'(s,o) \Rightarrow cl'(o) \leq cl'(s)$
+ \item (a2) $read \in m(s,o) \wedge\lnot (cl'(o)\leq cl'(s)) \Rightarrow read \not\in m'(s,o)$
+ \item Let’s first introduce some convenient abbreviations for this:
+ \begin{itemize*}
+ \item $R:=read\in m(s,o)$
+ \item $R':=read\in m'(s,o)$
+ \item $C':=cl'(o) \leq cl'(s)$
+ \item $\sum^+$ is the set of finite, non-empty input sequences.
+ \end{itemize*}
+ \item Proposition: $(a1)\wedge(a2)\equiv$ read-security
+ \item Proof: By contraposition, $(a2)\equiv R\wedge R'\Rightarrow C'$, so $(a1)\wedge(a2)\equiv(\lnot R\wedge R'\Rightarrow C')\wedge(R\wedge R'\Rightarrow C')\equiv R'\Rightarrow C'\equiv read\in m'(s,o)\Rightarrow cl'(o)\leq cl'(s)$, which exactly matches the definition of read-security for $q'$.
+ \item Write-security: Same steps for $(b1)\wedge (b2)$.
+ \end{itemize*}
\end{itemize*}

Where Do We Stand?
@@ -3040,12 +2301,12 @@
\item Comp: set of compartments
\item $co:S\cup O\rightarrow 2^{Comp}$: assigns a set of compartments to an entity as an (additional) attribute
\item Refined state security rules:
- \begin{itemize*}
- \item $⟨m,cl,co⟩$ is read-secure $\Leftrightarrow\forall s\in S,o\in O:read \in m(s,o)\Rightarrow cl(o)\leq cl(s)\wedge co(o) \subseteq co(s)$
- \item $⟨m,cl,co⟩$ is write-secure $\Leftrightarrow\forall s\in S,o\in O:write\in m(s,o)\Rightarrow cl(s)\leq cl(o)\wedge co(o) \subseteq co(s)$
- \item Good ol’ BLP: $⟨S,O,L,Q,\sigma,\delta,q_0⟩$
- \item With compartments: $⟨S,O,L,Comp,Q_{co},\sigma,\delta,q_0⟩$ where $Q_{co}=M\times CL\times CO$ and $CO=\{co|co:S\cup O\rightarrow 2^{Comp}\}$
- \end{itemize*}
+ \begin{itemize*}
+ \item $⟨m,cl,co⟩$ is read-secure $\Leftrightarrow\forall s\in S,o\in O:read \in m(s,o)\Rightarrow cl(o)\leq cl(s)\wedge co(o) \subseteq co(s)$
+ \item $⟨m,cl,co⟩$ is write-secure $\Leftrightarrow\forall s\in S,o\in O:write\in m(s,o)\Rightarrow cl(s)\leq cl(o)\wedge co(o) \subseteq co(s)$
+ \item Good ol’ BLP: $⟨S,O,L,Q,\sum,\sigma,q_0,R⟩$
+ \item With compartments: $⟨S,O,L,Comp,Q_{co},\sum,\sigma,q_0,R⟩$ where $Q_{co}=M\times CL\times CO$ and $CO=\{co|co:S\cup O\rightarrow 2^{Comp}\}$
+ \end{itemize*}
\end{itemize*}

Example
@@ -3065,41 +2326,41 @@
\item Scalability $\rightarrow$ attributes: trust levels
\item Modeling dynamic behavior $\rightarrow$ automaton with STS
\item Correctness guarantees
- \begin{itemize*}
- \item Of model specification: analysis of
- \begin{itemize*}
- \item consistency: BLP security, BST
- \item completeness of IF: IFG path finding
- \item presence of unintended, transitive IF: IFG path finding
- \item unwanted redundancy: IF cycles $\rightarrow$ information equivalence classes
- \item safety properties:decidable!
- \item $\rightarrow$ tool-supportpossible!
- \end{itemize*}
- \item Of model implementation: good ol’ ACM $\rightarrow$ ACLs, capabilities
- \end{itemize*}
+ \begin{itemize*}
+ \item Of model specification: analysis of
+ \begin{itemize*}
+ \item consistency: BLP security, BST
+ \item completeness of IF: IFG path finding
+ \item presence of unintended, transitive IF: IFG path finding
+ \item unwanted redundancy: IF cycles $\rightarrow$ information equivalence classes
+ \item safety properties: decidable!
+ \item $\rightarrow$ tool-support possible!
+ \end{itemize*}
+ \item Of model implementation: good ol’ ACM $\rightarrow$ ACLs, capabilities
+ \end{itemize*}
\item Implementation
- \begin{itemize*}
- \item ACM is a standard AC mechanism in contemporary implementation platforms (cf. prev. slide)
- \item Contemporary standard OSs need this: do not support mechanisms for
- \begin{itemize*}
- \item entity classification
- \item arbitrary STSs
- \end{itemize*}
- \item $\rightarrow$ newer platforms may do: SELinux, SEAndroid, TrustedBSD, Solaris, Trusted Extensions, PostgreSQL
- \end{itemize*}
+ \begin{itemize*}
+ \item ACM is a standard AC mechanism in contemporary implementation platforms (cf. prev. slide)
+ \item Contemporary standard OSs would need this, but do not support mechanisms for
+ \begin{itemize*}
+ \item entity classification
+ \item arbitrary STSs
+ \end{itemize*}
+ \item $\rightarrow$ newer platforms may do: SELinux, SEAndroid, TrustedBSD, Solaris, Trusted Extensions, PostgreSQL
+ \end{itemize*}
\item Is an example of a hybrid model: IF + AC + ABAC
\end{itemize*}

Lessons Learned - What we can learn from BLP for designing and using security models:
\begin{itemize*}
\item Model composition from known model abstractions
- \begin{itemize*}
- \item Denning: IF modeling
- \item ABAC: IF classes and compartments as attributes
- \item MSL: modeling trust as a linear hierarchy
- \item HRU: modeling dynamic behavior
- \item ACM: implementing application-oriented policy semantics
- \end{itemize*}
+ \begin{itemize*}
+ \item Denning: IF modeling
+ \item ABAC: IF classes and compartments as attributes
+ \item MLS: modeling trust as a linear hierarchy
+ \item HRU: modeling dynamic behavior
+ \item ACM: implementing application-oriented policy semantics
+ \end{itemize*}
\item Consistency is an important property of composed models
\item BLP is further extensible and refinable $\rightarrow$ starting point for later models, e. g. Biba
\end{itemize*}

@@ -3116,11 +2377,11 @@
Applications Example: On-board Airplane Passenger Information Systems
\begin{itemize*}
\item Goal: Provide in-flight information in cabin network
- \begin{itemize*}
- \item Flight instruments data
- \item Outboard camera video streams
- \item communication pilot - tower
- \end{itemize*}
+ \begin{itemize*}
+ \item Flight instruments data
+ \item Outboard camera video streams
+ \item communication pilot - tower
+ \end{itemize*}
\item Integrity: no information flow from cabin to flight deck!
\item As employed in Boeing 787: common network for cabin and flight deck + software firewall + Biba implementation
\end{itemize*}

@@ -3130,12 +2391,12 @@
\item An application of the Biba model for OS access control:
\item Integrity: Protect system files from malicious user (software) tampering
\item Class hierarchy:
- \begin{itemize*}
- \item system: OS level objects
- \item high: services
- \item medium: user level objects
- \item low: untrusted processes e. g. web browser, setup application, ...
- \end{itemize*}
+ \begin{itemize*}
+ \item system: OS level objects
+ \item high: services
+ \item medium: user level objects
+ \item low: untrusted processes e. g. web browser, setup application, ...
+ \end{itemize*}
\item Consequence: every file, process, ... created by the web browser is classified low $\rightarrow$ cannot violate integrity of system- and user-objects
\item Manual user involvement ($\rightarrow$ DAC portion of the policy): resolving intended exceptions, e. g. to install trusted application software (cf. the sketch below)
\end{itemize*}
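+ Since Biba is BLP with inverted level semantics, its checks are the mirror image of the BLP rules sketched earlier; a toy Python rendering of the UAC-like hierarchy above (rules simplified and names invented; real Windows integrity control differs in detail):
+ \begin{lstlisting}[language=Python]
+ # Integrity levels: low=0 < medium=1 < high=2 < system=3
+ il = {"browser": 0, "user_doc": 1, "service": 2, "os_file": 3}
+
+ def may_write(s, o):
+     """Biba *-property: write only to equal or lower integrity."""
+     return il[o] <= il[s]
+
+ def may_read(s, o):
+     """Biba simple integrity: read only from equal or higher integrity."""
+     return il[s] <= il[o]
+
+ print(may_write("browser", "os_file"))  # False: low may not taint system
+ print(may_read("browser", "os_file"))   # True: reading up is harmless
+ \end{lstlisting}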
    to install trusted application software
    \end{itemize*}

@@ -3144,22 +2405,21 @@
    \subsubsection{Non-interference Models}
    Problem No. 1: Covert Channels
-  > Covert Channel [Lampson, 1973]
-  > Channels [...] not intended for information transfer at all, such as the service program’s effect on the system load.
+  \note{Covert Channel [Lampson, 1973]}{Channels [...] not intended for information transfer at all, such as the service program’s effect on the system load.}

    \begin{itemize*}
    \item AC policies (ACM, HRU, TAM, RBAC, ABAC): colluding malware agents, escalation of common privileges
-      \begin{itemize*}
-      \item Process 1: only read permissions on user files
-      \item Process 2: only permission to create an internet socket
-      \item both:communication via covert channel(e. g. swapping behavior)
-      \end{itemize*}
+      \begin{itemize*}
+      \item Process 1: only read permissions on user files
+      \item Process 2: only permission to create an internet socket
+      \item both: communication via covert channel (e. g. swapping behavior)
+      \end{itemize*}
    \item MLS policies (Denning, BLP, Biba): indirect information flow exploitation (Note: We can never prohibit any possible transitive IF ...)
-      \begin{itemize*}
-      \item Test for existence of a file
-      \item Volume control on smartphones
-      \item Timing channels from server response times
-      \end{itemize*}
+      \begin{itemize*}
+      \item Test for existence of a file
+      \item Volume control on smartphones
+      \item Timing channels from server response times
+      \end{itemize*}
    \end{itemize*}

    Problem No. 2: Damage Range
@@ -3201,9 +2461,7 @@
    \item Cross-domain (inter)actions $\rightarrow$ interference
    \end{itemize*}
    From covert channels to domain interference:
-  > Non-Interference
-  >
-  > Two domains do not interfere with each other iff no action in one domain can be observed by the other.
+  \note{Non-Interference}{Two domains do not interfere with each other iff no action in one domain can be observed by the other.}

    $\rightarrow$ NI Model Abstractions:
    \begin{itemize*}
@@ -3213,37 +2471,37 @@
    \item Effects of actions on domains defined by a mapping $dom:A\rightarrow 2^D$
    \end{itemize*}

-  > NI Security Model
-  > An NI model is a det. automaton $⟨Q,\sigma,\delta,\lambda,q_0,D,A,dom,\approx_{NI},Out⟩$ where
-    \begin{itemize*}
-    \item Q is the set of (abstract) states,
-    \item $\sigma=A$ is the input alphabet where A is the set of (abstract) actions,
-    \item $\delta:Q\times\sigma\rightarrow Q$ is the state transition function,
-    \item $\lambda:Q\times\sigma\rightarrow Out$ is the output function,
-    \item $q_0\in Q$ is the initial state,
-    \item $D$ is a set of domains,
-    \item $dom:A\rightarrow 2^D$ is adomain function that completely defines the set of domains affected by an action,
-    \item $\approx_{NI}\subseteq D\times D$ is a non-interference relation,
-    \item $Out$ is a set of (abstract) outputs.
-    \end{itemize*}
+  \note{NI Security Model}{An NI model is a det.
  automaton $⟨Q,\sigma,\delta,\lambda,q_0,D,A,dom,\approx_{NI},Out⟩$ where
+      \begin{itemize*}
+      \item Q is the set of (abstract) states,
+      \item $\sigma=A$ is the input alphabet where A is the set of (abstract) actions,
+      \item $\delta:Q\times\sigma\rightarrow Q$ is the state transition function,
+      \item $\lambda:Q\times\sigma\rightarrow Out$ is the output function,
+      \item $q_0\in Q$ is the initial state,
+      \item $D$ is a set of domains,
+      \item $dom:A\rightarrow 2^D$ is a domain function that completely defines the set of domains affected by an action,
+      \item $\approx_{NI}\subseteq D\times D$ is a non-interference relation,
+      \item $Out$ is a set of (abstract) outputs.
+      \end{itemize*}
+  }

    NI Security Model is also called Goguen/Meseguer-Model [Goguen and Meseguer, 1982].

    BLP written as an NI Model
    \begin{itemize*}
    \item BLP Rules:
-      \begin{itemize*}
-      \item write in class public may affect public and confidential
-      \item write in class confidential may only affect confidential
-      \end{itemize*}
+      \begin{itemize*}
+      \item write in class public may affect public and confidential
+      \item write in class confidential may only affect confidential
+      \end{itemize*}
    \item NI Model:
-      \begin{itemize*}
-      \item $D=\{d_{pub},d_{conf}\}$
-      \item write in $d_{conf}$ does not affect $d_{pub}$, so $d_{conf} \approx_{NI} d_{pub}$
-      \item $A=\{writeInPub, writeInConf\}$
-      \item $dom(writeInPub)=\{d_{pub},d_{conf}\}$
-      \item $dom(writeInConf)=\{d_{conf}\}$
-      \end{itemize*}
+      \begin{itemize*}
+      \item $D=\{d_{pub},d_{conf}\}$
+      \item write in $d_{conf}$ does not affect $d_{pub}$, so $d_{conf} \approx_{NI} d_{pub}$
+      \item $A=\{writeInPub, writeInConf\}$
+      \item $dom(writeInPub)=\{d_{pub},d_{conf}\}$
+      \item $dom(writeInConf)=\{d_{conf}\}$
+      \end{itemize*}
    \end{itemize*}

    \paragraph{NI Model Analysis}
@@ -3258,18 +2516,15 @@
    Is there a sequence of actions $a^*\in A^*$ that violates $\approx_{NI}$? $\rightarrow$ A model is called $NI$-secure iff there is no sequence of actions that results in an illegal domain interference. Now what does this mean precisely...? Before we define what NI-secure is, assume we could remove all actions from an action sequence that have no effect on a given set of domains:
-  > Purge Function
-  >
-  > Let $aa^*\in A^*$ be a sequence of actions consisting of a single action $a\in A\cup\{\epsilon\}$ followed by a sequence $a^*\in A^*$, where $\epsilon$ denotes an empty sequence. Let $D'\in 2^D$ be any set of domains. Then, purge: $A^*\times 2^D \rightarrow A^*$ computes a subsequence of $aa^*$ by removing such actions without an observable effect on any element of $D':$
-    \begin{itemize*}
-    \item $purge(aa^*,D')=\begin{cases} a\circ purge(a^*,D'), \quad\exists d_a\in dom(a),d'\in D':d_a\approx_I d' \\ purge(a^*,D'), \quad\text{ otherwise }\end{cases}$
-    \item $purge(\epsilon,D')=\epsilon$
-    \end{itemize*}
-  > where $\approx_I$ is the complement of $\approx_{NI}:d_1 \approx_I d_2\Leftrightarrow \lnot(d_1 \approx_{NI} d_2)$.
+  \note{Purge Function}{Let $aa^*\in A^*$ be a sequence of actions consisting of a single action $a\in A\cup\{\epsilon\}$ followed by a sequence $a^*\in A^*$, where $\epsilon$ denotes an empty sequence. Let $D'\in 2^D$ be any set of domains. Then, purge: $A^*\times 2^D \rightarrow A^*$ computes a subsequence of $aa^*$ by removing such actions without an observable effect on any element of $D':$
+      \begin{itemize*}
+      \item $purge(aa^*,D')=\begin{cases} a\circ purge(a^*,D'), \quad\exists d_a\in dom(a),d'\in D':d_a\approx_I d' \\ purge(a^*,D'), \quad\text{ otherwise }\end{cases}$
+      \item $purge(\epsilon,D')=\epsilon$
+      \end{itemize*}
+      where $\approx_I$ is the complement of $\approx_{NI}:d_1 \approx_I d_2\Leftrightarrow \lnot(d_1 \approx_{NI} d_2)$.
+  }

-  > NI Security
-  >
-  > For a state $q\in Q$ of an NI model $⟨Q,\sigma,\delta,\lambda,q_0,D,A,dom,\approx_{NI},Out⟩$, the predicate ni-secure(q) holds iff $\forall a\in A,\forall a^*\in A^*:\lambda (\delta^*(q,a^*),a)=\lambda(\delta^*(q,purge(a^*,dom(a))),a)$
+  \note{NI Security}{For a state $q\in Q$ of an NI model $⟨Q,\sigma,\delta,\lambda,q_0,D,A,dom,\approx_{NI},Out⟩$, the predicate ni-secure(q) holds iff $\forall a\in A,\forall a^*\in A^*:\lambda (\delta^*(q,a^*),a)=\lambda(\delta^*(q,purge(a^*,dom(a))),a)$.}
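+
+    A minimal executable rendering of purge on the two-domain BLP example above (an iterative version of the recursive definition; the dictionary encodings are illustrative assumptions -- checking ni-secure(q) would additionally compare $\lambda$ on purged and unpurged runs):
+    \begin{lstlisting}[language=Python]
+ NI = {('conf', 'pub')}            # d1 ~NI d2: d1 does not interfere with d2
+
+ def interferes(d1, d2):           # the complement relation ~I
+     return (d1, d2) not in NI
+
+ def purge(seq, domains, dom):
+     # keep an action iff it may interfere with some domain in `domains`
+     return [a for a in seq
+             if any(interferes(da, d) for da in dom[a] for d in domains)]
+
+ dom = {'writeInPub': {'pub', 'conf'}, 'writeInConf': {'conf'}}
+ seq = ['writeInConf', 'writeInPub', 'writeInConf']
+ print(purge(seq, {'pub'}, dom))   # ['writeInPub']: confidential writes purged
+    \end{lstlisting}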
    Interpretation
    1. Running an NI model on $⟨q,a^*⟩$ yields $q'=\delta^*(q,a^*)$.
@@ -3279,24 +2534,24 @@
    \paragraph{Comparison to HRU and IF Models}
    \begin{itemize*}
    \item HRU Models
-      \begin{itemize*}
-      \item Policies describe rules that control subjects accessing objects
-      \item Analysis goal: right proliferation
-      \item Covert channels analysis: only based on model implementation
-      \end{itemize*}
+      \begin{itemize*}
+      \item Policies describe rules that control subjects accessing objects
+      \item Analysis goal: right proliferation
+      \item Covert channel analysis: only based on model implementation
+      \end{itemize*}
    \item IF Models
-      \begin{itemize*}
-      \item Policies describe rules about legal information flows
-      \item Analysis goals: indirect IFs, redundancy, inner consistency
-      \item Covert channel analysis: same as HRU
-      \end{itemize*}
+      \begin{itemize*}
+      \item Policies describe rules about legal information flows
+      \item Analysis goals: indirect IFs, redundancy, inner consistency
+      \item Covert channel analysis: same as HRU
+      \end{itemize*}
    \item NI Models
-      \begin{itemize*}
-      \item Rules about mutual interference between domains
-      \item Analysis goal: consistency of $\approx_{NI}$ and $dom$
-      \item Implementation needs rigorous domain isolation (more rigorous than MLS, e.g. object encryption is not sufficient!) $\rightarrow$ expensive
-      \item State of the Art w.r.t. isolation completeness: VMs > OS domains (SELinux) > Containers
-      \end{itemize*}
+      \begin{itemize*}
+      \item Rules about mutual interference between domains
+      \item Analysis goal: consistency of $\approx_{NI}$ and $dom$
+      \item Implementation needs rigorous domain isolation (more rigorous than MLS, e.g. object encryption is not sufficient!) $\rightarrow$ expensive
+      \item State of the Art w.r.t. isolation completeness: VMs > OS domains (SELinux) > Containers
+      \end{itemize*}
    \end{itemize*}
    \subsubsection{Hybrid Models}
@@ -3306,35 +2561,35 @@
    Security policy family for consulting companies
    \begin{itemize*}
    \item Clients of any such company
-      \begin{itemize*}
-      \item Companies, including their business data
-      \item Often: mutual competitors
-      \end{itemize*}
+      \begin{itemize*}
+      \item Companies, including their business data
+      \item Often: mutual competitors
+      \end{itemize*}
    \item Employees of consulting companies
-      \begin{itemize*}
-      \item Are assigned to clients they consult (decided by management)
-      \item Work for many clients $\rightarrow$ gather insider information
-      \end{itemize*}
+      \begin{itemize*}
+      \item Are assigned to clients they consult (decided by management)
+      \item Work for many clients $\rightarrow$ gather insider information
+      \end{itemize*}
    \item $\rightarrow$ Policy goal: No flow of (insider) information between competing clients
    \end{itemize*}

    Why look specifically at these policies?
    \begin{itemize*}
    \item Modeling
-      \begin{itemize*}
-      \item Composition of
-      \begin{itemize*}
-      \item Discretionary IBAC components
-      \item Mandatory ABAC components
-      \end{itemize*}
-      \item Driven by real-world demands: iterative refinements of a model over time
-      \begin{itemize*}
-      \item Brewer-Nash model [Brewer and Nash, 1989]
-      \item Information flow model [Sandhu, 1992a]
-      \item Attribute-based model [Sharifi and Tripunitara, 2013]
-      \end{itemize*}
-      \item Application areas: consulting, cloud computing
-      \end{itemize*}
+      \begin{itemize*}
+      \item Composition of
+        \begin{itemize*}
+        \item Discretionary IBAC components
+        \item Mandatory ABAC components
+        \end{itemize*}
+      \item Driven by real-world demands: iterative refinements of a model over time
+        \begin{itemize*}
+        \item Brewer-Nash model [Brewer and Nash, 1989]
+        \item Information flow model [Sandhu, 1992a]
+        \item Attribute-based model [Sharifi and Tripunitara, 2013]
+        \end{itemize*}
+      \item Application areas: consulting, cloud computing
+      \end{itemize*}
    \end{itemize*}

    \paragraph{The Brewer-Nash Model}
@@ -3352,12 +2607,12 @@
    Example
    \begin{itemize*}
    \item Consultancy clients
-      \begin{itemize*}
-      \item Banks: HSBC, Deutsche Bank, Citigroup
-      \item Oil companies: Shell, Exxon Mobil/Esso
-      \end{itemize*}
+      \begin{itemize*}
+      \item Banks: HSBC, Deutsche Bank, Citigroup
+      \item Oil companies: Shell, Exxon Mobil/Esso
+      \end{itemize*}
    \item Conflicts: business-crucial information flows between banks and oil companies
-      %![](Assets/Systemsicherheit-brewer-example.png)
+      %![](Assets/Systemsicherheit-brewer-example.png)
    \end{itemize*}

    Representation of Conflict Classes
@@ -3374,27 +2629,26 @@
    \item In terms of ABAC: subject attribute $att_S:S\rightarrow 2^O$, such that $att_S(s)=\{o\in O|⟨s,o⟩\in H\}$.
    \end{itemize*}

-  > Brewer-Nash Security Model
-  >
-  > The Brewer-Nash model of the CW policy is a det.
  $automaton⟨S,O,Q,\sigma,\delta,q_0,R⟩$ where
-    \begin{itemize*}
-    \item $S$ and $O$ are sets of subjects (consultants) and (company data) objects,
-    \item $Q=M\times 2^C\times 2^H$ is the state space where
-    \begin{itemize*}
-    \item $M=\{m|m:S\times O\rightarrow 2^R\}$ is the set ofpossible ACMs,
-    \item $C\subseteq O\times O$ is the conflict relation: $⟨o,o'⟩\in C\Leftrightarrow o$ and $o'$ are competitors,
-    \item $H\subseteq S\times O$ is the history relation: $⟨s,o⟩\in H\Leftrightarrow s$ has previously
-    consulted $o$,
-    \end{itemize*}
-    \item $\sigma=OP \times X$ is the input alphabet where
-    \begin{itemize*}
-    \item $OP=\{read,write\}$ is a set of operations,
-    \item $X=S \times O$ is the set of arguments of these operations,
-    \end{itemize*}
-    \item $\delta:Q \times\sigma\rightarrow Q$ is the state transition function,
-    \item $q_0\in Q$ is the initial state,
-    \item $R=\{read,write\}$ is the set of access rights.
-    \end{itemize*}
+  \note{Brewer-Nash Security Model}{The Brewer-Nash model of the CW policy is a det. automaton $⟨S,O,Q,\sigma,\delta,q_0,R⟩$ where
+      \begin{itemize*}
+      \item $S$ and $O$ are sets of subjects (consultants) and (company data) objects,
+      \item $Q=M\times 2^C\times 2^H$ is the state space where
+        \begin{itemize*}
+        \item $M=\{m|m:S\times O\rightarrow 2^R\}$ is the set of possible ACMs,
+        \item $C\subseteq O\times O$ is the conflict relation: $⟨o,o'⟩\in C\Leftrightarrow o$ and $o'$ are competitors,
+        \item $H\subseteq S\times O$ is the history relation: $⟨s,o⟩\in H\Leftrightarrow s$ has previously consulted $o$,
+        \end{itemize*}
+      \item $\sigma=OP \times X$ is the input alphabet where
+        \begin{itemize*}
+        \item $OP=\{read,write\}$ is a set of operations,
+        \item $X=S \times O$ is the set of arguments of these operations,
+        \end{itemize*}
+      \item $\delta:Q \times\sigma\rightarrow Q$ is the state transition function,
+      \item $q_0\in Q$ is the initial state,
+      \item $R=\{read,write\}$ is the set of access rights.
+      \end{itemize*}
+  }

    %![](Assets/Systemsicherheit-brewer-example-2.png)
    At the time depicted:
@@ -3407,15 +2661,15 @@
    \paragraph{Brewer-Nash STS}
    \begin{itemize*}
    \item Read (here: similar to HRU notation)
-      $command read(s,o)::=if read \in m(s,o) \wedge\forall ⟨o',o⟩\in C:⟨s,o'⟩\not\in H$
-      $then$
-      $H:=H\cup\{⟨s,o⟩\}$
-      $fi$
+      $command\ read(s,o) ::= if\ read \in m(s,o) \wedge\forall ⟨o',o⟩\in C:⟨s,o'⟩\not\in H$
+      $then$
+      $H:=H\cup\{⟨s,o⟩\}$
+      $fi$
    \item Write
-      $command write(s,o)::=if write \in m(s,o) \wedge\forall o'\in O:o'\not=o \Rightarrow ⟨s,o'⟩\not\in H$
-      $then$
-      $H:=H\cup\{⟨s,o⟩\}$
-      $fi$
+      $command\ write(s,o) ::= if\ write \in m(s,o) \wedge\forall o'\in O:o'\not=o \Rightarrow ⟨s,o'⟩\not\in H$
+      $then$
+      $H:=H\cup\{⟨s,o⟩\}$
+      $fi$
    \end{itemize*}

    Not shown: Discretionary policy portion $\rightarrow$ modifications in m to enable fine-grained rights management.

@@ -3424,30 +2678,30 @@
    \begin{itemize*}
    \item Write Command: s is allowed to write $o\Leftrightarrow write\in m(s,o)\wedge\forall o'\in O:o'\not=o\Rightarrow⟨s,o'⟩\not\in H$
    \item Why so restrictive? $\rightarrow$ No transitive information flow!
-      \begin{itemize*}
-      \item $\rightarrow$ s must never have previously consulted any other client!
-      \item $\Rightarrow$ any consultant is stuck with her client on first read access
-      \item $\Rightarrow$ not (yet) a professional model!
-      \end{itemize*}
+      \begin{itemize*}
+      \item $\rightarrow$ s must never have previously consulted any other client!
+      \item $\Rightarrow$ any consultant is stuck with her client on first read access
+      \item $\Rightarrow$ not (yet) a professional model!
+      \end{itemize*}
    \end{itemize*}
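+
+    The two commands and the secure-state invariant below can be rendered directly in code. A minimal sketch (dict/set encodings and names are illustrative assumptions; $C$ is kept symmetric):
+    \begin{lstlisting}[language=Python]
+ def may_read(s, o, m, C, H):
+     # read in m(s,o), and no history with any competitor o' of o
+     return 'read' in m.get((s, o), set()) and \
+            all((s, o1) not in H for (o1, o2) in C if o2 == o)
+
+ def may_write(s, o, m, C, H):
+     # write in m(s,o), and no history with ANY other object
+     return 'write' in m.get((s, o), set()) and \
+            all(o1 == o for (s1, o1) in H if s1 == s)
+
+ m = {('Ann', 'DB'): {'read', 'write'}, ('Ann', 'Citi'): {'read'}}
+ C = {('DB', 'Citi'), ('Citi', 'DB')}   # symmetric conflict relation
+ H = {('Ann', 'DB')}                    # Ann has already consulted DB
+ print(may_read('Ann', 'Citi', m, C, H))  # False: DB and Citi compete
+    \end{lstlisting}
+    On a successful read or write, the STS additionally records $⟨s,o⟩$ in $H$.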
    \paragraph{Brewer-Nash Model}
    Instantiation of a Model
    \begin{itemize*}
    \item Initial State $q_0$
-      \begin{itemize*}
-      \item $m_0$: consultant assignments to clients, issued by management
-      \item $C_0$: according to real-life competition
-      \item $H_0 =\varnothing$
-      \end{itemize*}
+      \begin{itemize*}
+      \item $m_0$: consultant assignments to clients, issued by management
+      \item $C_0$: according to real-life competition
+      \item $H_0 =\varnothing$
+      \end{itemize*}
    \end{itemize*}

-  > Secure State
-  > $\forall o,o' \in O,s\in S:⟨s,o⟩\in H_q\wedge⟨s,o'⟩\in H_q\Rightarrow⟨o,o'⟩\not\in C_q$
+  \note{Secure State}{$\forall o,o' \in O,s\in S:⟨s,o⟩\in H_q\wedge⟨s,o'⟩\in H_q\Rightarrow⟨o,o'⟩\not\in C_q$

-  > Secure Brewer-Nash Model
-  > Similar to "secure BLP model".
+  Corollary: $\forall o,o'\in O,s\in S:⟨o,o'⟩\in C_q\wedge⟨s,o⟩\in H_q\Rightarrow ⟨s,o'⟩\not\in H_q$
+  }
+
+  \note{Secure Brewer-Nash Model}{Similar to "secure BLP model".}

    In the exercises: STS, transformation into pure HRU calculus, dynamic subject and object sets.

@@ -3456,39 +2710,39 @@
    \begin{itemize*}
    \item Composes DAC and MAC components
    \item Simple model paradigms
-      \begin{itemize*}
-      \item Sets (subjects, objects)
-      \item ACM (DAC)
-      \item Relations (company conflicts, consultants history)
-      \item Simple "read" and "write" rule
-      \item $\rightarrow$ easy to implement
-      \end{itemize*}
+      \begin{itemize*}
+      \item Sets (subjects, objects)
+      \item ACM (DAC)
+      \item Relations (company conflicts, consultants’ history)
+      \item Simple "read" and "write" rule
+      \item $\rightarrow$ easy to implement
+      \end{itemize*}
    \item Analysis goals
-      \begin{itemize*}
-      \item MAC: Model security
-      \item DAC: safety properties
-      \end{itemize*}
+      \begin{itemize*}
+      \item MAC: Model security
+      \item DAC: safety properties
+      \end{itemize*}
    \item Drawback: Restrictive write-rule
    \end{itemize*}

    Professionalization
    \begin{itemize*}
    \item Remember the difference: trusting humans (consultants) vs. trusting software agents (subjects)
-      \begin{itemize*}
-      \item Consultants are assumed to be trusted
-      \item Systems (processes, sessions, etc.) may fail, e. g. due to a malware attack
-      \end{itemize*}
+      \begin{itemize*}
+      \item Consultants are assumed to be trusted
+      \item Systems (processes, sessions, etc.) may fail, e. g. due to a malware attack
+      \end{itemize*}
    \item $\rightarrow$ Write-rule applied not to humans, but to (shorter-lived) software agents $\rightarrow$ mitigating malware effectiveness
    \item $\rightarrow$ Subject set S models consultant’s subjects (e. g. processes) in a group model:
-      \begin{itemize*}
-      \item All processes of one consultant form a group
-      \item Group members
-      \begin{itemize*}
-      \item have the same rights in m
-      \item have individual histories
-      \item are strictly isolated w.r.t. IF
-      \end{itemize*}
-      \end{itemize*}
+      \begin{itemize*}
+      \item All processes of one consultant form a group
+      \item Group members
+        \begin{itemize*}
+        \item have the same rights in m
+        \item have individual histories
+        \item are strictly isolated w.r.t. IF
+        \end{itemize*}
+      \end{itemize*}
    \item Solution approach: as we already know $\rightarrow$ model refinement!
    \end{itemize*}

@@ -3509,68 +2763,67 @@
    \item $\rightarrow$ subject-/object-specific history, $\approx$attributes ("labels")
    \end{itemize*}

-  > LR-CW Model
-  >
-  > The Least-Restrictive model of the CW policy is a deterministic $automaton ⟨S,O,F,\zeta,Q,\sigma,\delta,q_0⟩$ where
-    \begin{itemize*}
-    \item S and O are sets of subjects (consultants) and data objects,
-    \item F is the set of client companies,
-    \item $\zeta:O\rightarrow F$ ("zeta") is a function mapping each object to its company,
-    \item $Q=2^C \times 2^H$ is the state space where
-    \begin{itemize*}
-    \item $C\subseteq F\times F$ is the conflict relation: $⟨f,f'⟩\in C\Leftrightarrow f$ and $f'$ are competitors,
-    \item $H=\{Z_e\subseteq F|e\in S\cup O\}$ is the history set: $f\in Z_e\Leftrightarrow e$ contains information about $f(Z_e$ is the "history label" of $e$),
-    \end{itemize*}
-    \item $\sigma=OP\times X$ is the input alphabet where
-    \begin{itemize*}
-    \item $OP=\{read,write\}$ is the set of operations,
-    \item $X=S\times O$ is the set of arguments of these operations,
-    \end{itemize*}
-    \item $\delta:Q\times\sigma\rightarrow Q$ is the state transition function,
-    \item $q_0\in Q$ is the initial state
-    \end{itemize*}
+  \note{LR-CW Model}{The Least-Restrictive model of the CW policy is a deterministic automaton $⟨S,O,F,\zeta,Q,\sigma,\delta,q_0⟩$ where
+      \begin{itemize*}
+      \item S and O are sets of subjects (consultants) and data objects,
+      \item F is the set of client companies,
+      \item $\zeta:O\rightarrow F$ ("zeta") is a function mapping each object to its company,
+      \item $Q=2^C \times 2^H$ is the state space where
+        \begin{itemize*}
+        \item $C\subseteq F\times F$ is the conflict relation: $⟨f,f'⟩\in C\Leftrightarrow f$ and $f'$ are competitors,
+        \item $H=\{Z_e\subseteq F|e\in S\cup O\}$ is the history set: $f\in Z_e\Leftrightarrow e$ contains information about $f$ ($Z_e$ is the "history label" of $e$),
+        \end{itemize*}
+      \item $\sigma=OP\times X$ is the input alphabet where
+        \begin{itemize*}
+        \item $OP=\{read,write\}$ is the set of operations,
+        \item $X=S\times O$ is the set of arguments of these operations,
+        \end{itemize*}
+      \item $\delta:Q\times\sigma\rightarrow Q$ is the state transition function,
+      \item $q_0\in Q$ is the initial state
+      \end{itemize*}
+  }

    %![](Assets/Systemsicherheit-brewer-example-2.png)
    \begin{itemize*}
    \item At the time depicted (before the first write):
-      \begin{itemize*}
-      \item Client companies: $F=\{HSBC,DB,Citi,Shell,Esso\}$
-      \item History set: $H=\{Z_{Ann},Z_{Bob},Z_{o1} ,...,Z_{o|O|}\}$ with history labels
-      \begin{itemize*}
-      \item $Z_{Ann}=\{DB\}$
-      \item $Z_{Bob}=\{Citi,Esso\}$,
-      \item $Z_{oi}=\{\zeta(o_i)\}, 1\leq i\leq |O|$.
-      \end{itemize*}
-      \end{itemize*}
+      \begin{itemize*}
+      \item Client companies: $F=\{HSBC,DB,Citi,Shell,Esso\}$
+      \item History set: $H=\{Z_{Ann},Z_{Bob},Z_{o_1},...,Z_{o_{|O|}}\}$ with history labels
+        \begin{itemize*}
+        \item $Z_{Ann}=\{DB\}$
+        \item $Z_{Bob}=\{Citi,Esso\}$
+        \item $Z_{o_i}=\{\zeta(o_i)\}, 1\leq i\leq |O|$.
+        \end{itemize*}
+      \end{itemize*}
    \end{itemize*}

    Inside the STS
    \begin{itemize*}
    \item a reading operation
-      \begin{itemize*}
-      \item requires that no conflicting information is accumulated in the subject potentially increases the amount of information in the subject
-      \item command read(s,o) ::= if $\forall f,f'\in Z_s \cup Z_o:⟨f,f'⟩\not\in C$ then $Z_s:=Z_s\cup Z_o$ fi
-      \end{itemize*}
+      \begin{itemize*}
+      \item requires that no conflicting information is accumulated in the subject; potentially increases the amount of information in the subject
+      \item command read(s,o) ::= if $\forall f,f'\in Z_s \cup Z_o:⟨f,f'⟩\not\in C$ then $Z_s:=Z_s\cup Z_o$ fi
+      \end{itemize*}
    \item a writing operation
-      \begin{itemize*}
-      \item requires that no conflicting information is accumulated in the object potentially increases the amount of information in the object
-      \item command write(s,o) ::= if $\forall f,f'\in Z_s\cup Z_o:⟨f,f'⟩\not\in C$ then $Z_o:=Z_o\cup Z_s$ fi
-      \end{itemize*}
+      \begin{itemize*}
+      \item requires that no conflicting information is accumulated in the object; potentially increases the amount of information in the object
+      \item command write(s,o) ::= if $\forall f,f'\in Z_s\cup Z_o:⟨f,f'⟩\not\in C$ then $Z_o:=Z_o\cup Z_s$ fi
+      \end{itemize*}
    \end{itemize*}
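+
+    A minimal executable rendering of the two LR-CW commands (set/dict encodings and names are illustrative assumptions; $C$ is kept symmetric):
+    \begin{lstlisting}[language=Python]
+ def conflict_free(label, C):
+     return all((f1, f2) not in C for f1 in label for f2 in label)
+
+ def lr_read(s, o, Z, C):
+     if conflict_free(Z[s] | Z[o], C):
+         Z[s] = Z[s] | Z[o]          # subject accumulates o's information
+         return True
+     return False
+
+ def lr_write(s, o, Z, C):
+     if conflict_free(Z[s] | Z[o], C):
+         Z[o] = Z[o] | Z[s]          # object accumulates s's information
+         return True
+     return False
+
+ C = {('Shell', 'Esso'), ('Esso', 'Shell')}
+ Z = {'Bob': {'Citi', 'Esso'}, 'o1': {'Shell'}}
+ print(lr_read('Bob', 'o1', Z, C))   # False: Esso and Shell would mix
+    \end{lstlisting}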
    Model Achievements
    \begin{itemize*}
    \item Applicability: more writes allowed in comparison to Brewer-Nash (note that this still complies with the general CW policy)
    \item Paid for with
-      \begin{itemize*}
-      \item Need to store individual attributes of all entities (their history labels $Z_e$)
-      \item Dependency of write permissions on earlier actions of other subjects
-      \end{itemize*}
+      \begin{itemize*}
+      \item Need to store individual attributes of all entities (their history labels $Z_e$)
+      \item Dependency of write permissions on earlier actions of other subjects
+      \end{itemize*}
    \item More extensions:
-      \begin{itemize*}
-      \item Operations to modify conflict relation
-      \item Operations to create/destroy entities
-      \end{itemize*}
+      \begin{itemize*}
+      \item Operations to modify conflict relation
+      \item Operations to create/destroy entities
+      \end{itemize*}
    \end{itemize*}

    \paragraph{An MLS Model for Chinese-Wall Policies}
@@ -3607,24 +2860,24 @@
    \item Class set of a lattice $C=\{DB,Citi,Shell,Esso\}$
    \item Entity label: vector of information already present in each business branch (formerly known as conflict class in Brewer-Nash!)
    \item In our example, a vector consists of 2 elements $\in C$; resulting in labels such as:
-      \begin{itemize*}
-      \item $[\epsilon,\epsilon]$ (exclusively for $inf_C$)
-      \item $[DB,\epsilon]$ (for DB-objects or -consultants)
-      \item $[DB,Shell]$ (for subjects or objects containing information from both DB and Shell)
-      \item $[Esso,Shell]$ (illegal label!)
-      \item ...
-      \end{itemize*}
+      \begin{itemize*}
+      \item $[\epsilon,\epsilon]$ (exclusively for $inf_C$)
+      \item $[DB,\epsilon]$ (for DB-objects or -consultants)
+      \item $[DB,Shell]$ (for subjects or objects containing information from both DB and Shell)
+      \item $[Esso,Shell]$ (illegal label!)
+      \item ...
+      \end{itemize*}
    \end{itemize*}
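+
+    One way to read the label example: a label is legal iff it names at most one company per business branch. A minimal sketch (the branch assignment is an assumption taken from the example; $\epsilon$ is rendered as None):
+    \begin{lstlisting}[language=Python]
+ BRANCH = {'DB': 'bank', 'Citi': 'bank', 'Shell': 'oil', 'Esso': 'oil'}
+
+ def legal(label):
+     branches = [BRANCH[f] for f in label if f is not None]
+     return len(branches) == len(set(branches))   # no branch occurs twice
+
+ print(legal(['DB', 'Shell']))    # True
+ print(legal(['Esso', 'Shell']))  # False: two oil companies in one label
+    \end{lstlisting}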
    \paragraph{Summary CW}
    Why is the "Chinese Wall" policy interesting?
    \begin{itemize*}
    \item One policy, multiple models:
-      \begin{itemize*}
-      \item The Brewer-Nash model demonstrates hybrid DAC-/MAC-/IFC-approach
-      \item The Least-Restrictive CW model demonstrates a more practical professionalization
-      \item The MLS-CW model demonstrates applicability of lattice-based IF modeling $\rightarrow$ semantically cleaner approach
-      \end{itemize*}
+      \begin{itemize*}
+      \item The Brewer-Nash model demonstrates a hybrid DAC-/MAC-/IFC-approach
+      \item The Least-Restrictive CW model demonstrates a more practical professionalization
+      \item The MLS-CW model demonstrates applicability of lattice-based IF modeling $\rightarrow$ semantically cleaner approach
+      \end{itemize*}
    \item Applications: Far beyond traditional consulting scenarios...$\rightarrow$ current problems in cloud computing!
    \end{itemize*}

@@ -3633,24 +2886,24 @@
    Security Models
    \begin{itemize*}
    \item Formalize informal security policies for the sake of
-      \begin{itemize*}
-      \item objectification by unambiguous calculi
-      \item explanation and (possibly) proof of security properties (e.g. HRU safety, BLP security, NI security) by formal analysis techniques
-      \item foundation for correct implementations
-      \end{itemize*}
+      \begin{itemize*}
+      \item objectification by unambiguous calculi
+      \item explanation and (possibly) proof of security properties (e.g. HRU safety, BLP security, NI security) by formal analysis techniques
+      \item foundation for correct implementations
+      \end{itemize*}
    \item Are composed of simple building blocks
-      \begin{itemize*}
-      \item E.g. ACMs, sets, relations, functions, lattices, state machines
-      \item ... that are combined and interrelated to form more complex models
-      \item $\rightarrow$ (D)RBAC, (D)ABAC, BLP, Brewer-Nash, LR-CW, MLS-CW
-      \end{itemize*}
+      \begin{itemize*}
+      \item E.g. ACMs, sets, relations, functions, lattices, state machines
+      \item ... that are combined and interrelated to form more complex models
+      \item $\rightarrow$ (D)RBAC, (D)ABAC, BLP, Brewer-Nash, LR-CW, MLS-CW
+      \end{itemize*}
    \end{itemize*}

    Remember: Goals of Security Models
    \begin{itemize*}
    \item Unambiguous policy formalization to
-      1. reason about policy correctness
-      2. correctly implement a policy
+      1. reason about policy correctness
+      2. correctly implement a policy
    \end{itemize*}

@@ -3718,57 +2971,57 @@
    \item E.g. DABAC: State transition scheme (matrix and predicates)
    \item E.g. Brewer/Nash Chinese Wall model: "$\wedge$" (simple, because $H+C\not= m$)
    \item E.g. BLP
-      \begin{itemize*}
-      \item BLP read rule
-      \item BLP write rule
-      \item BST
-      \item (much more complex, because rules restrict m by L and cl )
-      \end{itemize*}
+      \begin{itemize*}
+      \item BLP read rule
+      \item BLP write rule
+      \item BST
+      \item (much more complex, because rules restrict m by L and cl)
+      \end{itemize*}
    \end{itemize*}

    $\rightarrow$ Model Engineering Principles
    \begin{itemize*}
    \item Core model
    \item Core specialization, e.g.
-      \begin{itemize*}
-      \item $Q = 2^S\times 2^O \times M$ (HRU)
-      \item $Q = M\times CL$ (BLP)
-      \end{itemize*}
+      \begin{itemize*}
+      \item $Q = 2^S\times 2^O \times M$ (HRU)
+      \item $Q = M\times CL$ (BLP)
+      \end{itemize*}
    \item Core extension, e.g.
-      \begin{itemize*}
-      \item e.g. $L$ (BLP)
-      \item $T$ (TAM)
-      \item $D, dom ,=_{NI}$ (NI)
-      \end{itemize*}
+      \begin{itemize*}
+      \item $L$ (BLP)
+      \item $T$ (TAM)
+      \item $D$, $dom$, $\approx_{NI}$ (NI)
+      \end{itemize*}
    \item Component glue, e.g.
-      \begin{itemize*}
-      \item Chinese Wall: DAC "$\wedge$" MAC in AS
-      \item BLP: complex relation between ACM and lattice
-      \item $\rightarrow$ BLP security, BLP BST
-      \end{itemize*}
+      \begin{itemize*}
+      \item Chinese Wall: DAC "$\wedge$" MAC in AS
+      \item BLP: complex relation between ACM and lattice
+      \item $\rightarrow$ BLP security, BLP BST
+      \end{itemize*}
    \end{itemize*}
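+
+    The composition principle can be made concrete: a generic automaton core, specialized per model by its state components. A minimal sketch (class and field names are illustrative assumptions, the deltas are placeholders):
+    \begin{lstlisting}[language=Python]
+ from dataclasses import dataclass
+ from typing import Any, Callable
+
+ @dataclass
+ class Automaton:                  # core model: state q and an STS delta
+     state: Any
+     delta: Callable
+
+     def step(self, inp):
+         self.state = self.delta(self.state, inp)
+
+ # core specializations:
+ hru = Automaton(state=(set(), set(), {}), delta=lambda q, x: q)  # Q=2^S x 2^O x M
+ blp = Automaton(state=({}, {}), delta=lambda q, x: q)            # Q=M x CL
+    \end{lstlisting}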
    You should have mastered now: A basic tool set for model-based security policy engineering
    \begin{itemize*}
    \item A stock of basic security model abstractions
-      \begin{itemize*}
-      \item ACFs and ACMs
-      \item Model states and transitions defined by an STS
-      \item Attributes (roles, confidentiality classes, information contents, location, ...)
-      \item Information flows
-      \end{itemize*}
+      \begin{itemize*}
+      \item ACFs and ACMs
+      \item Model states and transitions defined by an STS
+      \item Attributes (roles, confidentiality classes, information contents, location, ...)
+      \item Information flows
+      \end{itemize*}
    \item A stock of formal model building blocks
-      \begin{itemize*}
-      \item Sets, functions, relations
-      \item Deterministic automatons
-      \item Graphs and lattices
-      \end{itemize*}
+      \begin{itemize*}
+      \item Sets, functions, relations
+      \item Deterministic automata
+      \item Graphs and lattices
+      \end{itemize*}
    \item A stock of standard, off-the-shelf security models
    \item Methods and techniques
-      \begin{itemize*}
-      \item for model-based proof of policy properties properties
-      \item for combining basic model building blocks into new, application-oriented security models
-      \end{itemize*}
+      \begin{itemize*}
+      \item for model-based proof of policy properties
+      \item for combining basic model building blocks into new, application-oriented security models
+      \end{itemize*}
    \end{itemize*}

    \subsection{Model Specification}
@@ -3781,60 +3034,60 @@
    To Do
    \begin{itemize*}
    \item How to convert a formal model into an executable policy?
-      \begin{itemize*}
-      \item $\rightarrow$ Policy specification languages
-      \end{itemize*}
+      \begin{itemize*}
+      \item $\rightarrow$ Policy specification languages
+      \end{itemize*}
    \item How to enforce an executable policy in a system?
-      \begin{itemize*}
-      \item $\rightarrow$ security mechanisms and architectures (Chapters 5 and 6)
-      \end{itemize*}
+      \begin{itemize*}
+      \item $\rightarrow$ security mechanisms and architectures (Chapters 5 and 6)
+      \end{itemize*}
    \end{itemize*}

    Role of Specification Languages: Same as in software engineering
    \begin{itemize*}
    \item To bridge the gap between
-      \begin{itemize*}
-      \item Abstractions of security models (sets, relations, ...)
-      \item Abstractions of implementation platforms (security mechanisms such as ACLs, krypto-algorithms, Security Server ...)
-      \end{itemize*}
+      \begin{itemize*}
+      \item Abstractions of security models (sets, relations, ...)
+      \item Abstractions of implementation platforms (security mechanisms such as ACLs, crypto-algorithms, Security Server ...)
+      \end{itemize*}
    \item Foundation for
-      \begin{itemize*}
-      \item Code verification
-      \item Or even more convenient: Automated code generation
-      \end{itemize*}
+      \begin{itemize*}
+      \item Code verification
+      \item Or even more convenient: Automated code generation
+      \end{itemize*}
    \end{itemize*}

    Approach
    \begin{itemize*}
    \item Abstraction level:
-      \begin{itemize*}
-      \item Step stone between model and security mechanisms
-      \item $\rightarrow$ More concrete than models
-      \item $\rightarrow$ More abstract than programming languages (“what” instead of “how“)
-      \end{itemize*}
+      \begin{itemize*}
+      \item Stepping stone between model and security mechanisms
+      \item $\rightarrow$ More concrete than models
+      \item $\rightarrow$ More abstract than programming languages (“what” instead of “how”)
+      \end{itemize*}
    \item Expressive power:
-      \begin{itemize*}
-      \item Domain-specific; for representing security models only
-      \item $\rightarrow$ Necessary: adequate language paradigms
-      \item $\rightarrow$ Sufficient: not more than necessary (no dead weight)
-      \end{itemize*}
+      \begin{itemize*}
+      \item Domain-specific; for representing security models only
+      \item $\rightarrow$ Necessary: adequate language paradigms
+      \item $\rightarrow$ Sufficient: not more than necessary (no dead weight)
+      \end{itemize*}
    \end{itemize*}

    Domains
    \begin{itemize*}
    \item Model domain
-      \begin{itemize*}
-      \item e.g. AC models (TAM, RBAC, ABAC)
-      \item e.g. IF models (MLS)
-      \item e.g. NI models
-      \end{itemize*}
+      \begin{itemize*}
+      \item e.g. AC models (TAM, RBAC, ABAC)
+      \item e.g. IF models (MLS)
+      \item e.g. NI models
+      \end{itemize*}
    \item Implementation domain
-      \begin{itemize*}
-      \item OS
-      \item Middleware
-      \item Applications
-      \end{itemize*}
+      \begin{itemize*}
+      \item OS
+      \item Middleware
+      \item Applications
+      \end{itemize*}
    \end{itemize*}
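+
+    What such a specification could look like one level below the model: declarative data, interpreted by generic enforcement code. A minimal sketch for a BLP policy (the mini-format and all names are illustrative assumptions, not an existing specification language):
+    \begin{lstlisting}[language=Python]
+ SPEC = {
+     "levels": ["public", "confidential"],            # linear lattice L
+     "cl": {"ann": "confidential", "report": "public"},
+     "acm": {("ann", "report"): ["read"]},
+ }
+
+ def permitted(spec, s, o, op):
+     lv = spec["levels"].index                        # position = level
+     if op not in spec["acm"].get((s, o), []):
+         return False
+     if op == "read":                                 # BLP read rule
+         return lv(spec["cl"][o]) <= lv(spec["cl"][s])
+     if op == "write":                                # BLP write rule
+         return lv(spec["cl"][s]) <= lv(spec["cl"][o])
+     return False
+
+ print(permitted(SPEC, "ann", "report", "read"))      # True
+    \end{lstlisting}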