<!--{{{-->
<!-- MarkupPreHead: injected into the document <head>; advertises the wiki's RSS feed (index.xml) to feed readers -->
<link rel='alternate' type='application/rss+xml' title='RSS' href='index.xml' />
<!--}}}-->
Background: #fff
Foreground: #000
PrimaryPale: #8cf
PrimaryLight: #18f
PrimaryMid: #04b
PrimaryDark: #014
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
/*{{{*/
/* StyleSheetColors: maps ColorPalette slices onto the core UI elements.
   [[ColorPalette::Name]] tokens are substituted by TiddlyWiki when the stylesheet is applied. */
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

/* Links render in the primary colour and invert on hover */
a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

/* Generic command buttons (tiddler toolbars etc.) */
.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

/* Page header: headerShadow sits behind headerForeground to fake a drop shadow */
.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

/* Tab sets: the selected tab shares the contents panel's background so they merge visually */
.tabSelected {color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

/* Wizard dialogs (import/sync etc.) */
.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

/* Sync-status colours for the import/sync wizard (deliberately hard-coded, not palette-driven) */
.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

/* Popup menus: light top/left borders plus dark bottom/right borders give a raised look */
.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

/* Per-tiddler toolbar: links stay faint until the tiddler is selected (hovered) */
.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

/* Rendered (wikified) tiddler content */
.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}
.readOnly {background:[[ColorPalette::TertiaryPale]];}

/* Backstage area (authoring controls revealed above the page) */
#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
/* Semi-transparent overlay shown behind backstage panels; filter is the IE<9 opacity fallback */
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:alpha(opacity=60);}
/*}}}*/
/*{{{*/
/* StyleSheetLayout: geometry, typography and positioning (colour-free; colours live in StyleSheetColors).
   Selectors beginning "* html" and properties beginning "_" are IE6-only hacks. */
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

/* Nested ordered lists cycle decimal -> lower-alpha -> lower-roman */
ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

/* Menu/sidebar links use normal weight regardless of link type */
#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

/* Header: the shadow copy is offset by 1px behind the foreground copy */
.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0 1em 1em; left:0; top:0;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

/* Three-column layout: fixed menu (left), fixed sidebar (right), fluid #displayArea between */
#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 0.3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard {padding:0.1em 1em 0 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0 0; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0;}
.wizardFooter .status {padding:0 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em;}

/* Notification area pinned to the top-right; _position is the IE6 fixed-position fallback */
#messageArea {position:fixed; top:2em; right:0; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em;}
#messageArea a {text-decoration:underline;}

.tiddlerPopupButton {padding:0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tabset {padding:1em 0 0 0.5em;}
.tab {margin:0 0 0 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

/* Margins clear the absolutely-positioned #mainMenu (left) and #sidebar (right) */
#displayArea {margin:1em 17em 0 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

/* Tag panels: .tagging floats left and is only shown on tag tiddlers; .tagged floats right */
.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0 0.25em; padding:0 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0 3px 0 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0; font-size:.9em;}
.editorFooter .button {padding-top:0; padding-bottom:0;}

.fieldsetFix {border:0; padding:0; margin:1px 0px;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

/* Backstage: hidden by default; #backstageButton toggles it, cloak dims the page behind panels */
* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0; right:0;}
#backstageButton a {padding:0.1em 0.4em; margin:0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin-left:3em; padding:1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
StyleSheet for use when a translation requires any css style changes.
This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which need larger font sizes.
***/
/*{{{*/
/* StyleSheetLocale: font-size overrides applied after the base stylesheets */
body {font-size:0.8em;}
#sidebarOptions {font-size:1.05em;}
#sidebarOptions a {font-style:normal;}
#sidebarOptions .sliderPanel {font-size:0.95em;}
.subtitle {font-size:0.8em;}
.viewer table.listView {font-size:0.95em;}
/*}}}*/
/*{{{*/
/* StyleSheetPrint: hide navigation/authoring chrome and reclaim the side margins when printing */
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none !important;}
#displayArea {margin: 1em 1em 0em;}
noscript {display:none;} /* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
}
/*}}}*/
<!--{{{-->
<!-- PageTemplate: overall page skeleton. The 'macro', 'tiddler' and 'refresh' attributes
     are expanded by TiddlyWiki at render time; the duplicated title spans in headerShadow
     and headerForeground produce the drop-shadow effect styled in StyleSheetColors. -->
<div class='header' macro='gradient vert [[ColorPalette::PrimaryLight]] [[ColorPalette::PrimaryMid]]'>
<div class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<div id='mainMenu' refresh='content' tiddler='MainMenu'></div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
<!--{{{-->
<!-- ViewTemplate: layout of a tiddler in view mode (toolbar, title, modifier/date subtitle,
     tag panels, wikified body). The 'view'/'toolbar'/'tagging'/'tags' macros are core TiddlyWiki macros. -->
<div class='toolbar' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<div class='tagClear'></div>
<!--}}}-->
<!--{{{-->
<!-- EditTemplate: layout of a tiddler in edit mode (toolbar, editable title/text/tags,
     annotations, and the tag-chooser footer). -->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser excludeLists'></span></div>
<!--}}}-->
To get started with this blank [[TiddlyWiki]], you'll need to modify the following tiddlers:
* [[SiteTitle]] & [[SiteSubtitle]]: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* [[MainMenu]]: The menu (usually on the left)
* [[DefaultTiddlers]]: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
You'll also need to enter your username for signing your edits: <<option txtUserName>>
These [[InterfaceOptions]] for customising [[TiddlyWiki]] are saved in your browser

Your username for signing your edits. Write it as a [[WikiWord]] (eg [[JoeBloggs]])

<<option txtUserName>>
<<option chkSaveBackups>> [[SaveBackups]]
<<option chkAutoSave>> [[AutoSave]]
<<option chkRegExpSearch>> [[RegExpSearch]]
<<option chkCaseSensitiveSearch>> [[CaseSensitiveSearch]]
<<option chkAnimate>> [[EnableAnimations]]

----
Also see [[AdvancedOptions]]
<<importTiddlers>>
Cosmology today is a vibrant scientific enterprise. New precision measurements are revealing a universe with surprising and unexpected properties, in particular the Dark Matter and Dark Energy which are now believed to be the dominant components of the cosmos. Galaxy surveys such as the Sloan Digital Sky Survey are making the first large-scale maps of the universe, and satellites such as WMAP are making exquisitely precise measurements of the Cosmic Microwave Background (CMB), the haze of relic photons left over from the Big Bang. In turn, these measurements are giving us clues which are helping to unravel one of the oldest and most profound questions people have ever asked: Where did the universe come from? In these lectures, I discuss what is currently the best motivated and most completely developed physical model for the first moments of the universe: cosmological inflation  ```Guth (1981)  [[The Inflationary Universe: A Possible Solution to the Horizon and Flatness Problems|http://inspirehep.net/search?ln=en&p=Guth:1980zm&of=hd]]```  ```Linde (1982)  [[A New Inflationary Universe Scenario: A Possible Solution of the Horizon, Flatness, Homogeneity, Isotropy and Primordial Monopole Problems|http://inspirehep.net/search?ln=en&p=Linde:1981mu&of=hd]]```  ```Albrecht & Steinhardt (1982)  [[Cosmology for Grand Unified Theories with Radiatively Induced Symmetry Breaking|http://inspirehep.net/search?ln=en&p=Albrecht:1982wi&of=hd]]``` . (Inflation in its current form was introduced by Guth, but similar ideas had been discussed before ```E. B. Gliner,
Sov. Phys.-JETP ''22'', 378 (1966).``` ```E. B. Gliner and I. G. Dymnikova, 
Sov. Astron. Lett. ''1'', 93 (1975).``` . A short history of the early development of inflation can be found in Ref.  ```Linde (2008)  [[Inflationary Cosmology|http://inspirehep.net/search?ln=en&p=Linde:2007fr&of=hd]]``` .)  Inflation naturally explains how the universe came to be so large, so old, and so flat, and provides a compellingly elegant and predictive mechanism for generating the primordial perturbations which gave rise to the rich structure we see in the universe today  ```Starobinsky (1979)  [[Relict Gravitation Radiation Spectrum and Initial State of the Universe. (In Russian)|http://inspirehep.net/search?ln=en&p=Starobinsky:1979ty&of=hd]]```  ```Mukhanov & Chibisov (1981)  [[Quantum Fluctuation and Nonsingular Universe. (In Russian)|http://inspirehep.net/search?ln=en&p=Mukhanov:1981xt&of=hd]]```  ```Hawking (1982)  [[The Development of Irregularities in a Single Bubble Inflationary Universe|http://inspirehep.net/search?ln=en&p=Hawking:1982cz&of=hd]]```  ```Hawking & Moss (1983)  [[FLUCTUATIONS IN THE INFLATIONARY UNIVERSE|http://inspirehep.net/search?ln=en&p=Hawking:1982my&of=hd]]```  ```Starobinsky (1982)  [[Dynamics of Phase Transition in the New Inflationary Universe Scenario and Generation of Perturbations|http://inspirehep.net/search?ln=en&p=Starobinsky:1982ee&of=hd]]```  ```Guth & Pi (1982)  [[Fluctuations in the New Inflationary Universe|http://inspirehep.net/search?ln=en&p=Guth:1982ec&of=hd]]```  ```Bardeen, //et al.,// (1983)  [[Spontaneous Creation of Almost Scale - Free Density Perturbations in an Inflationary Universe|http://inspirehep.net/search?ln=en&p=Bardeen:1983qw&of=hd]]``` . Inflation provides a link between the Outer Space of astrophysics and the Inner Space of particle physics, and gives us a window to physics at energy scales far beyond the reach of particle accelerators. Furthermore, inflation makes //testable// predictions, which have so far proven to be an excellent match to the data. 

The lectures are at an advanced undergraduate or beginning graduate student level. Most of the lectures should be accessible with only a background in Special Relativity, although working knowledge of General Relativity and quantum field theory is helpful for the more advanced sections.  Where possible, I reference review articles for further reading on related topics. For other reviews on inflation, see Refs.  ```Lyth & Riotto (1999)  [[Particle physics models of inflation and the cosmological density perturbation|http://inspirehep.net/search?ln=en&p=Lyth:1998xn&of=hd]]```  ```Watson (2000)  [[An Exposition on inflationary cosmology|http://inspirehep.net/search?ln=en&p=Watson:2000hb&of=hd]]```  ```Riotto (2002)  [[Inflation and the theory of cosmological perturbations|http://inspirehep.net/search?ln=en&p=Riotto:2002yw&of=hd]]```  ```Lineweaver (2003)  [[Inflation and the cosmic microwave background|http://inspirehep.net/search?ln=en&p=Lineweaver:2003ie&of=hd]]```  ```Kinney (2003)  [[Cosmology, inflation, and the physics of nothing|http://inspirehep.net/search?ln=en&p=Kinney:2003xf&of=hd]]```  ```Trodden & Carroll (2004)  [[TASI lectures: Introduction to cosmology|http://inspirehep.net/search?ln=en&p=Trodden:2004st&of=hd]]```  ```Linde (2008)  [[Inflationary Cosmology|http://inspirehep.net/search?ln=en&p=Linde:2007fr&of=hd]]```  ```Baumann & Peiris (2009)  [[Cosmological Inflation: Theory and Observations|http://inspirehep.net/search?ln=en&p=Baumann:2008bn&of=hd]]``` .

These lectures are based on:
*  [[2002 Lectures|http://arxiv.org/abs/astro-ph/0301448]]  at the NATO Advanced Study Institute on Techniques and Concepts of High Energy Physics, St. Croix, USVI.  
* [[2007 Lectures|http://www.physics.buffalo.edu/whkinney/talks/w_kinney_ppcs_notes.pdf]] at the Perimeter Institute for Theoretical Physics. 
>Video: [[Day 1|http://pirsa.org/07080017]], [[Day 2|http://pirsa.org/07080021]], [[Day 3|http://pirsa.org/07080024]].
* [[2008 Dirac Lectures|http://www.hep.fsu.edu/~bfield/dirac.html]] at Florida State University.
* 2008 Lectures at the Research Training Group, University of W&uuml;rzburg, Germany. 
* [[2008 Lectures|http://arxiv.org/abs/0902.1529]] at the Theoretical Advanced Study institute at University of Colorado, Boulder.

The fundamental object in General Relativity is the //metric//, which encodes the shape of the spacetime. A metric is a symmetric, bilinear form which defines distances on a manifold. For example, we can express Pythagoras' theorem in a Euclidean three-dimensional space,
\begin{equation}
\ell^2 = x^2 + y^2 + z^2,
\end{equation}
as a matrix product over the identity matrix \(\delta_{ij} = {\rm diag}\left(1,1,1\right)\),
\begin{equation}
\ell^2 = \sum_{i,j = 1,3} \delta_{ij} x^i x^j.
\end{equation}
Therefore the identity matrix \(\delta_{ij}\) can be identified as the metric for the Euclidean space: if we wish to describe a non-Euclidean manifold, we replace \(\delta_{i j}\) with a more complicated matrix \(g_{i j}\), which in general can depend on the coordinates \(x^i\). For an arbitrary path through the space, we express distances on the manifold in differential form,
\begin{equation}
d\ell^2 = \sum_{i, j}{g_{ij} dx^i dx^j}.
\end{equation}
The distance along any path in the spacetime, or //world line//, is then given by integrating \(d\ell\) along that path. A familiar example of a non-Euclidean space frequently used in physics is the Minkowski Space describing spacetime in Special Relativity. Distances along a world line in Minkowski Space are measured by the //proper time//, which is the time as measured by an observer traveling on that world line. The proper time  \(s\) along a world line is given by the relation
\begin{eqnarray}
ds^2 &&= dt^2 - d{\bf x}^2\cr
&&= \sum_{\mu,\nu = 0, 3} \eta_{\mu \nu} dx^\mu dx^\nu,
\end{eqnarray}
where we take the speed of light \(c = 1\). We express four-vectors as \({\tilde x} = (t, x, y, z) = (x^0, x^1, x^2, x^3)\), and \(d{\bf x}^2 = dx^2 + dy^2 + dz^2\) is the Euclidean distance along a spatial interval. The metric \(\eta_{\mu \nu}\) for Minkowski Space is given by
\begin{equation}
\eta_{\mu\nu} = \left(\begin{array}{cccc}
1& & & \\
 &-1& & \\
 & &-1& \\
 & & &-1
\end{array}\right).
\end{equation}
Anything traveling at the speed of light has velocity \(d\left\vert{\bf x}\right\vert / dt = 1\). Photons therefore always travel along world lines of zero proper time, \(ds^2 = dt^2 - d{\bf x}^2 = 0\), called //null geodesics//. Massive particles travel along world lines with real proper time, \(ds^2 > 0\), called //timelike geodesics//. Causally disconnected regions of spacetime are separated by //spacelike// intervals, with \(ds^2 < 0\). The set of all null geodesics passing through a given point (or //event//) in spacetime is called the //light cone//.  The interior of the light cone, consisting of all null and timelike geodesics, defines the region of spacetime causally related to that event. 


<<tiddler [[Figure: Minkowski Light Cones]]>>





The Minkowski metric \(\eta_{\mu \nu}\) of Special Relativity describes a flat spacetime which is static, empty, and infinite in space and time. The addition of gravity to the picture requires General Relativity, which describes gravitational fields as curvature in the spacetime. The fundamental object in General Relativity is the metric \(g_{\mu \nu}\left({\tilde x}\right)\), which describes the shape of the spacetime and in general depends on the spacetime coordinate \({\tilde x}\). As in Minkowski Space, lengths in curved spacetime are measured by the proper time \(s\), with the proper time along a world line determined by the metric
\begin{equation}
ds^2 = \sum_{\mu,\nu = 0, 3} g_{\mu \nu}\left({\tilde x}\right) dx^\mu dx^\nu.
\end{equation}
As in Special Relativity, photons travel along null geodesics, with \(ds^2 = 0\), and massive particles travel along timelike geodesics, with \(ds^2 > 0\). However, unlike Special Relativity, null geodesics need not always be \(45^\circ\) lines defining light cones, but can be curved by gravity. 

In General Relativity, the distribution of mass/energy in the spacetime determines the shape of the metric, and the metric in turn determines the evolution of the mass/energy. Electromagnetism provides a convenient analogy: in electromagnetism, the distribution of charges and currents determines the electromagnetic field, and the electromagnetic field in turn determines the evolution of the charges and currents. Given a current four-vector \(J^{\mu}\), Maxwell's Equations are a set of linear, first-order partial differential equations that allow us to calculate the resulting electromagnetic field
\begin{equation}
\partial_\nu F^{\mu \nu} \equiv \sum_{\nu = 0, 3} \partial_\nu F^{\mu \nu} = \frac{4 \pi}{c} J^{\mu}.
\end{equation}
Here we have explicitly included the speed of light \(c\) to highlight its role as an electromagnetic coupling constant. We also adopt the typical summation convention for relativity: repeated indices are implicitly summed over. In General Relativity, we describe the distribution of mass/energy in a covariant way by specifying a symmetric rank-2 //stress-energy// tensor \(T_{\mu \nu}\), which acts as a source for the gravitational field similar to the way the current four-vector \(J^{\mu}\) sources electromagnetism. The analog of Maxwell's Equations is the Einstein Field Equation, which can be written in the deceptively simple form
<<tiddler [[eq:EFE]]>>
where the coupling constant is Newton's gravitational constant \(G\). The tensor \(G_{\mu\nu}\), called the Einstein Tensor, is a symmetric \(4 \times 4\) tensor consisting of the metric \(g_{\mu\nu}\) and its first and second derivatives. The Einstein Field Equation therefore represents a set of ten coupled, nonlinear, second-order partial differential equations of ten free functions, which are the elements of the metric tensor \(g_{\mu \nu}\). However, only six of these equations are actually independent, leaving four degrees of freedom. The physics of gravity is independent of coordinate system, and the additional degrees of freedom correspond to a choice of a coordinate system, or //gauge// on the four-dimensional space. Gravity is //much// more complicated than electromagnetism! As with any intractably complicated problem, we simplify the job by introducing a symmetry. In General Relativity there are a number of symmetries which allow either exact or perturbative solution to the Einstein Field Equations:

* Vacuum: \(T_{\mu \nu} = 0\). If we evaluate the Einstein Field Equations for small perturbations about an empty Minkowski Space, we find that they reduce at lowest order to a wave equation, and therefore General Relativity predicts the existence of gravity waves.
* Spherical Symmetry. If we assume a spherically symmetric spacetime (also empty of matter, \(T_{\mu \nu} = 0\)) the Einstein Field Equation can be solved exactly, resulting in the Schwarzschild solution for black holes.
* Homogeneity and Isotropy. If we assume that the stress-energy is distributed in a fashion which is homogeneous and isotropic, this is called a //Friedmann-Robertson-Walker// (FRW) space, and is the case of interest for cosmology. Since the homogeneity and isotropy remove all spatial dependence, the Einstein Field Equations reduce from a set of partial differential equations to a set of nonlinear ordinary differential equations in time. For particular types of homogeneous, isotropic matter, these equations can be solved exactly, and perturbations about those exact solutions can be handled self-consistently.

Continuing the analogy with electromagnetism, the equivalent of charge conservation,
\begin{equation}
\partial_\mu J^\mu = \frac{\partial \rho}{\partial t} + \nabla \cdot {\bf j} = 0,
\end{equation}
in General Relativity is stress-energy conservation
<<tiddler [[eq:SEconservation]]>>
where \(D_\mu\) represents a covariant derivative, which is a generalization of the partial derivative to a curved manifold. We will also denote covariant derivatives with a semicolon, for example \(T^{\mu\nu}{}_{\!;\nu} = 0\). Likewise, simple partial derivatives are denoted with a comma, \(\partial f / \partial x^{\mu} \equiv \partial_\mu f \equiv f_{,\mu}\). As in the case of electromagnetism, where the charge conservation equation is not independent, but is instead a consequence of Maxwell's Equations, stress-energy conservation in General Relativity is a consequence of the Einstein Field Equations and does not independently constrain the solutions. In the next section, we discuss FRW spaces and their application to cosmology in more detail. 
A //homogeneous// space is one which is translationally invariant, or the same at every point. An //isotropic// space is one which is rotationally invariant, or the same in every direction. The two are not the same: a space which is everywhere isotropic is necessarily homogeneous, but a space which is homogeneous is not necessarily isotropic. (Consider, for example a space with a uniform electric field: it is translationally invariant but not rotationally invariant.) It is possible to show ```Weinberg,
//Gravitation and Cosmology: Principles and Applications of the General Theory of Relativity//
 (1972) Ch. 13.```  that the most general metric consistent with homogeneity and isotropy is obtained by multiplying a static spatial geometry with a time-dependent //scale factor// \(a(t)\):
<<tiddler [[eq:generalFRWmetric]]>>
where we have expressed the spatial line element in terms of spherical coordinates \(r\),\(\theta\),\(\phi\), and the solid angle is given by the usual \(d\Omega^2 = d\theta^2 + \sin^2{\theta}\, d\phi^2\). The constant \(k\) defines the curvature of the spacetime, with \(k = 0\) corresponding to flat (Euclidean) spatial sections, and \(k = \pm 1\) corresponding to positive and negative curvatures, respectively. A spacetime of this general form is called a //Friedmann-Robertson-Walker// (FRW) spacetime. Likewise, the most general homogeneous, isotropic stress-energy is diagonal, with all of its spatial components identical,
<<tiddler [[eq:fluidstressenergy]]>>
where we identify the energy density \(\rho\) and the pressure \(p\) from the continuity equation arising from stress-energy conservation,
<<tiddler [[eq:continuity]]>>

The Einstein field equations then reduce to a set of two coupled, non-linear ordinary differential equations,
<<tiddler [[eq:generalFRW]]>>
The first is called the //Friedmann Equation//, and the second is called the //Raychaudhuri Equation//. Note that the equations for the evolution of the scale factor depend not only on the energy density \(\rho\), but also the pressure \(p\): pressure gravitates!  The [[continuity equation |eq:continuity]] is //not// independent of the [[Einstein Field Equations|eq:generalFRW]], but can be derived directly from the Friedmann and Raychaudhuri Equations. The expansion rate \(\dot a / a\) is called the //Hubble parameter// \(H\):
\begin{equation}
H \equiv \frac{\dot a}{a},
\end{equation}
and has units of inverse time. A positive Hubble parameter \(H > 0\) corresponds to an expanding universe, and a negative Hubble parameter \(H < 0\) corresponds to a collapsing universe. (Since our actual universe is expanding, we will specialize to that case.) Minkowski Space can be recovered by assuming a flat geometry \(k = 0\), and no expansion, \(\dot a = 0\). The Hubble parameter sets the fundamental scale of the spacetime, //i.e.// a characteristic time is the //Hubble time// \(t \sim H^{-1}\), and likewise the //Hubble length// is \(d \sim H^{-1}\). We will see later that the Hubble time sets the scale for the age of the universe, and the Hubble length sets the scale for the size of the observable universe. 

The coordinate system \(\left(t,{\bf x}\right)\) is called a //comoving// coordinate system, because observers with constant comoving coordinates are at rest relative to the expansion, //i.e.// two observers with constant separation in comoving coordinates \(\Delta{\bf x}\) have a physical, or //proper//, separation which increases in proportion to the scale factor
\begin{equation}
\Delta{\bf x}_{\rm prop} = a\left(t\right) \Delta{\bf x}_{\rm com}.
\end{equation}
An important kinematic effect of cosmological expansion is the phenomenon of //cosmological redshift//: we will see later that solutions to the wave equation in an FRW space have constant wavelength in //comoving// coordinates, so that the proper wavelength of (for example) a photon increases in time in proportion to the scale factor
\begin{equation}
\lambda \propto a\left(t\right).
\end{equation}
For a photon emitted at time \(t_{\rm em}\) and detected at time \(t_0\), the redshift \(z\) is defined by:
\begin{equation}
\left(1 + z\right) \equiv \frac{\lambda_0}{\lambda_{\rm em}} = \frac{a\left(t_0\right)}{a\left(t_{\rm em}\right)}.
\end{equation}
(Here we introduce the convention used frequently in cosmology that a subscript \(0\) refers to the //current// time, not an initial time.) Note that the cosmological redshift is //not// a Doppler shift caused by the relative velocity of the source and detector, but is an expansion effect: the wavelength of a photon traveling through the spacetime increases because the underlying spacetime is expanding. Another way to look at this is that a photon traveling through an FRW spacetime loses momentum with time,
<<tiddler [[eq:freqredshift]]>>
By the equivalence principle, this momentum loss must apply to massive particles as well as photons: //any// particle moving in an expanding FRW spacetime will lose momentum as \(p \propto a^{-1}\). For massless particles like photons, this is manifest as a redshift in the wavelength, but it means that a massive particle will asymptotically come to rest relative to the comoving coordinate system. Thus, comoving coordinates represent a preferred reference frame reminiscent of Aristotelian physics: any free body with a "peculiar" velocity relative to the comoving frame will eventually come to rest in that frame. 

There are three possibilities for the curvature of the universe: flat (\(k = 0\)), positively curved (\(k = +1\)), or negatively curved (\(k = -1\)). The current value of the Hubble parameter is (from the Hubble Space Telescope Key Project  ```Freedman, //et al.,// (2001)  [[Final results from the Hubble Space Telescope key project to measure the Hubble constant|http://inspirehep.net/search?ln=en&p=Freedman:2000cf&of=hd]]``` ),
\begin{equation}
H_0 = 72 \pm 8\ {\rm km/s/Mpc}.
\end{equation}
Therefore, we can see from the [[Friedmann Equation|eq:generalFRW]] that, given the expansion rate \(H\), the curvature is determined by the density:
\begin{equation}
k = a^2 \left(\frac{8 \pi}{3 m_{\rm Pl}^2} \rho - H^2\right).
\end{equation}
Note that only the //sign// of \(k\) is physically important, since any rescaling of \(k\) is equivalent to a rescaling of the scale factor \(a\). We define the //critical density// as the density for which \(k = 0\), corresponding to a geometrically flat universe,
\begin{equation}
\rho_c \equiv \frac{3 m_{\rm Pl}^2}{8 \pi} H^2\ \Rightarrow\ k = 0.
\end{equation}
For \(\rho > \rho_c\), the universe is positively curved and //closed//, with finite volume, and for \(\rho < \rho_c\), the universe is negatively curved and //open//, with infinite volume. We express the ratio of the actual density \(\rho\) to the critical density \(\rho_c\) as the parameter \(\Omega\):
<<tiddler [[eq:defOmega]]>>
(Do not confuse the density parameter \(\Omega\) with the solid angle \(d\Omega\) in the [[FRW metric|eq:generalFRWmetric]]!) The relation between density, curvature, and geometry is then:
* \(\Omega = 1\) :  \(k = 0\)  (flat)
* \(\Omega > 1\) : \(k = 1\)  (closed)
* \(\Omega < 1\) : \(k = -1\) (open)
The density parameter \(\Omega\) is not in general constant in time, and we can re-write the Friedmann Equation as
<<tiddler [[eq:FRWOmega]]>>
Since the Hubble parameter is proportional to the inverse time \(H \propto t^{-1}\), we see that the time-dependence of \(\Omega\) is determined by the time dependence of the scale factor \(a\left(t\right)\). In the next section, we tackle the problem of solving for \(a\left(t\right)\). 
General relativity combined with homogeneity and isotropy leads to a startling conclusion: spacetime is dynamic. The universe is not static, but is bound to be either expanding or contracting. In the early 1900's, Einstein applied general relativity to the homogeneous and isotropic case, and upon seeing the consequences, decided that the answer had to be wrong. Since the universe was obviously static, the equations had to be fixed. Einstein's method for fixing the equations involved the evolution of the density \(\rho\) with expansion. Returning to our analogy between General Relativity and electromagnetism, we remember that Maxwell's equations imply the conservation of charge,
\begin{equation}
\partial_\mu J^{\mu} = 0,
\end{equation}
or, in vector notation,
\begin{equation}
{\partial  \rho \over \partial t} + \nabla \cdot {\bf J} = 0.
\end{equation}

The general relativistic equivalent to charge conservation is stress-energy conservation,
\begin{equation}
D_{\mu} T^{\mu \nu} = 0.
\end{equation}
Einstein noticed that if we take the stress-energy \(T_{\mu \nu}\) and add a constant \(\Lambda\), the conservation equation is unchanged:
\begin{equation}
D_{\mu} T^{\mu \nu} = D_{\mu} \left(T^{\mu \nu} + \Lambda g^{\mu \nu}\right) = 0.
\end{equation}
In our analogy with electromagnetism, this is like adding a constant to the electromagnetic potential, \(V'(x) = V(x) + \Lambda\). The constant \(\Lambda\) does not affect local dynamics in any way, but it does affect the cosmology. Since adding this constant adds a constant energy density to the universe, the continuity equation tells us that this is equivalent to a fluid with //negative// pressure, \(p_{\Lambda} = -\rho_{\Lambda}\). Einstein chose  \(\Lambda\) to give a closed, static universe as follows  ```Weinberg, //Gravitation and Cosmology// (1972), pp. 613.``` . Take the energy density to consist of matter
\begin{eqnarray}
\rho_{\rm M} &&= {k \over 4 \pi G a^2}\cr
p_{\rm M} && = 0,
\end{eqnarray}
and cosmological constant
\begin{eqnarray}
\rho_{\Lambda} &&= {k \over 8 \pi G a^2}\cr
p_{\rm \Lambda} && =  - \rho_{\Lambda}.
\end{eqnarray}
It is a simple matter to use the Friedmann equation to show that this combination of matter and cosmological constant leads to a static universe \(\dot a = \ddot a = 0\). In order for the energy densities to be positive, the universe must be closed, \(k = +1\). Einstein was able to add a kludge to get the answer he wanted.

Things sometimes happen in science with uncanny timing. In the 1920's, an astronomer named Edwin Hubble undertook a project to measure the distances to the spiral "nebulae" as they had been known, using the 100-inch Mount Wilson telescope. Hubble's method involved using [[Cepheid variables|https://en.wikipedia.org/wiki/Cepheid_variable]], named after the star Delta Cephei, the best known member of the class.```Delta Cephei is not, however the nearest Cepheid. That honor goes to Polaris, the north star  (Kervella, //et al.,// (2012)  [[Interferometry, spectroscopy and astrometry of the bright eclipsing system Delta Velorum|http://inspirehep.net/search?ln=en&p=hipparcos&of=hd]]).``` Cepheid variables have the useful property that the period of their variation, usually 10-100 days, is correlated to their absolute brightness. Therefore, by measuring the apparent brightness and the period of a distant Cepheid, one can determine its absolute brightness and therefore its distance. Hubble applied this method to a number of nearby galaxies, and determined that almost all of them were receding from the earth. Moreover, the more distant the galaxy was, the faster it was receding, according to a roughly linear relation:
\begin{equation}
v = H_0 d.
\end{equation}
This is the famous Hubble Law, and the constant \(H_0\) is known as Hubble's constant. Hubble's original value for the constant was something like \(500\ {\rm km/sec/Mpc}\). Since one megaparsec (\({\rm Mpc}\)) is a bit over 3 million light years, this implied an age for the universe of about a billion years, and contradicted known geological estimates for the age of the earth. Cosmology had its first "age problem": the universe can't be younger than the things in it! Later it was realized that Hubble had failed to account for two distinct types of Cepheids, and once this discrepancy was taken into account, the Hubble constant fell to well under \(100\ {\rm km/s/Mpc}\). The current best estimate, determined using the Hubble space telescope to resolve Cepheids in galaxies at unprecedented distances, is \(H_0 = 72 \pm 8\ {\rm km/s/Mpc}\)   ```Freedman, //et al.,// (2001)  [[Final results from the Hubble Space Telescope key project to measure the Hubble constant|http://inspirehep.net/search?ln=en&p=Freedman:2000cf&of=hd]]``` . In any case, the Hubble law is exactly what one would expect from the Friedmann equation. The expansion of the universe predicted (and rejected) by Einstein had been observationally detected, only a few years after the development of General Relativity. Einstein is said to have later referred to the introduction of the cosmological constant as his "greatest blunder" (a quote which may be apocryphal: see p. 9 of the review by Padmanabhan```Padmanabhan (2003)  [[Cosmological constant: The Weight of the vacuum|http://inspirehep.net/search?ln=en&p=Padmanabhan:2002ji&of=hd]]``` ).

In the previous section, we considered the form and kinematics of FRW spaces, but not the //dynamics//, that is, how does the stress-energy of the universe determine the expansion history? The answer to this question depends on what kind of matter dominates the cosmological stress-energy. In this section, we consider three basic types of cosmological stress-energy: matter, radiation, and vacuum. 

The simplest kind of cosmological stress-energy is generically referred to as //matter//. Imagine a comoving box with sides of length \(L\). By //comoving// box, we mean a box whose corners are at rest in a comoving coordinate system, and whose proper dimension is therefore increasing proportional to the scale factor, \(L_{\rm prop} \propto a\). That is, the box is growing with the expansion of the universe. Now imagine the box filled with \(N\) particles of mass \(M\), also at rest in the comoving reference frame.

<<tiddler [[fig:boxofmatter]]>>

In units where \(c = 1\), the relativistic energy density of such a system of particles is given by
\begin{equation}
\rho_{\rm m} = \frac{M N}{V},
\end{equation}
where \(V\) is the //proper// volume of the box, \(V = L_{\rm prop}^3 \propto a^3\). Since neither \(M\) nor \(N\) change with expansion, we have immediately that
\begin{equation}
\rho_{\rm m} = \frac{M N}{L^3 a^3} \propto a^{-3},
\end{equation}
where \(L\) is the comoving size of the box. So the proper energy density of massive particles at rest in a comoving volume evolves as the inverse cube of the scale factor. Now imagine the same box filled with \(N\) photons with frequency \(\nu\).

<<tiddler [[fig:boxofradiation]]>>

The energy per photon is \(h \nu\), so that the energy density in the box is then 
\begin{equation}
\rho_{\gamma} = \frac{N h \nu}{V}.
\end{equation}

As in the case of massive particles, the number density of photons in the box redshifts inversely with the proper volume of the box \(n = N / V \propto a^{-3}\). But each photon also loses energy through [[cosmological redshift|eq:freqredshift]], \(\nu \propto a^{-1}\), so that the total energy density in photons or other massless degrees of freedom, which we generically refer to as //radiation//, redshifts as
\begin{equation}
\rho_{\gamma} \propto a^{-4}.
\end{equation}
Note also that cosmological redshift immediately gives us a rule for the behavior of a black-body spectrum of radiation with temperature \(T\). Since all photons redshift at exactly the same rate, a system which starts out as a black-body //stays// a black-body, with a temperature that decreases with expansion,
\begin{equation}
T_{\gamma} \propto a^{-1}.
\end{equation}

The third type of stress-energy which is important in cosmology dates back to Einstein's introduction of a "cosmological constant" to his field equations. If we take the stress-energy \(T_{\mu \nu}\) and add a term proportional to the metric, the identity \(D_\nu g^{\mu\nu} = 0\) means the [[stress-energy conservation equation|eq:SEconservation]] is unchanged:
\begin{equation}
D_{\nu} T^{\mu \nu} \rightarrow D_{\nu} \left(T^{\mu \nu} + \Lambda g^{\mu \nu}\right) = 0.
\end{equation}
In our analogy with electromagnetism, this is like adding a constant to the electromagnetic potential, \(V'(x) = V(x) + \Lambda\). The constant \(\Lambda\) does not affect local dynamics in any way, but it does affect the cosmology. We see from the [[stress energy tensor|eq:fluidstressenergy]] for a perfect fluid that stress-energy of the form \(T^{\mu\nu} = \Lambda g^{\mu\nu}\) corresponds to an equation of state
\begin{equation}
p_{\Lambda} = -\rho_{\Lambda}.
\end{equation}
The [[continuity equation|eq:continuity]] then reduces to
\begin{equation}
\dot\rho + 3 \left(\frac{\dot a}{a}\right) \left(\rho + p\right) = \dot\rho = 0,
\end{equation}
so that vacuum has a constant energy density, \(\rho_\Lambda = {\rm const.}\) A cosmological constant is also frequently referred to as //vacuum energy//, since it is as if we are assigning an energy density to empty space. With this interpretation, a comoving box full of vacuum contains a total amount of energy which //grows// with the expansion of the universe. 

<<tiddler [[fig:boxofvac]]>>

This highlights the curious property of General Relativity that, while energy is conserved in a local sense, it is //not// conserved globally. We are creating energy  out of nothing!

It is straightforward to solve the Einstein Field Equations for the three basic types of stress-energy. Consider first a matter-dominated universe. We can write the time derivative of the energy density as:
\begin{equation}
\rho_{\rm m} \propto a^{-3}\ \Rightarrow \dot\rho_{\rm m} = - 3 \left(\frac{\dot a}{a}\right) \rho.
\end{equation}
From the [[continuity equation|eq:continuity]], we have
\begin{equation}
\dot\rho + 3 \left(\frac{\dot a}{a}\right) \left(\rho + p\right) = 3 \left(\frac{\dot a}{a}\right) p = 0.
\end{equation}
We then have that the pressure of matter vanishes, \(p_{\rm m} = 0\). The matter-dominated Friedmann Equation becomes
\begin{equation}
\left(\frac{\dot a}{a}\right)^2 + \frac{k}{a^2} = \frac{8 \pi}{3 m_{\rm Pl}^2} \rho \propto a^{-3}. 
\end{equation}
In the case of a flat universe, \(k = 0\), the solution is especially simple:
\begin{equation}
\left(\frac{\dot a}{a}\right)^2 \propto a^{-3}\ \Rightarrow a\left(t\right) \propto t^{2/3}. 
\end{equation}
Similarly, for a radiation dominated universe, the continuity equation implies that
\begin{equation}
\rho_{\gamma} \propto a^{-4}\ \Rightarrow p_{\gamma} = \rho_{\gamma} / 3.
\end{equation}
Again assuming a flat geometry,
\begin{equation}
\left(\frac{\dot a}{a}\right)^2 \propto a^{-4}\ \Rightarrow a\left(t\right) \propto t^{1/2}. 
\end{equation}
Finally, solving the Friedmann Equation for the vacuum case gives
\begin{equation}
\left(\frac{\dot a}{a}\right)^2 \propto \rho_{\Lambda} = {\rm const.}\ \Rightarrow a\left(t\right) \propto e^{H t},
\end{equation}
so that the universe expands exponentially quickly, with a time constant given by the Hubble parameter
\begin{equation}
H = \sqrt{\frac{8 \pi}{3 m_{\rm Pl}^2} \rho_{\Lambda}} = {\rm const.}
\end{equation} 
Such a spacetime is called //de Sitter space//. 

Note in particular that the energy density in radiation redshifts away more quickly than the energy density in matter, and vacuum energy does not redshift at all, so that a universe with a mix of radiation, matter and vacuum will be radiation-dominated at early times, matter-dominated at later times, and eventually vacuum-dominated.

<<tiddler [[fig:rhovsz]]>>

Note also that for either matter- or radiation-domination, the universe is singular as \(t \rightarrow 0\): the universe has finite age! Since the scale factor vanishes at \(t = 0\), and the density scales as an inverse power of \(a\), the initial singularity consists of infinite density. Likewise, since temperature also scales inversely with \(a\), the initial singularity is also a point of infinite temperature. We therefore arrive at the standard hot Big Bang picture of the universe: a cosmological singularity at finite time in the past, followed by a hot, radiation-dominated expansion, during which the universe gradually cools as \(T \propto a^{-1}\) and the radiation dilutes, followed by a period of matter-dominated expansion during which galaxies and stars and planets form. Finally, if the vacuum energy is nonzero, it will inevitably dominate, and the universe will enter a state of exponential expansion. Current evidence indicates that the real universe made a transition from matter-domination to vacuum-domination at a redshift of around \(z = 1\), or about a billion years ago, so that the densities of the three types of matter today are of order
\begin{eqnarray}
\Omega_\Lambda &&\simeq 0.7,\cr
\Omega_{\rm m} &&\simeq 0.3,\cr
\Omega_{\gamma} &&\simeq 10^{-4}.
\end{eqnarray}
In the [[(1.6) The Hot Big Bang and the Cosmic Microwave Background]], we discuss one important prediction of the hot Big Bang: the presence of a background of relic photons from the early universe, called the //Cosmic Microwave Background//. 
The basic picture of an expanding, cooling universe leads to a number of startling predictions: the formation of nuclei and the resulting primordial abundances of elements, and the later formation of neutral atoms and the consequent presence of a cosmic background of photons, the Cosmic Microwave Background (CMB)  ```White, //et al.,// (1994)  [[Anisotropies in the cosmic microwave background|http://inspirehep.net/search?ln=en&p=White:1994sx&of=hd]]```  ```Hu & Dodelson (2002)  [[Cosmic microwave background anisotropies|http://inspirehep.net/search?ln=en&p=Hu:2001bc&of=hd]]```  ```Kosowsky (2001)  [[The cosmic microwave background|http://inspirehep.net/search?ln=en&p=Kosowsky:2001ue&of=hd]]```  ```Samtleben, //et al.,// (2007)  [[The Cosmic microwave background for pedestrians: A Review for particle and nuclear physicists|http://inspirehep.net/search?ln=en&p=Samtleben:2007zz&of=hd]]```  ```Hu (2008)  [[Lecture Notes on CMB Theory: From Nucleosynthesis to Recombination|http://inspirehep.net/search?ln=en&p=Hu:2008hd&of=hd]]``` . A rough history of the universe can be given as a time line of increasing time and decreasing temperature  ```Kolb:1990Ch3``` :

* \(T = \infty\), \(t = 0\): Big Bang.
* \(T \sim 10^{15}\ K\), \(t \sim 10^{-12}\ {\rm sec}\): Primordial soup of fundamental particles.
* \(T \sim 10^{13}\ K\), \(t \sim 10^{-6}\ {\rm sec}\): Protons and neutrons form.
* \(T \sim 10^{10}\ K\), \(t \sim 3\ {\rm min}\): Nucleosynthesis: nuclei form.
* \(T \sim 3000\ K\), \(t \sim 300,000\ {\rm years}\): Atoms form.
* \(T \sim 10\ K\), \(t \sim 10^{9}\ {\rm years}\): Galaxies form.
* \(T \sim 3\ K\), \(t \sim 10^{10}\ {\rm years}\): Today.

The epoch at which atoms form, when the universe was at an age of 300,000 years and at a temperature of around \(3000\ {\rm K}\)  is oxymoronically referred to as "recombination", despite the fact that electrons and nuclei had never before "combined" into atoms. The physics is simple: at a temperature of greater than about \(3000\ {\rm K}\), the universe consisted of an ionized plasma of mostly protons, electrons, and photons, with a few helium nuclei and a tiny trace of lithium. The important characteristic of this plasma is that it was //opaque//, or, more precisely, the mean free path of a photon was a great deal smaller than the Hubble length. As the universe cooled and expanded, the plasma "recombined" into neutral atoms, first the helium, then a little later the hydrogen. 

<<tiddler [[fig:recombination]]>>

If we consider hydrogen alone, the process of recombination can be described by the //Saha equation// for the  equilibrium ionization fraction \(X_{\rm e}\) of the hydrogen  ```Kolb:1990Ch3``` :
<<tiddler [[eq:sahaequation]]>>
Here \(m_{\rm e}\) is the electron mass and \(13.6\ {\rm eV}\) is the ionization energy of hydrogen. The physically important parameter affecting recombination is the density of protons and electrons compared to photons. This is determined by the //baryon asymmetry//,```If there were no excess of baryons over antibaryons, there would be no protons and electrons to recombine, and the universe would be just a gas of photons and neutrinos!``` which is defined as the ratio of baryons to photons:
\begin{equation}
\eta \equiv \frac{n_{\rm b} - n_{\rm \bar b} }{ n_\gamma} = 2.68 \times 10^{-8} 
\left(\Omega_{\rm b} h^2\right).
\end{equation}
Here \(\Omega_{\rm b}\) is the baryon density and \(h\) is the Hubble constant in units of \(100\ {\rm km/s/Mpc}\),
\begin{equation}
h \equiv H_0 / (100\ {\rm km /s/Mpc}).
\end{equation}
The most recent result from the WMAP satellite gives \(\Omega_{\rm b} h^2 = 0.02273 \pm 0.00062\)  ```Dunkley, //et al.,// (2009)  [[Five-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Likelihoods and Parameters from the WMAP data|http://inspirehep.net/search?ln=en&p=Dunkley:2008ie&of=hd]]``` . Recombination happens quickly (i.e., in much less than a Hubble time \(t \sim H^{-1}\)), but it is not instantaneous. The universe goes from a completely ionized state to a neutral state over a range of redshifts \(\Delta z \sim 200\). If we define recombination as an ionization fraction \(X_{\rm e} = 0.1\), we have that the temperature at recombination \(T_{\rm R} = 0.3\ {\rm eV}\).

What happens to the photons after recombination? Once the gas in the universe is in a neutral state, the mean free path for a photon becomes much larger than the Hubble length. The universe is then full of a background of freely propagating photons with a blackbody distribution of frequencies. At the time of recombination, the background radiation has a temperature of \(T = T_{\rm R} = 3000\ {\rm K}\), and as the universe expands the photons redshift, so that the temperature of the photons drops with the increase of the scale factor, \(T \propto a(t)^{-1}\). We can detect these photons today. Looking at the sky, this background of photons comes to us evenly from all directions, with an observed temperature of \(T_0 \simeq 2.73\ {\rm K}\). This allows us to determine the redshift of recombination,
\begin{equation}
1 + z_{\rm R} = \frac{a\left(t_0\right) }{ a\left(t_{\rm R}\right)} = \frac{T_{\rm R} }{ T_0} \simeq 1100.
\end{equation}
This is the cosmic microwave background. Since by looking at higher and higher redshift objects, we are looking further and further back in time, we can view the observation of CMB photons as imaging a uniform "surface of last scattering" at a redshift of 1100.

<<tiddler [[fig:lss]]>>

To the extent that recombination happens at the same time and in the same way everywhere, the CMB will be of precisely uniform temperature.  While the observed CMB is highly isotropic, it is not perfectly so. The largest contribution to the anisotropy of the CMB as seen from earth is simply Doppler shift due to the earth's motion through space. (Put more technically, the motion is the earth's motion relative to a comoving cosmological reference frame.) CMB photons are slightly blueshifted in the direction of our motion and slightly redshifted opposite the direction of our motion. This blueshift/redshift shifts the temperature of the CMB so the effect has the characteristic form of a "dipole" temperature anisotropy. The dipole anisotropy, however, is a //local// phenomenon. Any intrinsic, or primordial, anisotropy of the CMB is potentially of much greater cosmological interest. To describe the anisotropy of the CMB, we remember that the surface of last scattering appears to us as a spherical surface at a redshift of \(1100\). Therefore the natural way to describe the anisotropy of the CMB sky is as an expansion in spherical harmonics \(Y_{\ell m}\):
\begin{equation}
\frac{\Delta T }{ T} = \sum_{\ell = 1}^{\infty} \sum_{m = -\ell}^{\ell}{a_{\ell m} 
Y_{\ell m}\left(\theta,\phi\right)}.
\end{equation}
If we assume isotropy, there is no preferred direction in the universe, and we expect the physics to be independent of the index \(m\). We can then define
\begin{equation}
C_{\ell} \equiv \frac{1}{2 \ell + 1} \sum_{m}{\left\vert a_{\ell m} \right\vert^2}.
\end{equation}
The \(\ell = 1\) contribution is just the dipole anisotropy,
\begin{equation}
\left(\frac{\Delta T }{ T}\right)_{\ell = 1} \sim 10^{-3}.
\end{equation}

The dipole was first measured in the 1970's by several groups  ```Henry:1971```  ```Corey:1976```  ```Smoot:1977``` . It was not until more than a decade after the discovery of the dipole anisotropy that the first observation was made of anisotropy for \(\ell \geq 2\), by the differential microwave radiometer aboard the Cosmic Background Explorer (COBE) satellite  ```Bennett, //et al.,// (1996)  [[Four year COBE DMR cosmic microwave background observations: Maps and basic results|http://inspirehep.net/search?ln=en&p=Bennett:1996ce&of=hd]]``` , launched in 1990. COBE observed that the anisotropy at the quadrupole and higher \(\ell\) was two orders of magnitude smaller than the dipole:
\begin{equation}
\left(\frac{\Delta T }{ T}\right)_{\ell > 1} \simeq 10^{-5}.
\end{equation} 

<<tiddler [[fig:COBE]]>>

<<tiddler [[fig:WMAP]]>>

The simplest contribution to the CMB anisotropy from density fluctuations is just a gravitational redshift, known as the //Sachs-Wolfe effect//  ```Sachs:1967``` . A photon coming from a region which is slightly denser than the average will have a slightly larger redshift due to the deeper gravitational well at the surface of last scattering. Conversely, a photon coming from an underdense region will have a slightly smaller redshift. Thus we can calculate the CMB temperature anisotropy due to the slightly varying Newtonian potential \(\Phi\) from density fluctuations at the surface of last scattering:
<<tiddler [[eq:SachsWolfe]]>>
where \(\Phi_{\rm em}\) is the potential at the point the photon was emitted on the surface of last scattering, and \(\Phi_{\rm obs}\) is the potential at the point of observation, which can be treated as a constant. The factor \(1/3\) is a General Relativistic correction. This simple kinematic contribution to the CMB anisotropy is dominant on large angular scales, corresponding to multipoles \(\ell < 100\).  However, the amount of information we can gain from these multipoles is limited by an intrinsic source of error called //cosmic variance//. Cosmic variance is a result of the statistical nature of the primordial power spectra: since we have only one universe to measure, we have only one realization of the random field of density perturbations, and therefore there is an inescapable \(1 / \sqrt{N}\) uncertainty in our ability to reconstruct the primordial power spectrum, where \(N\) is the number of independent wave modes which will fit inside the horizon of the universe! On very large angular scales, this problem becomes acute, and we can write the cosmic variance error on any given \(C_\ell\) as
\begin{equation}
\frac{\Delta C_\ell}{C_\ell} = \frac{1}{\sqrt{2 \ell + 1}},
\end{equation}
which comes from the fact that any \(C_\ell\) is represented by \(2 \ell + 1\) independent amplitudes \(a_{\ell m}\). Even a perfect observation of the CMB can only approximately measure the true power spectrum -- the errors in the WMAP data, for example, are dominated by cosmic variance out to \(\ell \sim 400\) .

For fluctuation modes on smaller angular scales, more complicated physics comes into play. The dominant process that occurs on short wavelengths is //acoustic oscillations// in the baryon/photon plasma. The idea is simple: matter tends to collapse due to gravity  onto regions where the density is higher than average, so the baryons "fall" into overdense regions. However, since the baryons and the photons are still strongly coupled, the photons tend to resist this collapse and push the baryons outward. The result is "ringing", or oscillatory modes of compression and rarefaction in the gas due to density fluctuations. The gas heats as it compresses and cools as it expands, which creates fluctuations in the temperature of the CMB. This manifests itself in the \(C_\ell\) spectrum as a series of peaks and valleys.

<<tiddler [[fig:Cl]]>>

The specific shape and location of the acoustic peaks is created by complicated but well-understood physics, involving a large number of cosmological parameters. The presence of acoustic peaks in the CMB was first suggested by Sakharov  ```Sakharov:1965``` , and later calculated by Sunyaev and Zel'dovich  ```Zeldovich & Sunyaev (1969)  [[The Interaction of Matter and Radiation in a Hot-Model Universe|http://inspirehep.net/search?ln=en&p=Zeldovich:1969ff&of=hd]]```  ```Sunyaev & Zeldovich (1970)  [[Small scale fluctuations of relic radiation|http://inspirehep.net/search?ln=en&p=Sunyaev:1970eu&of=hd]]```  and Peebles and Yu  ```Peebles & Yu (1970)  [[Primeval adiabatic perturbation in an expanding universe|http://inspirehep.net/search?ln=en&p=Peebles:1970ag&of=hd]]``` . The complete linear theory of CMB fluctuations was worked out by Ma and Bertschinger in 1995  ```Ma & Bertschinger (1995)  [[Cosmological perturbation theory in the synchronous and conformal Newtonian gauges|http://inspirehep.net/search?ln=en&p=Ma:1995ey&of=hd]]``` . The shape of the CMB multipole spectrum depends, for example, on the baryon density \(\Omega_{\rm b}\), the Hubble constant \(H_0\), the densities of matter \(\Omega_{\rm m}\) and cosmological constant \(\Omega_{\Lambda}\), the amplitude of primordial gravitational waves, and the redshift \(z_{\rm ri}\) at which the first generation of stars ionized the intergalactic medium. This makes interpretation of the spectrum something of a complex undertaking, but it also makes it a sensitive probe of cosmological models.

In addition to anisotropy in the temperature of the CMB, the photons coming from the surface of last scattering are expected to be weakly polarized due to the presence of perturbations  ```Kosowsky (1999)  [[Introduction to microwave background polarization|http://inspirehep.net/search?ln=en&p=Kosowsky:1998mb&of=hd]]```  ```Zaldarriaga (2003)  [[The polarization of the cosmic microwave background|http://inspirehep.net/search?ln=en&p=Zaldarriaga:2003bb&of=hd]]``` . This polarization is much less well measured than the temperature anisotropy, but it has been detected by WMAP and by a number of ground- and balloon-based measurements  ```Leitch, //et al.,// (2005)  [[DASI three-year cosmic microwave background polarization results|http://inspirehep.net/search?ln=en&p=Leitch:2004gd&of=hd]]```  ```Sievers, //et al.,// (2007)  [[Implications of the cosmic background imager polarization data|http://inspirehep.net/search?ln=en&p=Sievers:2005gj&of=hd]]```  ```Montroy, //et al.,// (2006)  [[A Measurement of the CMB &lt;EE&gt; spectrum from the 2003 flight of BOOMERANG|http://inspirehep.net/search?ln=en&p=Montroy:2005yx&of=hd]]```  ```Wu, //et al.,// (2007)  [[MAXIPOL: Data Analysis and Results|http://inspirehep.net/search?ln=en&p=Wu:2006ji&of=hd]]```  ```Nolta, //et al.,// (2009)  [[Five-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Angular Power Spectra|http://inspirehep.net/search?ln=en&p=Nolta:2008ih&of=hd]]``` . Measurement of polarization promises to greatly increase the amount of information it is possible to extract from the CMB.  Of particular interest is the odd-parity, or //B-mode// component of the polarization, whose only primordial source is gravitational waves, and which thus provides a clean signal for the detection of these perturbations. The B-mode has yet to be detected by any measurement.


One of the things that cosmologists most want to measure accurately is the total density \(\rho\) of the universe, usually expressed in terms of the density parameter \(\Omega\):
\begin{eqnarray}
&&< 1:\ {\rm Open}\cr
\Omega \equiv {\rho \over \rho_c} &&= 1:\ {\rm Flat}\cr
&&>1:\ {\rm Closed}.
\end{eqnarray}
where \(\rho_c\) is the critical density necessary for a flat universe,
\begin{equation}
\rho_c \equiv {3 H_0^2 \over 8 \pi G}.
\end{equation}
If the energy density is greater than critical, \(\rho > \rho_c\), the universe is closed and has a positive curvature (\(k = +1\)). In this case, the universe also has a finite lifetime, eventually collapsing back on itself in a "big crunch". If \(\rho < \rho_c\), the universe is open, with negative curvature, and has an infinite lifetime.

There has long been a debate between theorists and observers as to what the value of \(\Omega\) is in the real universe. Theorists have steadfastly maintained that the only sensible value for \(\Omega\) is unity, \(\Omega = 1\). This prejudice was further strengthened by the development of the theory of inflation, which solves [[several cosmological puzzles|(2.2) The Flatness and Horizon Problems]] and in fact //predicts// that \(\Omega\) will be exponentially close to unity. Observers, however, have made attempts to measure \(\Omega\) using a variety of methods, including measuring galactic rotation curves, the velocities of galaxies orbiting in clusters, X-ray measurements of galaxy clusters, the velocities and spatial distribution of galaxies on large scales, and gravitational lensing. These measurements have repeatedly pointed to a value of \(\Omega\) inconsistent with a flat cosmology, with \(\Omega = 0.2-0.3\) being a much better fit, indicating an open, negatively curved universe. Until a few years ago, theorists have resorted to cheerfully ignoring the data, taking it almost on faith that \(\Omega = 0.7\) in extra stuff would turn up sooner or later. The theorists were right: new observations of the cosmic microwave background definitively favor a flat universe, \(\Omega = 1\). Unsurprisingly, the observationalists were also right: only about \(1/3\) of this density appears to be in the form of ordinary matter.

The first hint that something strange was up with the standard cosmology came from measurements of the colors of stars in globular clusters. [[Globular clusters|https://en.wikipedia.org/wiki/Globular_cluster]] are small, dense groups of \(10^{5}\) - \(10^{6}\) stars which orbit in the halos of most galaxies and are among the oldest objects in the universe. Their ages are determined by observation of stellar populations and models of stellar evolution, and some globular clusters are known to be at least 12 billion years old  ```Salaris & Weiss (2002)  [[Homogeneous age dating of 55 galactic globular clusters. clues to the galaxy formation mechanisms|http://inspirehep.net/search?ln=en&p=Salaris:2002bj&of=hd]]``` , implying that the universe itself must be at least 12 billion years old. But consider a flat universe \(\Omega = 1\) filled with pressureless matter, \(\rho \propto a^{-3}\) and \(p = 0\). It is straightforward to solve the [[Friedmann Equation|eq:generalFRW]] with \(k = 0\) to show that
\begin{equation}
a\left(t\right) \propto t^{2/3}.
\end{equation}
The Hubble parameter is then given by
\begin{equation}
H = {\dot a \over a} = {2 \over 3} t^{-1}.
\end{equation}
We therefore have a simple expression for the age of the universe \(t_0\) in terms of the measured Hubble constant \(H_0\),
\begin{equation}
t_0 = {2 \over 3} H_0^{-1}.
\end{equation}
The fact that the universe has a finite age introduces the concept of a //horizon//: this is just how far out in space we are capable of seeing at any given time. This distance is finite because photons have only traveled a finite distance since the beginning of the universe. Just as in special relativity, photons travel on paths with proper length \(ds^2 = dt^2 - a^2 d{\bf x}^2 = 0\), so that we can write the physical distance a photon has traveled since the Big Bang, or the //horizon size//, as
\begin{equation}
d_{\rm H} = a(t_0) \int_{0}^{t_0}{dt \over a(t)}.
\end{equation}
(This is in units where the speed of light is set to \(c = 1\).) For example, in a flat, matter-dominated universe, \(a(t) \propto t^{2/3}\), so that the horizon size is
\begin{equation}
d_{\rm H} = t_0^{2/3} \int_{0}^{t_0}{t^{-2/3}\, dt} = 3 t_0 = 2 H_0^{-1}.
\end{equation}
This form for the horizon distance is true in general: the distance a photon travels in time \(t\) is always about \(d \sim t\): effects from expansion simply add a numerical factor out front. We will frequently ignore this, and approximate
\begin{equation}
d_{\rm H} \sim t_0 \sim H_0^{-1}.
\end{equation}
Measured values of \(H_0\) are quoted in a very strange unit of inverse time, a km/s/Mpc, but it is a simple matter to calculate the dimensionless factor using \(1\ {\rm Mpc} \simeq 3 \times 10^{19}\ {\rm km}\), so that the age of a flat, matter-dominated universe with \(H_0 = 72 \pm 8\ {\rm km/s/Mpc}\) is
\begin{equation}
t_0 = 8.8^{+ 1.1}_{- 0.9} \times 10^{9}\ {\rm years}.
\end{equation}
A flat, matter-dominated universe would be younger than the things in it! Something is evidently wrong -- either the estimates of globular cluster ages are too big, the measurement of the Hubble constant from the HST is incorrect, the universe is not flat, or the universe is not matter dominated.

We will take for granted that the measurement of the Hubble constant is correct, and that the models of stellar structure are good enough to produce a reliable estimate of globular cluster ages (as they appear to be), and focus on the last two possibilities. An open universe, \(\Omega_0 < 1\), might solve the age problem. The figure below shows the age of the universe consistent with the HST Key Project value for \(H_0\) as a function of the density parameter \(\Omega_0\).

<<tiddler [[fig:age]]>>

We see that the age determined from \(H_0\) is consistent with globular clusters as old as 12 billion years only for values of \(\Omega_0\) less than \(0.3\) or so. However, measurements of the cosmic microwave background strongly indicate that we indeed live in a flat (\(\Omega = 1\)) universe. So while a low-density universe might provide a marginal solution to the age problem, it would conflict with the CMB. We therefore, perhaps reluctantly, are forced to consider that the universe might not be matter dominated.

The solution is Einstein's cosmological constant, or vacuum energy. The important fact about vacuum energy is that it results in accelerated expansion of the universe. From the [[Raychaudhuri Equation|eq:generalFRW]], we can write the acceleration \(\ddot a\) in terms of the equation of state \(p = w \rho\) of the matter in the universe,
\begin{equation}
{\ddot a \over a} \propto - \left(1 + 3 w\right) \rho.
\end{equation}
For ordinary matter such as pressureless dust \(w = 0\) or radiation \(w = 1/3\), we see that the gravitational attraction of all the stuff in the universe makes the expansion slow down with time, \(\ddot a < 0\). But we have seen that a cosmological constant has the odd property of negative pressure, \(w = -1\), so that a universe dominated by vacuum energy actually expands faster and faster with time, \(\ddot a > 0\). It is easy to see that accelerating expansion helps with the age problem: for a standard matter-dominated universe, a larger Hubble constant means a //younger universe//, \(t_0 \propto H_0^{-1}\). But if the expansion of the universe is accelerating, this means that \(H\) grows with time. For a given age \(t_0\), acceleration means that the Hubble constant we measure will be larger in an accelerating cosmology than in a decelerating one, so we can have our cake and eat it too: an old universe and a high Hubble constant! This also resolves the old dispute between the observers and the theorists. Astronomers measuring the density of the universe use local dynamical measurements such as the orbital velocities of galaxies in a cluster. These measurements are insensitive to a cosmological constant and only measure the //matter// density \(\rho_{\rm m}\) of the universe. However, geometrical tests like the cosmic microwave background  are sensitive to the //total// energy density \(\rho_{\rm m} + \rho_{\rm \Lambda}\). If we take the observational value for the matter density \(\Omega_{\rm m} = 0.2-0.3\) and make up the difference with a cosmological constant, \(\Omega_{\Lambda} = 0.7-0.8\), we arrive at an age for the universe in excess of \(13\ {\rm Gyr}\), perfectly consistent with the globular cluster data.

<<tiddler [[fig:ageLCDM]]>>

In the 1980s and 1990s, there were some researchers making the argument based on the age problem alone that we needed a cosmological constant  ```Peebles (1984)  [[Tests of Cosmological Models Constrained by Inflation|http://inspirehep.net/search?ln=en&p=Peebles:1984ge&of=hd]]```  ```Turner, //et al.,// (1984)  [[The Flatness of the Universe: Reconciling Theoretical Prejudices with Observational Data|http://inspirehep.net/search?ln=en&p=Turner:1984nf&of=hd]]```  ```Turner (1991)  [[Dark matter in the universe|http://inspirehep.net/search?ln=en&p=Turner:1990qb&of=hd]]```  ```Krauss & Turner (1995)  [[The Cosmological constant is back|http://inspirehep.net/search?ln=en&p=Krauss:1995yb&of=hd]]``` . There were also some observational indications favoring a cosmological constant  ```Jackson & Dodgson (1997)  [[Deceleration without dark matter|http://inspirehep.net/search?ln=en&p=Jackson:2006ib&of=hd]]``` . But the case was hardly compelling, given that the CMB results indicating a flat universe had not yet been measured, and a low-density universe presented a simpler alternative, based on a cosmology containing matter alone. However, there was another observation that pointed clearly toward the need for \(\Omega_{\Lambda}\): Type Ia supernovae (SNeIa) measurements. A detailed discussion of these measurements is beyond the scope of these lectures, but the principle is simple: SNeIa represent a //standard candle//, i.e. objects whose intrinsic brightness we know, based on observations of nearby supernovae. They are also extremely bright, so they can be observed at cosmological distances. Two collaborations, the [[Supernova Cosmology Project|http://supernova.lbl.gov/]]  and the [[High-z Supernova Search|http://cfa-www.harvard.edu/cfa/oir/Research/supernova/HighZ.html]] obtained samples of supernovae at redshifts around \(z = 0.5\). 
This is far enough out that it is possible to measure deviations from the linear Hubble law \(v = H_0 d\) due to the time-evolution of the Hubble parameter: the groups were able to //measure// the acceleration or deceleration of the universe directly. If the universe is decelerating, objects at a given redshift will be closer to us, and therefore brighter than we would expect based on a linear Hubble law. Conversely, if the expansion is accelerating, objects at a given redshift will be further away, and therefore dimmer. The result from both groups was that the supernovae were consistently dimmer than expected. The figure below shows the data from the Supernova Cosmology Project, who quoted a best fit of \(\Omega_{\rm m} \simeq 0.3\), \(\Omega_{\Lambda} \simeq 0.7\), just what was needed to reconcile the dynamical mass measurements with a flat universe! Members of both collaborations received the [[2011 Nobel Prize|http://www.nobelprize.org/nobel_prizes/physics/laureates/2011/press.html/]] for the discovery. Since then, many independent tests for the presence of this //Dark Energy// have been shown to be consistent with a "concordance cosmology" consisting of around 30% matter and 70% Dark Energy.

<<tiddler [[fig:SCP]]>>

<<tiddler [[fig:LCDMConstraints]]>>

We have arrived at a very curious picture, in which 70% of the universe is made of something that looks very much like Einstein's "greatest blunder", a cosmological constant. Furthermore, a cosmological constant is a natural expectation from quantum field theory, which we take up in [[(2.1) Vacuum Energy in Quantum Field Theory]].
In this section, we will discuss something that at first glance appears to be entirely unrelated to cosmology: the vacuum in quantum field theory. We will see later, however, that it will in fact be crucially important to cosmology. Let us start with basic quantum mechanics, in the form of the simple harmonic oscillator, with Hamiltonian
\begin{equation}
H = \hbar \omega\left(\hat a^{\dagger} \hat a + {1\over 2}\right),
\end{equation}
where \(\hat a\) and \(\hat a^{\dagger}\) are the lowering and raising operators, respectively, with commutation relation
\begin{equation}
\left[\hat a,\hat a^{\dagger}\right] = 1.
\end{equation}
This leads to the familiar ladder of energy eigenstates \(\left\vert n \right\rangle\),
\begin{equation}
H \left\vert n \right\rangle = \hbar \omega \left(n + {1 \over 2}\right) \left\vert n \right\rangle
= E_n \left\vert n \right\rangle.
\end{equation}
The simple harmonic oscillator is pretty much the only problem that physicists know how to solve. Applying the old rule that if all you have is a hammer, everything looks like a nail, we construct a description of quantum fields by placing an infinite number of harmonic oscillators at every point,
\begin{equation}
H = \int^{\infty}_{-\infty}{d^3 k \left[\hbar \omega_k \left({\hat a_{\bf k}}^{\dagger} {\hat a_{\bf k}} + {1 \over 2}\right)\right]},
\end{equation}
where the operators \({\hat a_{\bf k}}\) and \({\hat a_{\bf k}}^{\dagger}\) have the commutation relation
\begin{equation}
\left[{\hat a_{\bf k}}, \hat a_{\bf k'}^{\dagger}\right] = \delta^3\left({\bf k} - {\bf k'}\right).
\end{equation}
Here we make the identification that  \({\bf k}\) is the momentum of a particle, and \(\omega_{k}\) is the energy of the particle,
\begin{equation}
\omega_k^2 - \left\vert {\bf k}\right\vert^2 = m^2
\end{equation}
for a particle of mass \(m\). Taking \(m = 0\) gives the familiar dispersion relation for massless particles like photons. Like the state kets \(\left\vert n \right\rangle\) for the harmonic oscillator, each momentum vector \(\bf k\) has an independent ladder of states, with the associated quantum numbers, \("\left\vert n_{\bf k}, \ldots, n_{\bf k'}\right\rangle"\). The raising and lowering operators are now interpreted as //creation// and //annihilation// operators, turning a ket with \(n\) particles into a ket with \(n + 1\) particles, and vice-versa:
\begin{equation}
\left\vert n_{\bf k} = 1\right\rangle = {\hat a_{\bf k}}^{\dagger} \left\vert 0 \right\rangle,
\end{equation}
and we call the ground state \(\left\vert 0 \right\rangle\) the //vacuum//, or zero-particle state. But there is one small problem: just like the ground state of a single harmonic oscillator has a nonzero energy \(E_0 = (1/2) \hbar \omega\), the vacuum state of the quantum field also has an energy,
<<tiddler [[eqdivergentvacuum]]>>
The ground state energy diverges! The solution to this apparent paradox is that we expect quantum field theory to break down at very high energy. We therefore introduce  a cutoff on the momentum \({\bf k}\) at high energy, so that the [[ground-state energy|eqdivergentvacuum]] becomes finite. A reasonable physical scale for the cutoff is the scale at which we expect quantum gravitational effects to become relevant, the Planck scale \(m_{\rm Pl}\). Therefore we expect the vacuum everywhere to have a constant energy density, given in units where \(\hbar = c = 1\) as
\begin{equation}
\rho \sim \left\langle 0 \right\vert H \left\vert 0 \right\rangle \sim m_{\rm Pl}^4 \sim 10^{93}\ {\rm g/cm^3}.
\end{equation}

We have already met up with an energy density that is constant everywhere in space: Einstein's cosmological constant, \(\rho_{\Lambda} = {\rm const.}\), \(p_{\Lambda} = - \rho_{\Lambda}\). This //dark energy// can possibly be identified with the vacuum energy predicted by quantum field theory. However, the energy density is wrong, since the critical density of the universe is
\begin{equation}
\rho_c \sim 10^{-30}\ {\rm g/cm^3}.
\end{equation}
Our estimate of the vacuum energy density from quantum field theory is more than 120 orders of magnitude off! Few, if any, satisfying explanations have been proposed to resolve this discrepancy. For example, some authors have proposed arguments based on the [[Anthropic Principle]]  ```Barrow, //et al.,// (2002)  [[Anthropic reasons for nonzero flatness and Lambda|http://inspirehep.net/search?ln=en&p=Barrow:2001ks&of=hd]]```  ```Kallosh & Linde (2003)  [[M theory, cosmological constant and anthropic principle|http://inspirehep.net/search?ln=en&p=Kallosh:2002gg&of=hd]]```  ```Garriga & Vilenkin (2003)  [[Testable anthropic predictions for dark energy|http://inspirehep.net/search?ln=en&p=Garriga:2002tq&of=hd]]```  to explain the low value of \(\rho_{\Lambda}\), but this explanation is controversial to say the least. There is a large body of literature devoted to the idea that the dark energy is something other than the quantum zero-point energy we have considered here:  a good review can be found in Peebles & Ratra  ```Peebles & Ratra (2003)  [[The Cosmological constant and dark energy|http://inspirehep.net/search?ln=en&p=Peebles:2002gy&of=hd]]``` . However, it is safe to say that the dark energy that dominates the universe is currently unexplained, but it is of tremendous interest from the standpoint of fundamental theory. This will form the main theme of these lectures: cosmology provides us a way to study a question of central importance for particle theory, namely the nature of the vacuum in quantum field theory. This is something that cannot be studied in particle accelerators, so in this sense cosmology provides a unique window on particle physics. With the introduction of the idea of inflation, we will see that vacuum energy is important not only in the universe today. 
It had an important influence on the very early universe as well, providing an explanation for the origin of the primordial density fluctuations that later collapsed to form all structure in the universe. This provides us with yet another way to study the "physics of nothing", arguably one of the most important questions in fundamental theory today.
We have so far considered two types of cosmological mass-energy -- matter and radiation -- and solved the Friedmann Equation for the simple case of a flat universe. What about the more general case? In this section, we consider non-flat universes with general contents. We introduce two related questions which are not explained by the standard Big Bang cosmology: why is the universe so close to flat today, and why is it so large?

We can describe a general homogeneous, isotropic mass-energy by its equation of state
\begin{equation}
p = w \rho,
\end{equation}
so that pressureless matter corresponds to \(w = 0\), and radiation corresponds to \(w = 1/3\). We will consider only the case of constant equation of state, \(w = {\rm const.}\) From the continuity equation, we have
\begin{equation}
\dot\rho + 3 \left(1 + w\right) \frac{\dot a}{a} \rho = 0,
\end{equation}
with solution
\begin{equation}
\rho \propto a^{-3 \left( 1 + w \right)}.
\end{equation}
The Friedmann Equation for a flat universe is then
\begin{equation}
\left(\frac{\dot a}{a}\right)^2 \propto a^{-3 \left(1 + w\right)},
\end{equation}
so that the scale factor increases as a power-law in time,
\begin{equation}
a\left(t\right) \propto t^{2 / \left[3 \left(1 + w\right)\right]}.
\end{equation}
What about the evolution of a non-flat universe? Analytic solutions for \(a(t)\) in the \(k \neq 0\) case can be found in cosmology textbooks. For our purposes, it is sufficient to consider the time-dependence of the density parameter \(\Omega\). From the [[Friedmann Equation|eq:FRWOmega]]  it is not too difficult to show that the density parameter evolves with the scale factor \(a\) as:
<<tiddler [[eq:Omegaevolution]]>>
Proof is left as an exercise for the reader. Note that a flat universe, \(\Omega = 1\) remains flat at all times, but in a non-flat universe, the density parameter \(\Omega\) is a time-dependent quantity, with the evolution determined by the equation of state parameter \(w\). For matter (\(w = 0\)) or radiation (\(w = 1/3\)), the prefactor above is positive,
\begin{equation}
1 + 3 w > 0,
\end{equation}
which means a flat universe is an //unstable// fixed point:
\begin{equation}
\frac{d \left\vert\Omega - 1\right\vert}{d \ln{a}} > 0,\ \left(1 + 3 w\right) > 0.
\end{equation}
Any deviation from a flat geometry is amplified by the subsequent cosmological expansion, so a nearly flat universe today is a highly fine-tuned situation. The WMAP5 CMB measurement tells us the universe is flat to within a few percent, \(\left\vert \Omega_0 - 1\right\vert < 0.02\)  ```Dunkley, //et al.,// (2009)  [[Five-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Likelihoods and Parameters from the WMAP data|http://inspirehep.net/search?ln=en&p=Dunkley:2008ie&of=hd]]```  ```Komatsu, //et al.,// (2009)  [[Five-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Cosmological Interpretation|http://inspirehep.net/search?ln=en&p=Komatsu:2008hk&of=hd]]``` . If we are very conservative and take a limit on the density today as \(\Omega_0 = 1 \pm 0.05\), that means that at recombination, when the CMB was emitted, \(\Omega_{\rm rec} = 1 \pm 0.0004\), and at the time of primordial nucleosynthesis, \(\Omega_{\rm nuc} = 1 \pm 10^{-12}\). Why did the universe start out so incredibly close to flat? The standard Big Bang cosmology provides no answer to this question, which we call the //flatness problem//.

There is a second, related problem with the standard Big Bang picture, arising from the finite age of the universe. Because the universe has a finite age, photons can only have traveled a finite distance in the time since the Big Bang. Therefore, the universe has a //horizon//: the further out in space we look, the further back in time we see. If we could look far enough out in any direction, past the surface of last scattering, we would be able to see the Big Bang itself, and beyond that we can see no further. Every observer in an FRW spacetime sees herself at the center of a spherical horizon which defines her observable universe. To calculate the size of our horizon, we use the fact that photons travel on paths of zero proper length:
\begin{equation}
ds^2 = dt^2 - a^2\left(t\right) \left\vert d{\bf x}\right\vert^2 = 0,
\end{equation}
so that the comoving distance \(\left\vert d{\bf x}\right\vert\) traversed by a photon in time \(dt\) is
\begin{equation}
\left\vert d{\bf x}\right\vert = \frac{dt}{a\left(t\right)}.
\end{equation}
Therefore, the size of the cosmological horizon at time \(t\) after the Big Bang is
\begin{equation}
d_{\rm H}\left(t\right) = \int_{0}^{t}{\frac{dt'}{a\left(t'\right)}}.
\end{equation}
To convert comoving length to proper length, we just multiply by \(a\left(t\right)\), so that the proper horizon size is
\begin{equation}
d_{\rm H}^{\rm prop}\left(t\right) = a\left(t\right) d_{\rm H}^{\rm com}\left(t\right).
\end{equation}
Normalizing \(a\left(t_0\right) = 1\), the horizon size of a 14-billion year-old flat, matter-dominated universe is \(d_{\rm H} = 3 t_0 \sim 13\ {\rm Gpc}\).

To see why the presence of a horizon is a problem for the standard Big Bang, we examine the causal structure of an FRW universe. Take the FRW metric
\begin{equation}
ds^2 =  dt^2 - a^2\left(t\right) \left\vert d{\bf x}\right\vert^2,
\end{equation}
and re-write it in terms of a redefined clock, the //conformal time// \(\tau\):
<<tiddler [[eq:conformalmetric]]>>
Conformal time is a "clock" which slows down with the expansion of the universe,
\begin{equation}
d\tau = \frac{dt}{a\left(t\right)},
\end{equation}
so that the comoving horizon size is just the age of the universe in conformal time
<<tiddler [[eq:conformalhorizon]]>>
The conformal metric is useful because the expansion of the spacetime is factored into a static metric multiplied by a time-dependent conformal factor \(a\left(\tau\right)\), so that photon geodesics are simply described by \(d\left\vert{\bf x}\right\vert = d\tau\). In a diagram of \(\tau\) versus \(\left\vert{\bf x}\right\vert\), photons travel on \(45^{\circ}\) angles. (Note that this is true even for curved spacetimes!) We can draw light cones and infer causal relationships with the expansion factored out, in a manner identical to the usual case of Minkowski Space.

There is one major difference between FRW and Minkowski: an FRW spacetime has a //finite age//. Therefore, unlike the case of Minkowski Space, which has an infinite past, an FRW spacetime is "chopped off" at some finite past time \(\tau = 0\).


<<tiddler [[fig:FRWdiagram]]>>

Consider two points on the CMB sky \(180^{\circ}\) degrees apart:

<<tiddler [[fig:CMBcausaldiagram]]>>

Their past light cones do not overlap, and the two points are causally //disconnected//. Those two points on the surface of last scattering occupy completely separate, disconnected observable universes. How did these points reach the observed thermal equilibrium to a few parts in \(10^{5}\) if they never shared a causal past? This apparent paradox is called the //horizon problem//: the universe somehow reached nearly perfect equilibrium on scales much larger than the size of any local horizon. From the Friedmann Equation, it is easy to show that the horizon problem and the flatness problem are related: consider a comoving length scale \(\lambda\). It is easy to show that for \(w = {\rm const.}\), the ratio of \(\lambda\) to the horizon size \(d_{\rm H}\) is related to the curvature by a conservation law
<<tiddler [[eq:flatnesshorizoncons]]>>
Proof is left as an exercise for the reader. Therefore, for a universe evolving away from flatness,
\begin{equation}
\frac{d\left\vert \Omega - 1\right\vert}{d \ln{a}} > 0,
\end{equation}
the horizon size gets bigger in comoving units
\begin{equation}
\frac{d}{d\ln{a}}\left(\frac{\lambda}{d_{\rm H}}\right) < 0.
\end{equation}
That is, more and more space "falls into" the horizon, or becomes causally connected, at late times.

<<tiddler [[fig:grid_md]]>>

What would be required to have a universe which evolves //toward// flatness, rather than away from it? From the [[evolution equation for \(\Omega\)|eq:Omegaevolution]], we see that having  \(1 + 3 w\) negative will do the trick,
\begin{equation}
\frac{d \left\vert\Omega - 1\right\vert}{d \ln{a}} < 0,\ \left(1 + 3 w\right) < 0.
\end{equation}
Therefore, if the energy density of the universe is dominated not by matter or radiation, but by something with sufficiently negative pressure, \(p < -\rho / 3\), a curved universe will become flatter with time. From the [[Raychaudhuri Equation|eq:generalFRW]], we see that the case of \(p < -\rho/3\) is exactly equivalent to an accelerating expansion:
\begin{equation}
\frac{\ddot a}{a} \propto - (1 + 3 w) > 0,\ \left(1 + 3 w\right) < 0. 
\end{equation}
If the expansion of the universe is slowing down, as is the case for matter- or radiation-domination, the curvature evolves away from flatness. But if the expansion is speeding up, the universe gets flatter. As a result of the [[conservation law|eq:flatnesshorizoncons]] relating the horizon size to the curvature, we see that this negative pressure solution also solves the horizon problem, since accelerating expansion means that the horizon size is shrinking in comoving units:
\begin{equation}
\frac{d}{d\ln{a}}\left(\frac{\lambda}{d_{\rm H}}\right) > 0,\ \left(1 + 3 w\right) < 0.
\end{equation}
When the expansion accelerates, distances initially smaller than the horizon size are "redshifted" to scales larger than the horizon at late times. Accelerating cosmological expansion is called //inflation//.

<<tiddler [[fig:grid_infl]]>>

The simplest example of an accelerating expansion from a negative pressure fluid is the case of vacuum energy we considered in [[(1.5) Solving the Friedmann Equation]], for which the scale factor increases exponentially,
\begin{equation}
a \propto e^{H t}. 
\end{equation}
For such expansion, the universe is driven exponentially toward a flat geometry,
\begin{equation}
\frac{d \ln{\Omega}}{d\ln{a}} = 2 \left(1 - \Omega\right).
\end{equation}
We can see that the horizon problem is also solved by looking at the conformal time:
\begin{equation}
d\tau = \frac{dt}{a\left(t\right)} = e^{-H t} dt,
\end{equation}
so that
<<tiddler [[eq:deSittertau]]>>
The conformal time during the inflationary period is //negative//, tending toward zero at late time. Therefore, if we have a period of inflationary expansion prior to the early epoch of radiation-dominated expansion, inflation takes place in negative conformal time, and conformal time \(\tau = 0\) represents not the initial singularity but the transition from the inflationary expansion to radiation domination. The initial singularity is pushed back into negative conformal time, and can be pushed arbitrarily far depending on the duration of inflation. 

<<tiddler [[fig:INFLdiagram]]>>

The past light cones of two points on the CMB sky do not intersect at \(\tau = 0\), but inflation provides a "sea" of negative conformal time, which allows those points to share a causal past. In this way, inflation solves the horizon problem. 



The example of de Sitter evolution we considered in [[(2.2) The Flatness and Horizon Problems]] gives a good qualitative picture of how inflation, or accelerated expansion, solves the horizon and flatness problems of the standard Big Bang cosmology. However, this leaves open the question: what physics is responsible for the accelerated expansion at early times? It cannot be Einstein's cosmological constant, simply because a universe dominated by vacuum energy //stays// dominated by vacuum energy for the infinite future, since in a de Sitter background matter (\(\rho \propto a^{-3}\)) and radiation (\(\rho \propto a^{-4}\)) are diluted exponentially quickly. Therefore, we will never reach a radiation-dominated phase, and we will never see a hot Big Bang. In order to transition from an inflating phase to a thermal equilibrium, radiation-dominated phase, the vacuum-like energy during inflation must be time-dependent. We model this dynamics with a scalar field \(\phi\), for which we assume the following action:
<<tiddler [[eq:scalaraction]]>>
where \(g \equiv Det\left(g_{\mu\nu}\right)\) is the determinant of the metric and the Lagrangian for the field \(\phi\) is
<<tiddler [[eq:canonicalL]]>>
Comparing the [[action|eq:scalaraction]] and the [[Lagrangian|eq:canonicalL]] with their Minkowski counterparts illustrates how we generalize a classical field theory to curved spacetime:
\begin{equation}
S_{\rm Minkowski} = \int{d^4 x \left[\frac{1}{2} \eta^{\mu\nu} \partial_\mu \phi \partial_\nu \phi - V\left(\phi\right)\right]}.
\end{equation}
The metric appears in two places in the curved-spacetime action: First, it appears in the measure of volume in the four-space, \(d^4 x\), where the determinant of the metric takes the role of the Jacobian for arbitrary coordinate transformations, \("x \rightarrow x'"\). Second, the metric appears in the kinetic term for the scalar field, where we replace the Minkowski metric \(\eta^{\mu\nu}\) with the general metric \(g^{\mu\nu}\).

The [[action|eq:scalaraction]] is not the most general assumption we could make, as we can see by writing the full action including gravity,
\begin{equation}
S_{\rm tot} = \int{d^4 x \sqrt{- g} \left[\frac{m_{\rm Pl}^2}{16 \pi} R + {\mathcal L}_\phi\right]}.
\end{equation}
Here \(R\) is the Ricci Scalar, composed of the metric and its derivatives. Variation of the first term in the action results in the [[Einstein Field Equation|eq:EFE]]. Such a //minimally coupled// theory assumes that there is no direct coupling between the field and the metric, which would be represented in a more general action by terms which mix \(R\) and  \(\phi\). In practice, many such non-minimally coupled theories can be transformed to a minimally coupled form by a field redefinition. We could also write a more general theory by modifying the scalar field [[Lagrangian|eq:canonicalL]] to contain non-canonical kinetic terms,
\begin{equation}
{\mathcal L}_\phi = F\left(\phi, g^{\mu\nu} \partial_\mu \phi \partial_\nu\phi\right) - V\left(\phi\right).
\end{equation}
where \(F()\) is some function of the field and its derivatives. Such Lagrangians appear frequently in models of inflation based on string theory, and are a topic of considerable current research interest. We could also complicate the gravitational sector by replacing the Ricci scalar \(R\) with a more complicated function \(f\left(R\right)\). An example of such a model is the inflation model of Starobinsky  ```Starobinsky (1980)  [[A New Type of Isotropic Cosmological Models Without Singularity|http://inspirehep.net/search?ln=en&p=Starobinsky:1980te&of=hd]]``` , which can be reduced to [[canonical form|eq:scalaraction]] through a field redefinition. We could also introduce multiple scalar fields.

Here we will confine ourselves for simplicity to a canonical [[Lagrangian|eq:canonicalL]] of a single scalar field, for which the only adjustable quantity is the choice of potential \(V\left(\phi\right)\). For simplicity, we assume a flat spacetime,
<<tiddler [[eq:coordinatemetric]]>>
and the equation of motion for the field \(\phi\) is then:
\begin{equation}
\ddot\phi + 3 H \dot\phi - \nabla^2 \phi + \frac{\delta V}{\delta \phi} = 0,
\end{equation}
where an overdot indicates a derivative with respect to the coordinate time \(t\), and \(H = \dot a / a\) is the Hubble parameter.  We will be particularly interested in the homogeneous mode of the field, for which the gradient term vanishes, \(\nabla \phi = 0\), so that the functional derivative \(\delta V / \delta\phi\) simplifies to an ordinary derivative, and the equation of motion simplifies to```The astute reader may well ask: if we are claiming inflation is a solution to the problems of flatness and homogeneity in the universe, why are we assuming flatness and homogeneity from the outset? The answer is that, as long as inflation gets started //somehow// and goes on for long enough, the late-time behavior of the field \(\phi\) will always be described by [[the homogeneous equation of motion|eq:inflatoneom]]. We will see later that we only have observational access to the //end// of the inflationary period, and therefore a consistent theory of initial conditions is not required for investigating the observational consequences of inflation.```
<<tiddler [[eq:inflatoneom]]>>
The stress-energy for a scalar field is given by
\begin{equation}
T_{\mu\nu} = \partial_\mu \phi \partial_\nu \phi - g_{\mu \nu} {\cal L}_\phi,
\end{equation}
and, for a homogeneous field, it takes the form of a perfect fluid with energy density \(\rho\) and pressure \(p\), with
<<tiddler [[eq:infleqofstate]]>>
We see that the de Sitter limit, \(p \simeq - \rho\), is just the limit in which the potential energy of the field dominates the kinetic energy, \(\dot\phi^2 \ll V\left(\phi\right)\). This limit is referred to as //slow roll//, and under such conditions the universe expands quasi-exponentially,
\begin{equation}
a\left(t\right) \propto \exp{\left(\int{H dt}\right)} \equiv e^{-N},
\end{equation}
where it is conventional to define the number of e-folds \(N\) with the sign convention
<<tiddler [[eq:numefolds]]>>
so that \(N\) is large in the far past and decreases as we go forward in time and as the scale factor \(a\) increases.

This can be made quantitative by plugging the [[energy and pressure|eq:infleqofstate]] into the Friedmann Equation
<<tiddler [[eq:scalarFriedmann]]>>
and the Raychaudhuri Equation, which we write in the convenient form
\begin{equation}
\left(\frac{\ddot a}{a}\right) = - \frac{4 \pi}{3 m_{\rm Pl}^2} \left(\rho + 3 p\right) = H^2 \left(1 - \epsilon\right).
\end{equation}
Here \(H^2\) is given in terms of \(\phi\) by the [[Friedmann Equation|eq:scalarFriedmann]], and the parameter \(\epsilon\) specifies the equation of state,
<<tiddler [[eq:defepsilon]]>>
It is a straightforward exercise to show that \(\epsilon\) is related to the evolution of the Hubble parameter by
\begin{equation}
\epsilon = - \frac{d \ln{H}}{d \ln{a}} = \frac{1}{H}\frac{d H}{d N},
\end{equation}
where \(N\) is the [[number of e-folds|eq:numefolds]]. This is a useful parameterization because the condition for accelerated expansion \(\ddot a > 0\) is simply equivalent to \(\epsilon < 1\). The de Sitter limit \(p \rightarrow -\rho\) is equivalent to \(\epsilon \rightarrow 0\), so that the potential \(V\left(\phi\right)\) dominates the energy density, and
<<tiddler [[eq:srHubble]]>>
We make the additional approximation that the friction term in the [[equation of motion|eq:inflatoneom]] dominates,
<<tiddler [[eq:secondsrcondition]]>>
so that the equation of motion for the scalar field is approximately
<<tiddler [[eq:sreom]]>>
This and the [[Friedmann Equation|eq:srHubble]] in the slow roll limit are together referred to as the //slow roll approximation//. The [[slow roll condition|eq:secondsrcondition]] can be expressed in terms of a second dimensionless parameter, conventionally defined as
<<tiddler [[eq:defeta]]>>

The parameters \(\epsilon\) and \(\eta\) are referred to as //slow roll parameters//, and the slow roll approximation is valid as long as both are small, \(\epsilon,\ \left\vert\eta\right\vert \ll 1\). It is not obvious that this will be a valid approximation for situations of physical interest: \(\eta\) need //not// be small for inflation to take place. Inflation takes place when \(\epsilon < 1\), regardless of the value of \(\eta\).  We later demonstrate explicitly that slow roll does in fact hold for interesting choices of inflationary potential. In the limit of slow roll, we can write the parameter \(\epsilon\) approximately as
<<tiddler [[eq:srepsilon]]>>
The inflationary limit, \(\epsilon \ll 1\) is then just equivalent to a field evolving on a flat potential, \(V'\left(\phi\right) \ll V\left(\phi\right)\). The second slow roll parameter \(\eta\) can likewise be written approximately as:
<<tiddler [[eq:sreta]]>>
so that the curvature \(V^{\prime\prime}\) of the potential must also be small for slow roll to be a valid approximation.  Similarly, we can write the number of e-folds as a function \(N\left(\phi\right)\) of the field as:
<<tiddler [[eq:srN]]>>
The limits on the last integral are defined such that \(\phi_e\) is a fixed field value, which we will later take to be the end of inflation, and \(N\) increases as we go //backward// in time, representing the number of e-folds of expansion which take place between field value \(\phi\) and \(\phi_e\).

The qualitative picture of scalar field-driven inflation is that of a phase transition with order parameter given by the field \(\phi\). At early times, the energy density of the universe is dominated by the field \(\phi\) which is slowly evolving on a nearly constant potential, so that it approximates a cosmological constant. During this period, the universe is exponentially driven toward flatness and homogeneity. Inflation ends as the potential steepens and the field begins to oscillate about its vacuum state at the minimum of the potential. At this point, we have an effectively zero-temperature scalar in a state of coherent oscillation about the minimum of the potential, and the universe is a huge Bose-Einstein condensate: hardly a hot Big Bang! In order to transition to a radiation-dominated hot Big Bang cosmology, the energy in the inflaton field must decay into Standard Model particles, a process generically termed //reheating//. This process is model-dependent, but it typically happens very rapidly.

<<tiddler [[fig:inflationschematic]]>>

Note that the field \(\phi\) need not be a fundamental field like a Higgs boson (although it could in fact be fundamental). //Any// order parameter for a phase transition will do, as long as it has the quantum numbers of vacuum, and the effective potential has the correct properties. The inflaton \(\phi\) could well be a scalar composite of more fundamental degrees of freedom, the coordinate of a brane in a higher-dimensional compactification from string theory, a supersymmetric modulus, or something even more exotic. The simple single-field picture we discuss here is therefore an effective representation of a large variety of underlying fundamental theories. All of the physics important to inflation is contained in the shape of the potential \(V\left(\phi\right)\). (The details of the underlying theory //are// important for understanding the epoch of reheating, since the reheating process depends crucially on the specific couplings of the inflaton to the other degrees of freedom in the theory.)

How long does inflation need to go on in order to solve the flatness and horizon problems? We use a thermodynamic argument, which rests on a simple fact about cosmological expansion: as long as there are no decays or annihilations of massive particles, all other interactions conserve photon number, so that the number of photons in a comoving volume is //constant//. Since the entropy of photons is proportional to the number density, that means the entropy per comoving volume is also constant. Therefore, the total entropy in the Cosmic Microwave Background (or, equivalently, the total number of photons) is a convenient measure of spatial volume in the universe. Since the entropy density \(s\) is (up to a few constants) given by the cube of the temperature,
\begin{equation}
s \sim T^3,
\end{equation}
the total photon entropy \(S\) in our current horizon volume is of order
\begin{equation}
S_{\rm hor} \sim T_{\rm CMB}^3 d_{\rm H}^3 \sim \left(\frac{T_{\rm CMB}}{H_0}\right)^3 \sim 10^{88},
\end{equation}
where we have taken the CMB temperature to be \(2.7\ K\) and the current Hubble parameter \(H_0\) to be \(70\ {\rm km/s/Mpc}\). (The interesting unit conversion from km/s/Mpc to Kelvin is left as an exercise for the reader.)

Let us consider a highly over-simplified picture of the universe, in which no particle decays or annihilations occur between the end of inflation and today. In that case, the only time when the photon number (and therefore the entropy) in the universe changes is during the reheating process itself, when the inflaton \(\phi\) decays into radiation and sets the initial state for the hot Big Bang. Therefore, we must //at minimum// create an entropy of \(10^{88}\) during reheating. Let us say that the energy density during inflation is
\begin{equation}
\rho \sim V\left(\phi\right) \sim \Lambda^4,
\end{equation}
where \(\Lambda\) is some energy scale. Therefore, the horizon size during inflation is then
\begin{equation}
d_{\rm H} \sim H^{-1} \sim \frac{m_{\rm Pl}}{\Lambda^2},
\end{equation}
so that the initial volume of the inflationary "patch" which undergoes exponential expansion is
\begin{equation}
V_i \sim d_{\rm H}^3 \sim \frac{m_{\rm Pl}^3}{\Lambda^{6}}.
\end{equation}
Suppose inflation continues for \(N\) e-folds of expansion, so that the scale factor \(a\) increases by a factor of \(e^{N}\) during inflation. The //proper// volume of the initial inflationary patch increases by the cube of the scale factor
\begin{equation}
V_f \sim e^{3 N} d_{\rm H}^3 \sim  e^{3 N} \frac{m_{\rm Pl}^3}{\Lambda^{6}}.
\end{equation}
Inflation takes a tiny patch of the universe and blows it up exponentially large, but in such a way that the energy //density// remains approximately constant: we have created an exponential amount of energy out of nothing! During reheating, this huge store of energy in the coherently oscillating field \(\phi\) decays into radiation and the temperature and entropy of the universe undergo an explosive increase. If reheating is highly efficient, then all or most of the energy stored in the inflaton field will be transformed into radiation, and the temperature of the universe after reheating will be of order the energy density of the inflaton field,
\begin{equation}
T_{\rm RH} \sim \Lambda.
\end{equation}
The entropy per comoving volume after reheating will then be \(s_{\rm RH} \sim T_{\rm RH}^3 \sim \Lambda^3\), and the //total// entropy in our inflating patch will be
\begin{equation}
S_{\rm RH} \sim V_f T_{\rm RH}^3 \sim e^{3 N} \frac{m_{\rm Pl}^3}{\Lambda^{3}}.
\end{equation}
Since this is our only source of entropy in our toy-model universe, this entropy must be at least as large as the entropy in our current horizon volume, \(S_{\rm RH} \geq 10^{88}\). The only adjustable parameter is the number of e-folds of inflation. Taking the logarithm of both sides gives a lower bound on \(N\),
<<tiddler [[eq:lowerboundN]]>>
We will see later that the amplitude of primordial density fluctuations \(\delta \rho / \rho \sim 10^{-5}\) typically constrains the inflationary energy scale to be of order \(\Lambda \sim 10^{-4} m_{\rm Pl}\), so that we have a lower limit on the number of e-folds of inflation of
\begin{equation}
N > N_{\rm min} \sim 60.
\end{equation}

Most inflation models hugely oversaturate this bound, with \(N_{\rm tot} \gg N_{\rm min}\). There is in fact no //upper// bound on the number of e-folds of inflation, an idea which is central to Linde's idea of "eternal" inflation  ```Linde (1986)  [[ETERNAL CHAOTIC INFLATION|http://inspirehep.net/search?ln=en&p=Linde:1986fc&of=hd]]```  ```Guth (2000)  [[Inflation and eternal inflation|http://inspirehep.net/search?ln=en&p=Guth:2000ka&of=hd]]```  ```Aguirre (2007)  [[Eternal Inflation, past and future|http://inspirehep.net/search?ln=en&p=Aguirre:2007gy&of=hd]]```  ```Winitzki (2008)  [[Eternal inflation|http://inspirehep.net/search?ln=en&p=Winitzki:2008zz&of=hd]]``` , in which inflation, once initiated, never completely ends, with reheating occurring only in isolated patches of the cosmos. Furthermore, it is easy to see that our oversimplified toy model of the universe gives a remarkably accurate estimate of \(N_{\rm min}\). In the real universe, all sorts of particle decays and annihilations happen between the end of inflation and today, which create additional entropy. However, our [[lower bound|eq:lowerboundN]] is only logarithmically sensitive to these processes. The dominant uncertainty is in the reheat temperature: it is possible that the energy scale of inflation is very low, or that the reheating process is very inefficient, and there are very few //observational// bounds on these scales. We do know that the universe has to be radiation dominated and in equilibrium by the time primordial nucleosynthesis happens at temperatures of order MeV. Furthermore, the baryon asymmetry of the universe is at least a good hint that the Big Bang was hot to at least the scale of electroweak unification. 
A typical assumption is that the reheat temperature is something between \(1\ {\rm TeV}\) and  \(10^{16}\ {\rm GeV}\), which translates into a range for \(N_{\rm min}\) of order  ```Liddle & Leach (2003)  [[How long before the end of inflation were observable perturbations produced?|http://inspirehep.net/search?ln=en&p=Liddle:2003as&of=hd]]```  ```Kinney & Riotto (2006)  [[Theoretical uncertainties in inflationary predictions|http://inspirehep.net/search?ln=en&p=Kinney:2005in&of=hd]]``` 
\begin{equation}
N_{\rm min} \simeq \left[46,60\right].
\end{equation}

In this section, we have seen that the basic picture of an early epoch in the universe dominated by vacuum-like energy, leading to nearly exponential expansion, can be realized within the context of a simple scalar field theory. The equation of state for the field approximates a cosmological constant \(p = -\rho\) when the energy density is dominated by the field potential \(V\left(\phi\right)\), and inflation ends when the potential becomes steep enough that the kinetic energy \(\dot\phi^2 / 2\) dominates over the potential. To solve the horizon and flatness problems and create a universe consistent with observation, we must have //at least// 60 or so e-folds of inflation, although in principle inflation could continue for much longer than this minimum amount. This dynamical explanation for the flatness and homogeneity of the universe is an interesting, but hardly compelling scenario. It could be that the universe started out homogeneous and flat because of initial conditions, either through some symmetry we do not yet understand, or because there are many universes, and we just happen to find ourselves in a highly unlikely realization which is homogeneous and geometrically flat. In the absence of any other observational handles on the physics of the very early universe, it is impossible to tell. However, flatness and homogeneity are not the whole story: inflation provides an elegant mechanism for explaining the //inhomogeneity// of the universe as well, which we discuss in [[(3.0) Perturbations in Inflation]].
We are now in a position to apply this to a specific case. We use the simple case of a quartic potential,
\begin{equation}
V\left(\phi\right) = \lambda \phi^4.
\end{equation}
The slow roll [[field|eq:sreom]] and [[Friedmann|eq:srHubble]] equations imply that the field evolves as:
\begin{equation}
\dot\phi = - \frac{V'\left(\phi\right)}{3 H} = - \sqrt{\frac{m_{\rm Pl}^2}{24 \pi}} \frac{V'\left(\phi\right)}{\sqrt{V\left(\phi\right)}} \propto \phi. 
\end{equation}
Note that this potential does not much qualitatively resemble the [[schematic slow roll potential|fig:INFLdiagram]]: the "flatness" of the potential arises because the energy density \(V\left(\phi\right) \propto \phi^4\) rises much more quickly than the kinetic energy, \(\dot\phi^2 \propto \phi^2\), so that if the field is far enough out on the potential, the slow roll approximation is self-consistent. The field rolls down the potential toward the vacuum at the origin, and the equation of state is determined by the parameter \(\epsilon\),
\begin{equation}
\epsilon\left(\phi\right) \simeq \frac{m_{\rm Pl}^2}{16 \pi} \left(\frac{V'\left(\phi\right)}{V\left(\phi\right)}\right)^2 = \frac{1}{\pi} \left(\frac{m_{\rm Pl}}{\phi}\right)^2.
\end{equation}
The field value \(\phi_e\) at the end of inflation is when \(\epsilon\left(\phi_e\right) = 1\), or
\begin{equation}
\phi_e = \frac{m_{\rm Pl}}{\sqrt{\pi}}. 
\end{equation}
For \(\phi > \phi_e\), \(\epsilon < 1\) and the universe is inflating, and for \(\phi < \phi_e\), \(\epsilon > 1\) and the expansion enters a decelerating phase. Therefore, even this simple potential has the necessary characteristics to support a period of early-universe inflation followed by reheating and a hot Big Bang cosmology. What about the requirement that the universe inflate for at least 60 e-folds? Using [[the first slow roll parameter|eq:srepsilon]], we can express the [[number of e-folds|eq:srN]] before the end of inflation  as
\begin{equation}
N = \frac{2 \sqrt{\pi}}{m_{\rm Pl}} \int_{\phi_e}^{\phi}{\frac{dx}{\sqrt{\epsilon\left(x\right)}}} = \pi \left(\frac{\phi}{m_{\rm Pl}}\right)^2 - 1,
\end{equation}
where we integrate //backward// from \(\phi_e\) to \(\phi\) to be consistent with the [[sign convention for N|eq:numefolds]]. Therefore the field value \(N\) e-folds before the end of inflation is
<<tiddler [[eq:phi4N]]>>
so that
\begin{equation}
\phi_{60} = 4.4 m_{\rm Pl}.
\end{equation}
We obtain sufficient inflation, but at a price: the field must be a long way (several times the Planck scale) out on the potential. However, we do //not// necessarily have to invoke quantum gravity, since for small enough coupling \(\lambda\), the energy density in the field can be much less than the Planck density, and the energy density is the physically important quantity.
The universe we live in today is homogeneous, but only when averaged over very large scales. On small scales, the size of people or solar systems or galaxies or even clusters of galaxies, the universe we see is highly inhomogeneous. Our world is full of complex structure, created by gravitational instability acting on tiny "seed" perturbations in the early universe. If we look as far back in time as the epoch of recombination, the universe on all scales was homogeneous to a high degree of precision, a few parts in \(10^5\). Recent observational efforts such as the WMAP satellite have made exquisitely precise measurements of the first tiny inhomogeneities in the universe, which later collapsed to form the structure we see today. (We discuss the WMAP observation in more detail in Section [[(4.0) Observational Constraints]].) Therefore, another mystery of Big Bang cosmology is: what created the primordial perturbations? This mystery is compounded by the fact that the perturbations we observe in the CMB exhibit correlations on scales much larger than the horizon size at the time of recombination, which corresponds to an angular multipole of \(\ell \simeq 100\), or about \(1^\circ\) as observed on the sky today. This is another version of the horizon problem: not only is the universe homogeneous on scales larger than the horizon, but whatever created the primordial perturbations must also have been capable of generating fluctuations on scales larger than the horizon. Inflation provides just such a mechanism  ```Starobinsky (1979)  [[Relict Gravitation Radiation Spectrum and Initial State of the Universe. (In Russian)|http://inspirehep.net/search?ln=en&p=Starobinsky:1979ty&of=hd]]```  ```Mukhanov & Chibisov (1981)  [[Quantum Fluctuation and Nonsingular Universe. 
(In Russian)|http://inspirehep.net/search?ln=en&p=Mukhanov:1981xt&of=hd]]```  ```Hawking (1982)  [[The Development of Irregularities in a Single Bubble Inflationary Universe|http://inspirehep.net/search?ln=en&p=Hawking:1982cz&of=hd]]```  ```Hawking & Moss (1983)  [[FLUCTUATIONS IN THE INFLATIONARY UNIVERSE|http://inspirehep.net/search?ln=en&p=Hawking:1982my&of=hd]]```  ```Starobinsky (1982)  [[Dynamics of Phase Transition in the New Inflationary Universe Scenario and Generation of Perturbations|http://inspirehep.net/search?ln=en&p=Starobinsky:1982ee&of=hd]]```  ```Guth & Pi (1982)  [[Fluctuations in the New Inflationary Universe|http://inspirehep.net/search?ln=en&p=Guth:1982ec&of=hd]]```  ```Bardeen, //et al.,// (1983)  [[Spontaneous Creation of Almost Scale - Free Density Perturbations in an Inflationary Universe|http://inspirehep.net/search?ln=en&p=Bardeen:1983qw&of=hd]]``` .

Consider a perturbation in the cosmological fluid with wavelength \(\lambda\). Since the proper wavelength redshifts with expansion, \(\lambda_{\rm prop} \propto a\left(t\right)\), the //comoving// wavelength of the perturbation is a constant, \(\lambda_{\rm com} = {\rm const.}\) This is true of photons or density perturbations or gravitational waves or any other wave propagating in the cosmological background. Now consider this wavelength relative to the size of the horizon: We have seen that in general the horizon as measured in comoving units is proportional to the conformal time, \(d_H \propto \tau\). Therefore, for matter- or radiation-dominated expansion, the horizon size //grows// in comoving units, so that a comoving length which is larger than the horizon at early times is smaller than the horizon at late times: modes "fall into" the horizon. The opposite is true during inflation, where the conformal time is negative and evolving toward zero: the comoving horizon size is still proportional to \(\tau\), but it now //shrinks// with cosmological expansion, and comoving perturbations which are initially smaller than the horizon are "redshifted" to scales larger than the horizon at late times.

<<tiddler [[fig:horizon]]>>

If the universe is inflating at early times, and radiation- or matter-dominated at late times, perturbations in the density of the universe which are initially smaller than the horizon are redshifted during inflation to superhorizon scales. Later, as the horizon begins to grow in comoving coordinates, the perturbations fall back into the horizon, where they act as a source for structure formation. In this way inflation explains the observed properties of perturbations in the universe, which exist at both super- and sub-horizon scales at the time of recombination. Furthermore, an important consequence of this process is that the last perturbations to exit the horizon are the //first// to fall back in. Therefore, the shortest wavelength perturbations are the ones which exited the horizon just at the end of inflation, \(N = 0\), and longer wavelength perturbations exited the horizon earlier. Perturbations about the same size as our horizon today exited the horizon during inflation at around \(N = 60\). Perturbations which exited the horizon earlier than that, \(N > 60\), are still larger than our horizon today. Therefore, it is only possible to place observational constraints on the //end// of inflation, about the last 60 e-folds. Everything that happened before that, including anything that might tell us about the initial conditions which led to inflation, is most probably inaccessible to us.

<<tiddler [[fig:ThreeScales]]>>

It is now easy to see why inflation naturally results in superhorizon correlations in the CMB. Since the horizon was much smaller at the surface of last scattering than it is today, perturbation modes of order the horizon size today were larger than the horizon at the time of last scattering, as shown in the figure below.

<<tiddler [[fig:InflationCMB]]>>

In [[(3.1) The Klein-Gordon Equation in Curved Spacetime]] we treat this physical situation quantitatively by deriving the equation of motion for field perturbations \(\delta\phi\) in a Friedmann-Robertson-Walker background. 
Consider an arbitrary free scalar field, which we denote \(\varphi\) to distinguish it from the inflaton field \(\phi\). The Lagrangian for the field is
<<tiddler [[eq:freeaction]]>>
and varying the [[action|eq:freeaction]] gives the Euler-Lagrange equation of motion
<<tiddler [[eq:freescalareom]]>>
It will prove convenient to express the background FRW metric in conformal coordinates
\begin{equation}
g_{\mu\nu} = a^2\left(\tau\right) \eta_{\mu\nu}
\end{equation}
instead of the [[coordinate-time metric|eq:coordinatemetric]] we used in [[(2.3) Inflation from Scalar Fields]]. Here \(\tau\) is the conformal time and \(\eta_{\mu\nu} = {\rm diag.}\left(1,-1,-1,-1\right)\) is the Minkowski metric. In conformal coordinates, the free [[scalar equation of motion|eq:freescalareom]] is
<<tiddler [[eq:FRWfreescalareom]]>>
where \("' = d/d\tau"\) is a derivative with respect to //conformal// time. Note that unlike the case of the inflaton \(\phi\), we are solving for perturbations and therefore retain the gradient term \(\nabla^2 \varphi\). The field \(\varphi\) is a decoupled spectator field evolving in a //fixed// cosmological background, and does not affect the time evolution of the scale factor \(a\left(\tau\right)\). An example of such a field is gravitational waves. If we express the spacetime metric as an FRW background \(g^{\rm FRW}_{\mu\nu}\) plus perturbation \(\delta g_{\mu\nu}\), we can express the tensorial portion of the perturbation in general as a sum of two scalar degrees of freedom
<<tiddler [[eq:tensormetric]]>>
where \(i, j = 1,2,3\), and \(\hat e^{+,\times}_{ij}\) are longitudinal and transverse polarization tensors, respectively. It is left as an exercise for the reader to show that the scalars \(\varphi_{+,\times}\) behave to linear order as [[free scalars|eq:FRWfreescalareom]].

To solve the [[equation of motion|eq:FRWfreescalareom]], we first Fourier expand the field into momentum states \(\varphi_k\),
<<tiddler [[eq:classicalmodeexpansion]]>>
Note that the coordinates \({\bf x}\) are comoving coordinates, and the wavevector \({\bf k}\) is a comoving wavevector, which does not redshift with expansion. The proper wavevector is
\begin{equation}
{\bf k}_{\rm prop} = {\bf k} / a\left(\tau\right).
\end{equation}
Therefore, the comoving wavenumber \({\bf k}\) is not itself dynamical, but is just a set of constants labeling a particular Fourier component.  The equation of motion for a single mode \(\varphi_{\bf k}\) is
\begin{equation}
\varphi_{\bf k}^{\prime\prime} + 2 \left(\frac{a'}{a}\right) \varphi_{\bf k}' + k^2 \varphi_{\bf k} = 0.
\end{equation}
It is convenient to introduce a field redefinition
\begin{equation}
u_k \equiv a\left(\tau\right) \varphi_{\bf k}\left(\tau\right),
\end{equation}
and the mode function \(u_k\) obeys a generalization of the Klein-Gordon equation to an expanding spacetime,
<<tiddler [[eq:generaltensormode]]>>
(We have dropped the vector notation \({\bf k}\) on the subscript, since the Klein-Gordon equation depends only on the magnitude of \(k\).)

Any mode with a fixed comoving wavenumber \(k\) redshifts with time, so that early time corresponds to short wavelength (ultraviolet) and late time corresponds to long wavelength (infrared). The solutions to the mode equation show qualitatively different behaviors in the ultraviolet and infrared limits:

* //Short wavelength limit//, \(k \gg a^{\prime\prime}/a\). 
In this case, the equation of motion is that for a conformally Minkowski Klein-Gordon field,
\begin{equation}
u_k^{\prime\prime} + k^2 u_k = 0,
\end{equation}
with solution
<<tiddler [[eq:UVmode]]>>
Note that this is in terms of //conformal// time and //comoving// wavenumber, and can only be identified with an exactly Minkowski spacetime in the ultraviolet limit. 

*//Long wavelength limit//, \(k \ll a^{\prime\prime}/a\). 
In the infrared limit, the mode equation becomes
\begin{equation}
a^{\prime\prime} u_k = a u_k^{\prime\prime},
\end{equation}
with the trivial solution
<<tiddler [[eq:genericIRsolution]]>>
This illustrates the phenomenon of //mode freezing//: field modes \(\varphi_k\) with wavelength longer than the horizon size cease to be dynamical, and asymptote to a constant, //nonzero// amplitude. ```The second solution to this equation is a decaying mode, which is always subdominant in the infrared limit.``` This is a quantitative expression of our earlier qualitative notion of particle creation at the cosmological horizon. The amplitude of the field at long wavelength is determined by the boundary condition on the mode, //i.e.// the integration constants \(A_k\) and \(B_k\). 

Therefore, all of the physics boils down to the question of how we set the boundary condition on field perturbations in the ultraviolet limit. This is fortunate, since in that limit the field theory describing the modes becomes approximately Minkowskian, and we know how to quantize fields in Minkowski Space. Once the integration constants are fixed, the behavior of the mode function \(u_k\) is completely determined, and the long-wavelength amplitude of the perturbation can then be calculated without ambiguity. We next discuss quantization.
We have seen that the equation of motion for field perturbations approaches the usual Minkowski Space Klein-Gordon equation in the ultraviolet limit, which  corresponds to the limit of early time for a mode redshifting with expansion. We determine the boundary conditions for the mode function via canonical quantization. To quantize the field \(\varphi_k\), we promote the Fourier coefficients in the [[classical mode expansion|eq:classicalmodeexpansion]] to annihilation and creation operators
\begin{equation}
b_{\bf k}\ \rightarrow \hat b_{\bf k},\ \ \ b^*_{\bf k}\ \rightarrow \hat b^\dagger_{\bf k},
\end{equation}
with commutation relation
<<tiddler [[eq:canonicalcommutator]]>>
Note that the commutator in an FRW background is given in terms of //comoving// wavenumber, and holds whether we are in the short wavelength limit or not. In the short wavelength limit, this becomes equivalent to a Minkowski Space commutator. The quantum field \(\varphi\) is then given by the usual expansion in operators \(\hat b_{\bf k}\), \(\hat b^\dagger_{\bf k}\):
<<tiddler [[eq:quantummodeexpansion]]>>

The corresponding canonical momentum is
\begin{equation}
\Pi\left(\tau,{\bf x}\right) \equiv \frac{\delta {\mathcal L}}{\delta\left(\partial_0 \varphi\right)} = a^2\left(\tau\right) \frac{\partial \varphi}{\partial \tau}.
\end{equation}
It is left as an exercise for the reader to show that the canonical commutation relation
<<tiddler [[eq:quantization]]>>
corresponds to a Wronskian condition on the mode \(u_k\),
\begin{equation}
u_k \frac{\partial u^*_k}{\partial \tau} - u^*_k \frac{\partial u_k}{\partial \tau} = i,
\end{equation}
which for the [[ultraviolet mode function|eq:UVmode]] results in a condition on the integration constants
<<tiddler [[eq:quantizationAB]]>>
This quantization condition corresponds to one of the two boundary conditions which are necessary to completely determine the solution. The second boundary condition comes from vacuum selection, //i.e.// our definition of which state corresponds to a zero-particle state for the system. In the next section, we discuss the issue of vacuum selection in detail.
Consider a quantum field in Minkowski Space. The state space for a quantum field theory is a set of states \(\left\vert n({\bf k}_1),\ldots,n({\bf k}_i)\right\rangle\) representing the number of particles with momenta \({\bf k}_1,\ldots,{\bf k}_i\). The creation and annihilation operators \(\hat a^{\dagger}_{\bf k}\) and \({\hat a_{\bf k}}\) act on these states by adding or subtracting a particle from the state:
\begin{eqnarray}
\hat a^{\dagger}_{\bf k} \left\vert n({\bf k})\right\rangle &&= \sqrt{n + 1} \left\vert n({\bf k}) + 1\right\rangle\cr
{\hat a_{\bf k}} \left\vert n({\bf k})\right\rangle &&= \sqrt{n} \left\vert n({\bf k}) - 1\right\rangle.
\end{eqnarray}
The ground state, or vacuum state of the space, is just the zero particle state:
\begin{equation}
{\hat a_{\bf k}} \left\vert 0 \right\rangle = 0.
\end{equation}
Note in particular that the vacuum state \(\left\vert 0 \right\rangle\) is //not// equivalent to zero. The vacuum is not nothing:
\begin{equation}
\left\vert 0 \right\rangle \neq 0.
\end{equation}

To construct a quantum field, we look at the familiar classical wave equation for a scalar field,
<<tiddler [[eqminkowskiwaveequation]]>>
To solve this equation, we decompose into Fourier modes \(u_{\rm k}\),
<<tiddler [[eqfourierexpansion]]>>
where the mode functions \(u_{\bf k}(t)\) satisfy the ordinary differential equation
<<tiddler [[qminkowskimode]]>>
This is a classical wave equation with a classical solution, and the Fourier coefficients \(a_{\rm k}\) are just complex numbers. The solution for the mode function is
<<tiddler [[eqminkowskimodefunction]]>>
where \(\omega_k\) satisfies the dispersion relation
\begin{equation}
\omega_k^2 - {\bf k}^2 = 0.
\end{equation}
To turn this into a quantum field, we identify the Fourier coefficients with creation and annihilation operators
\begin{equation}
a_{\bf k} \rightarrow \hat a_{\bf k},\ a^*_{\bf k} \rightarrow \hat a^{\dagger}_{\bf k},
\end{equation}
and enforce the commutation relations
\begin{equation}
\left[\hat a_{\bf k}, \hat a^{\dagger}_{\bf k'}\right] = \delta^3\left({\bf k} - {\bf k'}\right).
\end{equation}

This is the standard quantization of a scalar field in Minkowski Space, which should be familiar. But what probably is not familiar is that this solution has an interesting symmetry. Suppose we define a new mode function \(u_{\bf k}\) which is a rotation of the [[Minkowski mode function|eqminkowskimodefunction]]:
<<tiddler [[eqrotatedmodefunction]]>>
This is //also// a perfectly valid solution to the [[original wave equation|eqminkowskiwaveequation]], since it is just a superposition of the Fourier modes. But we can then re-write the quantum field in terms of our original Fourier modes and new //operators// \(\hat b_{\bf k}\) and \(\hat b^{\dagger}_{\bf k}\) and the original Fourier modes \(e^{i {\bf k} \cdot {\bf x}}\) as:
\begin{equation}
\phi =  \int{d^3 k \left[{\hat b_{\bf k}} e^{-i \omega t + i {\bf k}\cdot{\bf x}} + \hat b^{\dagger}_{\bf k}  e^{+ i \omega t - i {\bf k}\cdot{\bf x}}\right]},
\end{equation}
where the new operators \(\hat b_{\bf k}\) are given in terms of the old operators \({\hat a_{\bf k}}\) by
\begin{equation}
\hat b_{\bf k} = A(k) \hat a_{\bf k} + B^*(k) \hat a^{\dagger}_{\bf k}.
\end{equation}
This is completely equivalent to our [[original solution|eqfourierexpansion]] as long as the new operators satisfy the same commutation relation as the original operators,
\begin{equation}
\left[\hat b_{\bf k}, \hat b^{\dagger}_{\bf k'}\right] = \delta^3\left({\bf k} - {\bf k'}\right).
\end{equation}
This can be shown to place a condition on the coefficients \(A\) and \(B\),
\begin{equation}
\left\vert A\right\vert^2 - \left\vert B\right\vert^2 = 1.
\end{equation}
Otherwise, we are free to choose \(A\) and \(B\) as we please.

This is just a standard property of linear differential equations: any linear combination of solutions is itself a solution. But what does it mean physically? In one case, we have an annihilation operator \({\hat a_{\bf k}}\) which gives zero when acting on a particular state which we call the vacuum state:
\begin{equation}
{\hat a_{\bf k}} \left\vert 0_a \right\rangle = 0.
\end{equation}
Similarly, our rotated operator \(\hat b_{\bf k}\) gives zero when acting on some state
\begin{equation}
\hat b_{\bf k} \left\vert 0_b\right\rangle = 0.
\end{equation}
The point is that the two "vacuum" states are not the same
\begin{equation}
\left\vert 0_a \right\rangle \neq \left\vert 0_b\right\rangle.
\end{equation}
From this point of view, we can define any state we wish to be the "vacuum" and build a completely consistent quantum field theory based on this assumption. From another equally valid point of view this state will contain particles. How do we tell which is the //physical// vacuum state? To define the real vacuum, we have to consider the spacetime the field is living in. For example, in regular special relativistic quantum field theory, the "true" vacuum is the zero-particle state as seen by an inertial observer. Another more formal way to state this is that we require the vacuum to be Lorentz symmetric. This fixes our choice of vacuum \(\left\vert 0\right\rangle\) and defines unambiguously our set of creation and annihilation operators \(\hat a\) and \(\hat a^{\dagger}\). A consequence of this is that an //accelerated// observer in the Minkowski vacuum will think that the space is full of particles, a phenomenon known as the Unruh effect  ```Unruh (1976)  [[Notes on black hole evaporation|http://inspirehep.net/search?ln=en&p=Unruh:1976db&of=hd]]``` . The zero-particle state for an accelerated observer is different than for an inertial observer. The case of an FRW spacetime is exactly analogous, except that the FRW equivalent of an inertial observer is an observer at rest in comoving coordinates. Since an FRW spacetime is asymptotically Minkowski in the ultraviolet limit, we choose the vacuum field which corresponds to the usual Minkowski vacuum in that limit,
\begin{equation}
u_k\left(\tau\right) \propto e^{-i k \tau}\ \Rightarrow A_k = 1,\ B_k = 0.
\end{equation}
This is known as the //Bunch-Davies// vacuum. The power spectrum of fluctuations is just given by the two-point correlation function of the field,
\begin{equation}
P(k) \propto \left\langle \phi^2 \right\rangle_{k \ll a H} \propto \left\vert \frac{u_{\bf k}}{a}\right\vert^2 \neq 0.
\end{equation}
This means that we have produced //classical// perturbations at long wavelength from quantum fluctuations at short wavelength. 

The Bunch-Davies vacuum is not the only possible choice, although it is widely believed to be the most natural. The issue of vacuum ambiguity of inflationary perturbations is a subject which is extensively discussed in the literature, and is still the subject of controversy. It is known that the choice of vacuum is potentially sensitive to quantum-gravitational physics  ```Hui & Kinney (2002)  [[Short distance physics and the consistency relation for scalar and tensor fluctuations in the inflationary universe|http://inspirehep.net/search?ln=en&p=Hui:2001ce&of=hd]]```  ```Danielsson (2002)  [[A Note on inflation and transPlanckian physics|http://inspirehep.net/search?ln=en&p=Danielsson:2002kx&of=hd]]```  ```Easther, //et al.,// (2002)  [[A Generic estimate of transPlanckian modifications to the primordial power spectrum in inflation|http://inspirehep.net/search?ln=en&p=Easther:2002xe&of=hd]]``` , a subject which is referred to as //Trans-Planckian// physics  ```Martin & Brandenberger (2001)  [[The TransPlanckian problem of inflationary cosmology|http://inspirehep.net/search?ln=en&p=Martin:2000xs&of=hd]]```  ```Niemeyer (2001)  [[Inflation with a Planck scale frequency cutoff|http://inspirehep.net/search?ln=en&p=Niemeyer:2000eh&of=hd]]```  ```Kinney (2003)  [[Cosmology, inflation, and the physics of nothing|http://inspirehep.net/search?ln=en&p=Kinney:2003xf&of=hd]]``` .  This idea is based on a simple observation about scales in the universe. As we discussed in Sec. [[(2.1) Vacuum Energy in Quantum Field Theory]], quantum field theory extended to infinitely high energy scales gives nonsensical (i.e., divergent) results. We therefore expect the theory to break down at high energy, or equivalently at very short lengths. We can estimate the length scale at which quantum mechanical effects from gravity become important by simple dimensional analysis. We define the Planck length \(\ell_{\rm Pl}\) by an appropriate combination of fundamental constants as
\begin{equation}
\ell_{\rm Pl} \sim \sqrt{\hbar G \over c^3} \sim 10^{-35} m.
\end{equation}
For processes probing length scales shorter than \(\ell_{\rm Pl}\), such as quantum modes with wavelengths \(\lambda < \ell_{\rm Pl}\), we expect some sort of new physics to be important. There are a number of ideas for what that new physics might be, for example string theory or noncommutative geometry or discrete spacetime, but physics at the Planck scale is currently not well understood. It is unlikely that particle accelerators will provide insight into such high energy scales, since quantum modes with wavelengths less than \(\ell_{\rm Pl}\) will be characterized by energies of order \(10^{19}\ {\rm GeV}\) or so, and current particle accelerators operate at energies around \(10^{3}\ {\rm GeV}\).```This might not be so in "braneworld" scenarios where the energy scale of quantum gravity can be much lower  (Carames, //et al.,// (2012)  [[Effective gravitational equations for f(R) braneworld models|http://inspirehep.net/search?ln=en&p=braneworld&of=hd]]).``` However, we note an interesting fact, namely that the ratio between the current horizon size of the universe and the Planck length is about
\begin{equation}
{d_{\rm H} \over l_{\rm Pl}} \sim 10^{60},
\end{equation}
or, on a log scale,
\begin{equation}
\ln\left({d_{\rm H} \over l_{\rm Pl}}\right) \sim 140.
\end{equation}
This is a big number, but we recall our earlier discussion of the flatness and horizon problems and note that inflation, in order to adequately explain the flatness and homogeneity of the universe, requires the scale factor to increase by //at least// a factor of \(e^{55}\). Typical models of inflation predict much more expansion, \(e^{1000}\) or more. We remember that quantum modes created during the inflationary expansion, such as those responsible for density and gravitational-wave fluctuations, have wavelengths which redshift proportional to the scale factor, so that the wavelength \(\lambda_i\) of a mode at early times can be given in terms of its wavelength \(\lambda_0\) today by
\begin{equation}
\lambda_i \ll e^{-N} \lambda_0.
\end{equation}
This means that if inflation lasts for more than about \(N \sim 140\) e-folds, fluctuations of order the size of the universe today were smaller than the Planck length during inflation! This suggests the possibility that Planck-scale physics might have been important for the generation of quantum modes in inflation. The effects of such physics might be imprinted in the pattern of cosmological fluctuations we see in the CMB and large-scale structure today. For an inflationary period that lasts longer than 140 e-folds or so, the fluctuations we see with wavelengths comparable to the horizon size today started out with wavelengths shorter than the Planck length \(\ell_{\rm Pl} \sim 10^{-35}\ {\rm m}\) during inflation. For a mode with a wavelength that short, do we really know how to select the "vacuum" state, which we have assumed is given by the Bunch-Davies vacuum? Not necessarily. We do know that once the mode redshifts to a wavelength greater than \(\ell_{\rm Pl}\), it must be a solution to the standard [[FRW mode equation|eq:generaltensormode]], but we no longer know for certain how to select the values of the integration constants \(A(k)\) and \(B(k)\). What we have done is mapped the effect of quantum gravity onto a boundary condition for the mode function \(u_{\bf k}\). In principle, \(A(k)\) and \(B(k)\) could be anything! If we allow \(A\) and \(B\) to remain arbitrary, it is simple to calculate the change in the two-point correlation function at long wavelength,
\begin{equation}
P(k) \rightarrow \left\vert A(k) + B(k)\right\vert^2 P_{\rm B-D}(k),
\end{equation}
where the subscript \({\rm B-D}\) indicates the value for the case of the "standard" Bunch-Davies vacuum, which corresponds to the choice \(A = 1\), \(B = 0\). So the power spectrum of gravity-wave and density fluctuations is sensitive to how we choose the vacuum state at distances shorter than the Planck scale, and is in principle sensitive to quantum gravity.

While in principle \(A(k)\) and \(B(k)\) are arbitrary, a great deal of work has been done implementing this idea within the context of reasonable toy models of the physics of short distances. There is some disagreement in the literature with regard to how big the parameter \(B\) can reasonably be. As one might expect on dimensional grounds, the size of the rotation is determined by the dimensionless ratio of the Planck length to the horizon size, so it is expected to be small
\begin{equation}
B \sim \left({l_{\rm Pl} \over d_{\rm H}}\right)^p \sim \left({H \over m_{\rm Pl}}\right)^p\ll 1.
\end{equation}
Here we have introduced a power \(p\) on the ratio, which varies depending on which model of short-distance physics you choose. Several groups have shown an effect linear in the ratio, \(p = 1\). The figure below shows the modulation of the power spectrum calculated in the context of one simple model  ```Danielsson (2002)  [[A Note on inflation and transPlanckian physics|http://inspirehep.net/search?ln=en&p=Danielsson:2002kx&of=hd]]```  ```Easther, //et al.,// (2002)  [[A Generic estimate of transPlanckian modifications to the primordial power spectrum in inflation|http://inspirehep.net/search?ln=en&p=Easther:2002xe&of=hd]]``` .

<<tiddler [[fig:psmodulation]]>>

Others have argued that this is too optimistic, and that a more realistic estimate is \(p = 2\)  ```Kaloper, //et al.,// (2002)  [[Initial conditions for inflation|http://inspirehep.net/search?ln=en&p=Kaloper:2002cs&of=hd]]```  ```Kaloper, //et al.,// (2002)  [[Signatures of short distance physics in the cosmic microwave background|http://inspirehep.net/search?ln=en&p=Kaloper:2002uj&of=hd]]```  or even smaller  ```Kaloper, //et al.,// (2002)  [[Signatures of short distance physics in the cosmic microwave background|http://inspirehep.net/search?ln=en&p=Kaloper:2002uj&of=hd]]``` . The difference is important: if \(p = 1\), the modulation of the power spectrum can be as large as a percent or so, a potentially observable value  ```Bergstrom & Danielsson (2002)  [[Can MAP and Planck map Planck physics?|http://inspirehep.net/search?ln=en&p=Bergstrom:2002yd&of=hd]]```  ```Easther, //et al.,// (2005)  [[Observing trans-Planckian signatures in the cosmic microwave background|http://inspirehep.net/search?ln=en&p=Easther:2004vq&of=hd]]``` .


For the remainder of our discussion, we will assume a Bunch-Davies vacuum: the key point is that quantization and vacuum selection together //completely// specify the mode function, up to an overall phase. This means that the amplitude of the mode once it has redshifted to long wavelength and frozen out is similarly determined. In the next section, we solve the mode equation at long wavelength for an inflationary background.
The exact form of the solution to the [[mode equation|eq:generaltensormode]] depends on the evolution of the background spacetime, as encoded in \(a\left(\tau\right)\), which in turn depends on the equation of state of the field driving inflation. We will consider the case where the equation of state is constant, which will //not// be the case in general for scalar field-driven inflation, but will nonetheless turn out to be a good approximation in the limit of a slowly rolling field. Generalizing the [[de Sitter case|eq:deSittertau]] to the case of arbitrary equation of state parameter \(\epsilon = {\rm const.}\), the conformal time can be written
<<tiddler [[eq:inflconformaltime]]>>
and the [[Friedmann and Raychaudhuri Equations|eq:generalFRW]] give
\begin{equation}
\frac{a^{\prime\prime}}{a} = a^2 H^2 \left(2 - \epsilon\right),
\end{equation}
where a prime denotes a derivative with respect to conformal time. The conformal time, as in the case of de Sitter space, is negative and tending toward zero during inflation. (Proof of these relations is left as an exercise for the reader.) We can then write the [[mode equation|eq:generaltensormode]] as
\begin{equation}
u_k^{\prime\prime} + \left[k^2 - a^2 H^2 \left(2 - \epsilon\right)\right] u_k = 0.
\end{equation}
Writing \(a H\) in terms of the [[conformal time|eq:inflconformaltime]] \(\tau\),  the equation of motion becomes
\begin{equation}
\tau^2 \left(1 - \epsilon\right)^2 u_k^{\prime\prime} + \left[\left(k \tau\right)^2 \left(1 - \epsilon\right)^2 - \left(2 - \epsilon\right)\right] u_k = 0. 
\end{equation}
This is a Bessel equation, with solution
<<tiddler [[eq:generalmodesolution]]>>
where the index \(\nu\) is given by:
\begin{equation}
\nu = \frac{3 - \epsilon}{2 \left(1 - \epsilon\right)}.
\end{equation}
The quantity \(- k \tau\) has special physical significance, since using the [[conformal time|eq:inflconformaltime]] we can write
\begin{equation}
\left(- k \tau\right) \left(1 - \epsilon\right) = \frac{k}{a H},
\end{equation}
where the quantity \(\left(k / a H\right)\) expresses the wavenumber \(k\) in units of the comoving horizon size \(d_{\rm H} \sim (a H)^{-1}\). Therefore, the short wavelength limit is \(-k \tau \rightarrow \infty\), or \(\left(k / a H\right) \gg 1\). The long-wavelength limit is \(-k \tau \rightarrow 0\), or \(\left(k / a H\right) \ll 1\).

The simple case of de Sitter space (\(p = - \rho\)) corresponds to the limit \(\epsilon = 0\), so that the Bessel index is \(\nu = 3/2\) and the [[mode function|eq:generalmodesolution]] simplifies to
\begin{equation}
u_k \propto \left(\frac{k \tau - i}{k\tau}\right) e^{\pm i k \tau}.
\end{equation}
In the short wavelength limit, \(\left( -k \tau\right) \rightarrow \infty\), the mode function is given, as expected, by
\begin{equation}
u_k \propto e^{\pm i k \tau}.
\end{equation}
Selecting the Bunch-Davies vacuum gives \(u_k \propto e^{-i k \tau}\), and canonical quantization fixes the normalization,
\begin{equation}
u_k = \frac{1}{\sqrt{2 k}} e^{-i k \tau}.
\end{equation}
Therefore, the fully normalized exact solution is
<<tiddler [[eq:deSittermode]]>>
This solution has no free parameters aside from an overall phase, and is valid at //all// wavelengths, including after the mode has been redshifted outside of the horizon and becomes non-dynamical, or "frozen". In the long wavelength limit, \(-k \tau \rightarrow 0\), the [[mode function|eq:deSittermode]] becomes
\begin{equation}
u_k \rightarrow \frac{1}{\sqrt{2 k}} \left(\frac{i}{\left(- k \tau\right)}\right) = \frac{i}{\sqrt{2 k}}\left(\frac{a H}{k}\right) \propto a,
\end{equation}
consistent with the [[qualitative result|eq:genericIRsolution]] derived earlier. Therefore the field amplitude \(\varphi_k\) is given by
\begin{equation}
\left\vert \varphi_k\right\vert = \left\vert \frac{u_k}{a}\right\vert \rightarrow \frac{H}{\sqrt{2} k^{3/2}} = {\rm const.}
\end{equation}
The quantum mode therefore displays the freezeout behavior we noted qualitatively above.

<<tiddler [[fig:modefunction]]>>

The amplitude of quantum fluctuations is conventionally expressed in terms of the two-point correlation function of the field \(\varphi\). It is left as an exercise for the reader to show that the vacuum two-point correlation function is given by
\begin{eqnarray}
\left\langle 0 \left\vert \varphi\left(\tau,{\bf x}\right)\varphi\left(\tau,{\bf x}'\right) \right\vert 0 \right\rangle &=& \int{\frac{d^3 k}{\left(2 \pi\right)^3} \left\vert\frac{u_k}{a}\right\vert^2 e^{i {\bf k}\cdot\left({\bf x} - {\bf x}'\right)}}\cr
&=& \int{\frac{d k}{k} P\left(k\right) e^{i {\bf k}\cdot\left({\bf x} - {\bf x}'\right)}},
\end{eqnarray}
where the //power spectrum// \(P\left(k\right)\) is defined as
\begin{equation}
P\left(k\right) \equiv \left(\frac{k^3}{2 \pi^2}\right) \left\vert\frac{u_k}{a}\right\vert^2 \longrightarrow \left(\frac{H}{2 \pi}\right)^2,\ -k \tau \rightarrow 0.
\end{equation}
The power per logarithmic interval \(k\) in the field fluctuation is then given in the long wavelength limit by the Hubble parameter \(H = {\rm const.}\) This property of scale invariance is exact in the de Sitter limit.

In a more general model, the spacetime is only //approximately// de Sitter, and we expect that the power spectrum of field fluctuations will only be approximately scale invariant. It is convenient to express this dynamics in terms of the equation of state parameter \(\epsilon\),
\begin{equation}
\epsilon = \frac{1}{H}\frac{d H}{d N}.
\end{equation}
We must have \(\epsilon < 1\) for inflation, and for a slowly rolling field \(\left\vert \eta\right\vert \ll 1\) means that \(\epsilon\) will also be slowly varying, \(\epsilon \simeq {\rm const.}\) It is straightforward to show that for \(\epsilon = {\rm const.} \neq 0\) that:

* The Bunch-Davies vacuum corresponds to the positive mode of the [[general mode function|eq:generalmodesolution]],
<<tiddler [[eq:generalBDmode]]>>
* Quantization fixes the normalization as
<<tiddler [[eq:tensormodeexactsoln]]>>
* The power spectrum in the long-wavelength limit \(k / a H \rightarrow 0\) is a power law in \(k\):
<<tiddler [[eq:tensorinfrared]]>>
where \(\Gamma\left(\nu\right)\) is a gamma function, and
\begin{equation}
\nu = \frac{3 - \epsilon}{2 \left(1 - \epsilon\right)}.
\end{equation}
Proof is left as an exercise for the reader.```Note that the [[quantization condition|eq:quantization]] can be applied to the [[solution|eq:generalmodesolution]] exactly, resulting in the [[normalization condition|eq:quantizationAB]], without approximating the solution in the short-wavelength limit!``` Note that in the case \(\epsilon = {\rm const.}\), both the background and perturbation equations are //exactly// solvable.

We can use these solutions as approximate solutions in the more general slow roll case, where \(\epsilon \ll 1\) and \(\epsilon \simeq {\rm const.}\), so that the dependence of the power spectrum on \(k\) is approximately a power-law,
\begin{equation}
P\left(k\right) \propto k^{n},
\end{equation}
with spectral index
<<tiddler [[eq:freescalarn]]>>
The [[infrared mode function|eq:tensorinfrared]] is curious, however, because it does not obviously exhibit complete mode freezing at long wavelength, since \(a\) and \(H\) both depend on time. We can show that \(P\left(k\right)\) does in fact approach a time-dependent value at long wavelength by evaluating
\begin{eqnarray}
&&\frac{d}{d N} \left[H \left(\frac{k}{a H}\right)^{3/2 - \nu}\right] = \frac{d}{d N} \left[H \left(\frac{k}{a H}\right)^{-\epsilon / \left(1 - \epsilon\right)}\right]\cr
&&= H \epsilon \left(\frac{k}{a H}\right)^{-\epsilon / \left(1 - \epsilon\right)} - H \frac{\epsilon}{1 - \epsilon} \left(\frac{k}{a H}\right)^{-\epsilon / \left(1 - \epsilon\right) - 1} \left(\frac{k}{a H} - \frac{\epsilon k}{a H}\right)\cr 
&&= 0,
\end{eqnarray}
which can be easily shown using \(a \propto e^{-N}\) and \(H \propto e^{\epsilon N}\). That is, the time-dependent quantities \(a\) and \(H\) in [[the infrared mode function|eq:tensorinfrared]] are combined in such a way as to form an //exactly// conserved quantity. Since it is conserved, we are free to evaluate it at any time (or value of \(a H\)) that we wish. It is conventional to evaluate the power spectrum at //horizon crossing//, or at \(a H = k\), so that
<<tiddler [[eq:horizoncrossing]]>>
where we have approximated the \(\nu\)-dependent multiplicative factor as order unity. ```This is //not// the value of the scalar field power spectrum at the moment the mode is physically crossing outside the horizon, as is often stated in the literature: it is the value of the power spectrum in the asymptotic long-wavelength limit. It is easy to show from the [[exact solution|eq:tensormodeexactsoln]] that the mode function is still evolving with time as it crosses the horizon at \(k = a H\), and the asymptotic amplitude differs from the amplitude at horizon crossing by about a factor of two. See Kinney (2005)  [[Horizon crossing and inflation with large eta|http://inspirehep.net/search?ln=en&p=Kinney:2005vj&of=hd]]  for a more detailed discussion of this point.```

It is straightforward to calculate the [[spectral index|eq:freescalarn]] directly from the [[horizon crossing expression|eq:horizoncrossing]] by using
\begin{equation}
a \propto e^{-N},\ H \propto e^{\epsilon N},
\end{equation}
so that we can write derivatives in \(k\) at horizon crossing as derivatives in the number of e-folds \(N\),
\begin{equation}
d\ln{k}\vert_{k = a H} = d\ln{\left(a H\right)} = \frac{1}{a H} \frac{d \left(a H\right)}{d N} dN = \left(\epsilon - 1\right) d N.
\end{equation}
The spectral index is then, to lowest order in slow roll
\begin{eqnarray}
n = \frac{d \ln{P\left(k\right)}}{d \ln{k}} &&= \frac{k}{H^2}\frac{d H^2}{d k}\bigg\vert_{k = a H}\cr  
&&= \frac{1}{ H^{2} \left(\epsilon - 1\right)} \frac{d H^2}{d N}\cr 
&&= \frac{2 \epsilon}{ \left(\epsilon - 1\right)}\cr
&&\simeq - 2 \epsilon,
\end{eqnarray}
in agreement with [[the free scalar case|eq:freescalarn]]. Note that we are rather freely changing variables from the wavenumber \(k\) to the comoving horizon size \((a H)^{-1}\) to the number of e-folds \(N\). As long as the cosmological evolution is monotonic, these are all different ways of measuring time: the time when a mode with wavenumber \(k\) exits the horizon, the time at which the horizon is a particular size, the number of e-folds \(N\) and the field value \(\phi\) are all effectively just different choices of a clock, and we can switch from one to another as is convenient. For example, in the slow roll approximation, the Hubble parameter \(H\) is just a function of \(\phi\), \(H \propto \sqrt{V\left(\phi\right)}\). Because of this, it is convenient to define \(N\left(k\right)\) to be the [[number of e-folds|eq:srN]] when a mode with wavenumber \(k\) crosses outside the horizon, and \(\phi_N\left(k\right)\) to be the field value \(N\left(k\right)\) e-folds before the end of inflation. Then the power spectrum can be written equivalently as //either// a function of \(k\) or of \(\phi\):
\begin{equation}
P^{1/2}\left(k\right) = \left(\frac{H}{2 \pi}\right)_{k = a H} = \left(\frac{H}{2 \pi}\right)_{\phi = \phi_N\left(k\right)} \simeq \sqrt{\frac{2 V\left(\phi_N\right)}{3 \pi m_{\rm Pl}^2}}.
\end{equation}
Wavenumbers \(k\) are conventionally normalized in units of \(h {\rm Mpc}^{-1}\) as measured in the //current// universe.  We can relate \(N\) to scales in the current universe by recalling that modes which are of order the horizon size in the universe today, \(k \sim a_0 H_0\), exited the horizon during inflation when \(N = \left[46,60\right]\), so that we can calculate the amplitude of perturbations at the scale of the CMB quadrupole today by evaluating the power spectrum for field values between \(\phi_{46}\) and \(\phi_{60}\).

One example of a free scalar in inflation is gravitational wave modes, where the transverse and longitudinal polarization states of the gravity waves evolve as independent scalar fields. Using the [[perturbed metric|eq:tensormetric]], we can then calculate the power spectrum in gravity waves (or //tensors//) as the sum of the two-point correlation functions for the separate polarizations:
\begin{equation}
P_T = \left\langle \delta g_{ij}^{2}\right\rangle = 2 \times \frac{32}{m_{\rm Pl}^2} \left\langle \varphi^2 \right\rangle = \frac{16 H^2}{\pi m_{\rm Pl}^2} \propto k^{n_T},
\end{equation}
with spectral index
\begin{equation}
n_T = - 2 \epsilon.
\end{equation}
If the amplitude is large enough, such a spectrum of primordial gravity waves will be observable in the cosmic microwave background anisotropy and polarization, or be directly detectable by proposed experiments such as Big Bang Observer  ```Smith, //et al.,// (2006)  [[Direct detection of the inflationary gravitational wave background|http://inspirehep.net/search?ln=en&p=Smith:2005mm&of=hd]]```  ```Friedman, //et al.,// (2006)  [[WMAP-normalized Inflationary Model Predictions and the Search for Primordial Gravitational Waves with Direct Detection Experiments|http://inspirehep.net/search?ln=en&p=Friedman:2006zt&of=hd]]``` .

The second type of perturbation generated during inflation is perturbations in the density of the universe, which are the dominant component of the CMB anisotropy \(\delta T / T \sim \delta \rho / \rho \sim 10^{-5}\), and are responsible for structure formation. Density, or //scalar// perturbations are more complicated than tensor perturbations because they are generated by quantum fluctuations in the inflaton field itself: since the background energy density is dominated by the inflaton, fluctuations of the inflaton up or down the potential generate perturbations in the density. The full calculation requires self-consistent General Relativistic perturbation theory, and is presented in [[(3.5) The Curvature Perturbation]].
In this section, we discuss the generation of perturbations in the density \(\delta\left({\bf x}\right) \equiv \delta \rho / \rho\) generated during inflation. The process is similar to the case of a free scalar field discussed in [[(3.4) Exact Solutions and the Primordial Power Spectrum]]: the inflaton field \(\phi\), like any other scalar, will have quantum fluctuations which are stretched to superhorizon scales and subsequently freeze out as classical perturbations. The difference is that the energy density of the universe is dominated by the inflaton potential, so that quantum fluctuations in \(\phi\) generate perturbations in the density \(\rho\). Dealing with such density perturbations is complicated by the fact that in General Relativity, we are free to choose any coordinate system, or //gauge//, we wish. To see why, consider the case of an FRW spacetime evolving with scale factor \(a\left(t\right)\) and uniform energy density \(\rho\left(t,{\bf x}\right) = \bar\rho\left(t\right)\). What we mean here by "uniform" energy density, or homogeneity, is that the density is a constant in //comoving// coordinates. But the physics is independent of coordinate system, so we could equally well work in coordinates \(t'\), \({\bf x}'\) for which constant-time hypersurfaces do //not// have constant density.

<<tiddler [[fig:FRWfoliation]]>>

Such a division of spacetime into a time coordinate and a set of orthogonal spacelike hypersurfaces is called a //foliation// of the spacetime, and is an arbitrary choice equivalent to a choice of coordinate system.

For an FRW spacetime, comoving coordinates correspond to a foliation of the spacetime into spatial hypersurfaces with constant density: this is the most physically intuitive description of the spacetime. Any other choice of foliation of the spacetime would result in density "perturbations" which are entirely due to the choice of coordinate system. Such unphysical perturbations are referred to as //gauge modes//. Another way to think of this is that the division between what we call "background" and what we call "perturbation" is itself gauge-dependent. For perturbations with wavelength smaller than the horizon, it is possible to define background and perturbation without ambiguity, since all observers can agree on a definition of time coordinate \(t\) and on an average density \(\bar\rho\left(t\right)\). Not so for superhorizon modes: if we consider a perturbation mode with wavelength much larger than the horizon size, observers in different horizons will see themselves in independently evolving, homogeneous patches of the universe: a "perturbation" can be defined only by comparing causally disconnected observers, and there is an inherent gauge ambiguity in how we do this. The canonical paper on gauge issues in General Relativistic perturbation theory is by Bardeen  ```Bardeen (1980)  [[Gauge Invariant Cosmological Perturbations|http://inspirehep.net/search?ln=en&p=Bardeen:1980kt&of=hd]]``` . A good pedagogical treatment with a focus on inflationary perturbations can be found in Ref.  ```Komatsu (2002)  [[The pursuit of non-gaussian fluctuations in the cosmic microwave background|http://inspirehep.net/search?ln=en&p=Komatsu:2002db&of=hd]]``` .

In practice, instead of the density perturbation \(\delta\), the quantity most directly relevant to CMB physics is the Newtonian potential \(\Phi\) on the surface of last scattering. For example, this is the quantity that directly appears in [[the Sachs-Wolfe Effect|eq:SachsWolfe]]. The Newtonian potential is related to the density perturbation \(\delta\) through the Poisson Equation:
\begin{equation}
\nabla^2 \Phi =  4 \pi G \bar\rho a^2 \delta,
\end{equation} 
where the factor of \(a^2\) comes from defining the gradient \(\nabla\) relative to comoving coordinates. Like \(\delta\), the Newtonian potential \(\Phi\) is a gauge-dependent quantity: its value depends on how we foliate the spacetime. For example, we are free to choose spatial hypersurfaces such that the density is constant, and the Newtonian potential vanishes everywhere: \(\Phi\left(t,{\bf x}\right) = 0\). This foliation of the spacetime is equivalent to the qualitative picture above of different horizon volumes as independently evolving homogeneous universes. Observers in different horizons use the density \(\rho\) to synchronize their clocks with one another. Such a foliation is not very useful for computing the Sachs-Wolfe effect, however! Instead, we need to define a gauge which corresponds to the Newtonian limit in the present universe. To accomplish this, we describe the evolution of a scalar field dominated cosmology using the useful fluid flow approach  ```Hawking (1966)  [[Perturbations of an expanding universe|http://inspirehep.net/search?ln=en&p=Hawking:1966qi&of=hd]]```  ```Ellis & Bruni (1989)  [[COVARIANT AND GAUGE INVARIANT APPROACH TO COSMOLOGICAL DENSITY FLUCTUATIONS|http://inspirehep.net/search?ln=en&p=Ellis:1989jt&of=hd]]```  ```Liddle & Lyth (1993)  [[The Cold dark matter density perturbation|http://inspirehep.net/search?ln=en&p=Liddle:1993fq&of=hd]]```  ```Sasaki & Stewart (1996)  [[A General analytic formula for the spectral index of the density perturbations produced during inflation|http://inspirehep.net/search?ln=en&p=Sasaki:1995aw&of=hd]]```  ```Challinor & Lasenby (1999)  [[Cosmic microwave background anisotropies in the CDM model: A Covariant and gauge invariant approach|http://inspirehep.net/search?ln=en&p=Challinor:1998xk&of=hd]]``` . 
(An alternate strategy involves the construction of gauge-invariant variables: see Sasaki  ```Kodama & Sasaki (1984)  [[Cosmological Perturbation Theory|http://inspirehep.net/search?ln=en&p=Kodama:1985bj&of=hd]]``` and Mukhanov, //et al.//  ```Mukhanov, //et al.,// (1992)  [[Theory of cosmological perturbations. Part 1. Classical perturbations. Part 2. Quantum theory of perturbations. Part 3. Extensions|http://inspirehep.net/search?ln=en&p=Mukhanov:1990me&of=hd]]```  for reviews.)

Consider a scalar field \(\phi\) in an arbitrary background \(g_{\mu\nu}\). The stress-energy tensor of the scalar field may be written
<<tiddler [[eq:generalstressenergy]]>>
Note that we have not yet made any assumptions about the metric \(g_{\mu \nu}\) or about the scalar field \(\phi\), and this is a completely general expression. We can define a fluid four-velocity for the scalar field by
<<tiddler [[eq:deffourvelocity]]>>
It is not immediately obvious why this should be considered a four-velocity. Consider any perfect fluid filling spacetime. Each element of the fluid has four-velocity \(u^\mu\left(x\right)\) at every point in spacetime which is everywhere timelike,
\begin{equation}
u^\mu\left(x\right) u_\mu\left(x\right) = 1\ \forall x.
\end{equation}
Such a collection of four-vectors is called a //timelike congruence//. We can draw the congruence defined by the fluid four-velocity as a set of [[flow lines in spacetime|fig:timelikecongruence]]. Each event \(P\) in spacetime has one and only one flow line passing through it. The fluid four-velocity is then a set of unit-normalized tangent vectors to the flow lines, \(u^\mu u_\mu = 1\).

<<tiddler [[fig:timelikecongruence]]>>

For a scalar field, we construct a timelike congruence using the [[four velocity|eq:deffourvelocity]], which is by construction unit normalized:
\begin{equation}
u^\mu u_\mu = \frac{g^{\mu \nu} \phi_{,\mu} \phi_{,\nu}}{g^{\alpha \beta} \phi_{,\alpha} \phi_{,\beta}} = 1.
\end{equation}

We then define the "time" derivative of any scalar quantity \(f(x)\) by the projection of the derivative along the fluid four-velocity:
<<tiddler [[eq:deftimederiv]]>>
In particular, the time derivative of the scalar field itself is
<<tiddler [[eq:defphidot]]>>
Note that in the homogeneous case, we recover the usual time derivative,
\begin{equation}
\nabla \phi = 0 \Rightarrow \dot\phi = \sqrt{g^{00} \phi_{,0} \phi_{,0}} = \frac{d \phi}{d t}.
\end{equation}
The [[stress-energy tensor|eq:generalstressenergy]] in terms of \(\dot\phi\) takes the form
\begin{equation}
T_{\mu\nu} = \left[\frac{1 }{ 2} \dot\phi^2 + V\left(\phi\right)\right] u_\mu u_\nu + \left[\frac{1 }{ 2} \dot\phi^2 - V\left(\phi\right)\right] \left(u_\mu u_\nu - g_{\mu\nu}\right).
\end{equation}
We can then define a generalized density \(\rho\) and pressure \(p\) by
<<tiddler [[eq:defrhop]]>>
Note that despite the familiar form of these expressions, they are defined without any assumption of homogeneity of the scalar field or even the imposition of a particular metric.

In terms of the generalized density and pressure, the [[stress-energy|eq:generalstressenergy]] is
<<tiddler [[eq:simplestressenergy]]>>
where the tensor \(h_{\mu\nu}\) is defined as:
\begin{equation}
h_{\mu\nu} \equiv u_\mu u_\nu - g_{\mu\nu}.
\end{equation}
The tensor \(h_{\mu\nu}\) can be easily seen to be a projection operator onto hypersurfaces orthogonal to the four-velocity \(u^\mu\). For any vector field \(A^\mu\), the product \(h_{\mu\nu} A^\nu\) is identically orthogonal to the four-velocity:
\begin{equation}
\left(h_{\mu\nu} A^\nu\right) u^\mu = A^\nu \left(h_{\mu\nu} u^\mu\right) = 0.
\end{equation}
Therefore, as in the case of the time derivative, we can define gradients by projecting the derivative onto surfaces orthogonal to the four-velocity
<<tiddler [[eq:defgradient]]>>
In the case of a [[scalar field fluid|eq:deffourvelocity]], the gradient of the field identically vanishes,
\begin{equation}
\left(\nabla \phi\right)^\mu = 0.
\end{equation}
Note that despite its relation to a "spatial" gradient, \(\nabla f\) is a covariant quantity, //i.e.// a four-vector.

Our fully covariant definitions of "time" derivatives and "spatial" gradients suggest a natural foliation of the spacetime into spacelike hypersurfaces, with time coordinate orthogonal to those hypersurfaces. We can define spatial hypersurfaces to be everywhere [[orthogonal to the fluid flow|fig:comovingfoliation]]. This is equivalent to choosing a coordinate system for which \(u^i = 0\) everywhere. Such a gauge choice is called //comoving// gauge. In the case of a scalar field, we can equivalently define comoving gauge as a coordinate system in which spatial gradients of the scalar field \(\phi_{,i}\) are defined to vanish. Therefore the [[time derivative|eq:deftimederiv]] is just the derivative with respect to the coordinate time in comoving gauge
\begin{equation}
\dot\phi = \left(\frac{\partial \phi }{ \partial t}\right)_{\rm c}.
\end{equation}
Similarly, the [[generalized density and pressure|eq:defrhop]] are just defined to be those quantities as measured in comoving gauge.

<<tiddler [[fig:comovingfoliation]]>>

The equations of motion for the fluid can be derived from stress-energy conservation,
\begin{equation}
T^{\mu\nu}{}_{\!;\nu} = 0 = \dot\rho u^\mu + (\nabla p)^\mu + \left(\rho + p\right) \left(\dot u^\mu + u^\mu \Theta\right),
\end{equation}
where the quantity \(\Theta\) is defined as the divergence of the four-velocity,
\begin{equation}
\Theta \equiv u^\mu{}_{\!;\mu}.
\end{equation}
We can group the terms multiplied by \(u^\mu\) separately, resulting in familiar-looking equations for the generalized density and pressure
\begin{eqnarray}
\dot\rho + \Theta \left(\rho + p\right) = 0,&&\cr
(\nabla p)^\mu + \left(\rho + p\right) \dot u^\mu = 0.&&
\end{eqnarray}
The first of these equations, similar to the usual continuity equation in the homogeneous case, can be rewritten using the definitions of the [[generalized density and pressure|eq:defrhop]] in terms of the field as
<<tiddler [[eq:generalizedeqofmotion]]>>
This suggests identifying the divergence \(\Theta\) as a generalization of the Hubble parameter \(H\) in the homogeneous case. In fact, if we take \(g_{\mu\nu}\) to be a flat Friedmann-Robertson-Walker (FRW) metric and take comoving gauge, \(u^\mu = (1,0,0,0)\), we have
\begin{equation}
u^\mu{}_{\!;\mu} = 3 H,
\end{equation}
and the [[generalized equation of motion|eq:generalizedeqofmotion]] becomes the familiar equation of motion for a homogeneous scalar,
\begin{equation}
\ddot\phi + 3 H \dot\phi + V'\left(\phi\right) = 0.
\end{equation}

Now consider perturbations \(\delta g_{\mu\nu}\) about a flat FRW metric,
\begin{equation}
g_{\mu\nu} = a^2\left(\tau\right) \left[\eta_{\mu\nu} + \delta g_{\mu\nu}\right],
\end{equation}
where \(\tau\) is the conformal time and \(\eta\) is the Minkowski metric \(\eta = {\rm diag}\left(1,-1,-1,-1\right)\). A general metric perturbation \(\delta g_{\mu\nu}\) can be separated into components which transform independently under coordinate transformations  ```Bardeen (1980)  [[Gauge Invariant Cosmological Perturbations|http://inspirehep.net/search?ln=en&p=Bardeen:1980kt&of=hd]]``` ,
\begin{equation}
\delta g_{\mu\nu} = \delta g_{\mu\nu}^{\rm scalar} + \delta g_{\mu\nu}^{\rm vector} + \delta g_{\mu\nu}^{\rm tensor}.
\end{equation}
The tensor component is just the transverse-traceless gravitational wave perturbation, discussed in [[(3.4) Exact Solutions and the Primordial Power Spectrum]], and vector perturbations are not sourced by single-field inflation. We therefore specialize to the case of scalar perturbations, for which the metric perturbations can be written generally in terms of four scalar functions of space and time \(A\), \(B\), \({\cal R}\), and \(H_T\):
\begin{eqnarray}
&&\delta g_{00} = 2 A\cr
&&\delta g_{0i} = \partial_i B\cr
&&\delta g_{ij} = 2 \left[{\cal R} \delta_{ij} + \partial_i \partial_j H_T\right].
\end{eqnarray}
We are interested in calculating \(\cal R\). Recall that in the Newtonian limit of General Relativity, we can write perturbations about the Minkowski metric in terms of the Newtonian potential \(\Phi\) as:
\begin{equation}
ds^2 = \left( 1 + 2 \Phi\right) dt^2 - \left(1 - 2 \Phi\right) \delta_{ij} dx^i dx^j.
\end{equation}
Similarly, we can write Newtonian perturbations about a flat FRW metric as
\begin{equation}
ds^2 = a^2\left(\tau\right) \left[\left( 1 + 2 \Phi\right) d\tau^2 - \left(1 - 2 \Phi\right) \delta_{ij} dx^i dx^j\right].
\end{equation}
We therefore expect \(\Phi \propto {\cal R}\) in the Newtonian limit. A careful calculation  ```Kodama & Sasaki (1984)  [[Cosmological Perturbation Theory|http://inspirehep.net/search?ln=en&p=Kodama:1985bj&of=hd]]```  ```Liddle & Lyth (1993)  [[The Cold dark matter density perturbation|http://inspirehep.net/search?ln=en&p=Liddle:1993fq&of=hd]]```  gives
\begin{equation}
\Phi = - \frac{3 \left(1 + w\right)}{5 + 3 w} {\cal R},
\end{equation}
so that in a matter-dominated universe,
\begin{equation}
\Phi = - \frac{3}{5} {\cal R}.
\end{equation}
In these expressions, \({\cal R}\) is the curvature perturbation measured on comoving hypersurfaces. To see qualitatively why comoving gauge corresponds correctly to the Newtonian limit in the current universe, consider the end of inflation. Since inflation ends at a particular field value \(\phi = \phi_e\), comoving gauge corresponds to a foliation for which inflation ends at //constant time// at all points in space: all observers synchronize their clocks to \(\tau = 0\) at the end of inflation. This means that the background, or unperturbed universe is exactly the [[homogeneous case|fig:INFLdiagram]], and the comoving curvature perturbation \({\cal R}\) is the Newtonian potential measured relative to that background.

To calculate \({\cal R}\), we start by calculating the four-velocity \(u^\mu\) in terms of the perturbed metric.```This treatment closely follows that of Sasaki and Stewart  (Sasaki & Stewart (1996)  [[A General analytic formula for the spectral index of the density perturbations produced during inflation|http://inspirehep.net/search?ln=en&p=Sasaki:1995aw&of=hd]]) , except that we use the opposite sign convention for \(N\).``` If we specialize to comoving gauge, \(u^i \equiv 0\), the norm of the four-velocity can be written
\begin{equation}
u^\mu u_\mu = a^2 \left(1 + 2 A\right) \left(u^0\right)^2 = 1,
\end{equation}
and the timelike component of the four-velocity is, to linear order,
\begin{eqnarray}
&&u^0 = \frac{1 }{ a} \left(1 - A\right)\cr
&&u_0 = a \left(1 + A\right).
\end{eqnarray}
The velocity divergence \(\Theta\) is then
<<tiddler [[eq:thetacomoving]]>>
where the unperturbed Hubble parameter is defined as
\begin{equation}
H \equiv \frac{1 }{ a^2} \frac{\partial a }{ \partial \tau}.
\end{equation}
Fourier expanding \(H_T\),
\begin{equation}
\partial_i \partial_i H_T = k^2 H_T,
\end{equation}
we see that for long-wavelength modes \(k \ll a H\), the last term in the [[velocity divergence|eq:thetacomoving]] can be ignored, and the velocity divergence is
<<tiddler [[eq:comovingtheta]]>>
Remembering the definition of the number of e-folds in the unperturbed case,
\begin{equation}
N \equiv - \int{H dt},
\end{equation}
we can define a generalized number of e-folds as the integral of the velocity divergence along comoving world lines:
\begin{equation}
{\cal N} \equiv - \frac{1 }{ 3} \int{\Theta d s} = - \frac{1 }{ 3} \int{\Theta \left[a \left(1 + A\right) d \tau\right]}.
\end{equation}
Evaluating to linear order in the metric perturbation results in
\begin{equation}
{\cal N} = {\cal R} - \int{H d t},
\end{equation}
and we have a simple expression for the curvature perturbation,
\begin{equation}
{\cal R} = {\cal N} - N.
\end{equation}

This requires a little physical interpretation:  we defined comoving hypersurfaces such that the field has no spatial variation,
\begin{equation}
\left(\nabla \phi\right)^\mu = 0\ \Rightarrow \phi = {\rm const.}
\end{equation}
Then \(\cal N\) is the number of e-folds measured on comoving hypersurfaces. But we can equivalently foliate the spacetime such that spatial hypersurfaces are flat, and the field exhibits spatial fluctuations:
\begin{equation}
A = {\cal R} = 0\ \Rightarrow \phi \neq {\rm const.}
\end{equation}
On flat hypersurfaces, the field varies, but the curvature does not, so that the metric on these hypersurfaces is exactly of the [[FRW form|eq:generalFRWmetric]] with \(k = 0\). We then see immediately that
\begin{equation}
N = - \int{H d t} = {\rm const.}
\end{equation}
is the number of e-folds measured on flat hypersurfaces, and has no spatial variation. The curvature perturbation \(\cal R\) is the difference in the number of e-folds between the two sets of [[hypersurfaces|fig:hypersurfaces]]. This can be expressed to linear order in terms of the field variation \(\delta\phi\) on flat hypersurfaces as
\begin{equation}
{\cal R} = {\cal N} - N = - \frac{\delta N}{\delta \phi} \delta\phi
\end{equation}
where \(\cal R\) is measured on comoving hypersurfaces, and \(\delta N / \delta\phi\) and \(\delta\phi\) are measured on flat hypersurfaces.

<<tiddler [[fig:hypersurfaces]]>>

We can express \(N\) as a function of the field \(\phi\):
<<tiddler [[eq:defN]]>>
For monotonic field evolution, we can express \(\dot\phi\) as a function of \(\phi\), so that
\begin{equation}
\frac{\delta N }{ \delta\phi} = - \frac{H }{ \dot\phi},
\end{equation}
and the curvature perturbation is given by
\begin{equation}
{\cal R} = {\cal N} - N = - \frac{\delta N }{ \delta\phi} \delta\phi = \frac{H }{ \dot\phi} \delta\phi.
\end{equation}
Note that this is an expression for the metric perturbation \({\cal R}\) on comoving hypersurfaces, calculated in terms of quantities defined on //flat// hypersurfaces. For \(\delta\phi\) produced by quantum fluctuations in inflation, the power spectrum is
\begin{equation}
P_\phi = \frac{k^3}{2 \pi^2} \left\langle \delta\phi^2\right\rangle = \left(\frac{H }{ 2 \pi}\right)^2.
\end{equation}
Perturbations in the inflaton field \(\delta\phi \simeq H / 2 \pi\) generate density perturbations with power spectrum
\begin{equation}
P_{\cal R}\left(k \right) = \left(\frac{\delta N}{\delta\phi}\right)^2 P_\phi = \frac{H^2}{\pi m_{\rm Pl}^2 \epsilon}\bigg\vert_{k = a H} \propto k^{n_S - 1},
\end{equation}
where \(N\) is the number of e-folds. Scalar perturbations are therefore enhanced relative to tensor perturbations by a factor of \(1 / \epsilon\). The scalar power spectrum is also an approximate power-law, with spectral index
\begin{equation}
n_S - 1=  \frac{\epsilon}{H^{2} \left(\epsilon - 1\right)} \frac{d}{d N}\left(\frac{H^2}{\epsilon}\right) \simeq - 4 \epsilon + 2 \eta,
\end{equation}
where \(\eta\) is the [[second slow roll parameter|eq:defeta]]. Therefore, for any particular choice of inflationary potential, we have four measurable quantities: the amplitudes \(P_T\) and \(P_{\cal R}\) of the tensor and scalar power spectra, and their spectral indices \(n_T\) and \(n_S\). However, not all of these parameters are independent. In particular, the ratio \(r\) between the scalar and tensor amplitudes is given by the parameter \(\epsilon\), as is the tensor spectral index \(n_T\):
<<tiddler [[eq:consistency]]>>
This relation is known as the //consistency condition// for single-field slow roll inflation, and is in principle testable by a sufficiently accurate measurement of the primordial perturbation spectra.

In the next section, we apply these results to our example \(\lambda \phi^4\) potential and calculate the inflationary power spectra.
For the case of our example model with \(V\left(\phi\right) = \lambda \phi^4\), it is now straightforward to calculate the scalar and tensor perturbation spectra. We express the normalization of the power spectra as a function of the number of e-folds \(N\) by
\begin{eqnarray}
P_{\cal R}^{1/2} &=& \frac{H}{m_{\rm Pl} \sqrt{\pi \epsilon}}\bigg\vert_{\phi=\phi_N}\cr
&=& \frac{4 \sqrt{24 \pi}}{3 m_{\rm Pl}^3} \frac{\left[V\left(\phi_N\right)\right]^{3/2}}{V'\left(\phi_N\right)}\cr
&=& \frac{24 \pi}{3} \left(\frac{N + 1}{\pi}\right) \lambda^{1/2} \sim 10^{-5},
\end{eqnarray}
where we have used the slow roll expressions for [[\(H\)|eq:srHubble]] and [[\(\epsilon\)|eq:srepsilon]] and for [[\(\phi_N\)|eq:phi4N]]. For perturbations about the current size of our horizon, \(N = 60\), and CMB normalization forces the self-coupling to be very small,
\begin{equation}
\lambda \sim 10^{-15}.
\end{equation}
The presence of an extremely small parameter is not peculiar to the \(\lambda \phi^4\) model, but is generic, and is referred to as the //fine tuning// problem for inflation.

We can similarly calculate the tensor amplitude
\begin{equation}
P_T^{1/2} = \frac{4 H}{m_{\rm Pl} \sqrt{\pi}},
\end{equation}
which is usually expressed in terms of the tensor/scalar ratio
\begin{eqnarray}
r &=&  16 \epsilon\left(\phi_N\right) =\frac{m_{\rm Pl}}{\pi} \left(\frac{V'\left(\phi_N\right)}{V\left(\phi_N\right)}\right)^2 \cr
&=& \frac{16}{\pi} \left(\frac{m_{\rm Pl}}{\phi_N}\right)^2 = \frac{16}{N + 1} \simeq 0.26,
\end{eqnarray}
where we have again taken \(N = 60\). For this particular model, the power in gravitational waves is large, about a quarter of the power in scalar perturbations. This is //not// generic, but is quite model-dependent: some choices of potential predict large tensor contributions (where "large" means of order 10%), while other choices of potential predict tensor contributions far too small to be observable.

The tensor spectral index \(n_T\) is fixed by the [[consistency condition|eq:consistency]], but the scalar spectral index \(n_S\) is an independent parameter because of its dependence on \(\eta\):
\begin{equation}
n = 1 - 4 \epsilon\left(\phi_N\right) + 2 \eta\left(\phi_N\right),
\end{equation}
where
\begin{equation}
\epsilon\left(\phi_N\right) = \frac{1}{N + 1},
\end{equation}
and
\begin{eqnarray}
\eta\left(\phi_N\right) &=& \frac{m_{\rm Pl}^2}{8 \pi} \left[\frac{V^{\prime\prime}\left(\phi_N\right)}{V\left(\phi_N\right)} - \frac{1}{2} \left(\frac{V'\left(\phi_N\right)}{V\left(\phi_N\right)}\right)^2 \right]\cr
&=& \frac{m_{\rm Pl}^2}{8 \pi} \left[\frac{12}{\phi_N^2} - \frac{8}{\phi_N^2}\right]\cr
&=& \frac{1}{2 \pi} \left(\frac{m_{\rm Pl}}{\phi_N}\right)^2 = \frac{1}{2 \left(N + 1\right)}.
\end{eqnarray}
The spectral index is then
\begin{equation}
n =  1 - \frac{3}{N + 1} \simeq 0.95.
\end{equation}
Note that we have assumed slow roll from the beginning in the calculation without //a priori// knowing that it is a good approximation for this choice of potential. However, at the end of the day it is clear that the slow roll ansatz was a good one, since \(\epsilon\) and \(\eta\) are both of order \(0.01\).

Finally, we note that the energy density during inflation is characterized by a mass scale
\begin{equation}
\rho^{1/4} \sim \Lambda \sim \lambda^{1/4} m_{\rm Pl} \sim 10^{15}\ {\rm GeV},
\end{equation}
about the scale for which we expect Grand Unification to be important. This interesting coincidence suggests that the physics of inflation may be found in Grand Unified Theories (GUTs). Different choices of potential \(V\left(\phi\right)\) will give different values for the amplitudes and shapes of the primordial power spectra. Since the normalization is fixed by the CMB to be \(P_{\cal R}^{1/2} \sim 10^{-5}\), the most useful observables for distinguishing among different potentials are the scalar/tensor ratio \(r\) and the scalar spectral index \(n_S\). In single-field inflationary models, the tensor spectral index is fixed by the [[consistency condition|eq:consistency]], and is therefore not an independent parameter. The consistency condition can therefore be taken to be a //prediction// of single-field inflation, which is in principle verifiable by observation. In practice, this is very difficult, since it involves measuring not just the amplitude of the gravitational wave power spectrum, but also its //shape//. We will see in Section [[(4.0) Observational Constraints]] that current data place only a rough upper bound on the tensor/scalar ratio \(r\), and it is highly unlikely that any near-future measurement of primordial gravitational waves will be accurate enough to constrain \(n_T\) well enough to test the consistency condition. In the next section, we discuss current observational constraints on the form of the inflationary potential.
Our simple picture of inflation generated by single, minimally coupled scalar field makes a set of very definite predictions for the form of primordial cosmological fluctuations:

* Gaussianity: since the fluctuations of a free scalar field form a Gaussian random field, fully characterized by the two-point correlation function \(\left\langle \varphi^2\right\rangle\), cosmological perturbations generated in a single-field inflation model will by necessity also form a Gaussian random distribution.
* Adiabaticity: since there is only one order parameter \(\phi\) governing the generation of density perturbations, we expect the perturbations in all the components of the cosmological fluid (baryons, dark matter, neutrinos) to be //in phase// with each other. Such a case is called //adiabatic//. If one or more components fluctuates out of phase with others, these are referred to as //isocurvature// modes. Single-field inflation predicts an absence of isocurvature fluctuations.
* Scale invariance: In the limit of de Sitter space, fluctuations in any quantum field are exactly scale invariant, \(n = 1\), as a result of the fact that the Hubble parameter is exactly constant. Since slow-roll inflation is quasi-de Sitter, we expect the perturbation spectra to be nearly, but not exactly scale invariant, with \(\left\vert n_S - 1\right\vert = \left\vert  2 \eta - 4 \epsilon \right\vert \ll 1\).
* Scalar perturbations dominate over tensor perturbations, \(r = 16 \epsilon\).


Furthermore, given a potential \(V\left(\phi\right)\), we have a "recipe" for calculating the form of the primordial power spectra generated during inflation:

(1) Calculate the field value at the end of inflation \(\phi_e\) from 
\begin{equation}
\epsilon\left(\phi_e\right) = \frac{m_{\rm Pl}^2}{16 \pi} \left(\frac{V'\left(\phi_e\right)}{V\left(\phi_e\right)}\right)^2 = 1.
\end{equation}

(2) Calculate the field value \(N\) e-folds before the end of inflation \(\phi_N\) by integrating backward on the potential from \(\phi = \phi_e\),
\begin{equation}
N = \frac{2 \sqrt{\pi}}{m_{\rm Pl}} \int_{\phi_e}^{\phi_N}{\frac{d\phi'}{\sqrt{\epsilon\left(\phi'\right)}}}.
\end{equation}

(3) Calculate the normalization of the scalar power spectrum by
\begin{equation}
P_{\cal R}^{1/2} = \frac{H}{m_{\rm Pl} \sqrt{\pi \epsilon}}\bigg\vert_{\phi=\phi_N} \sim 10^{-5},
\end{equation}
where the CMB quadrupole corresponds to roughly \(N = 60\). A more accurate calculation includes the uncertainty in the reheat temperature, which gives a range \(N \simeq \left[46,60\right]\), and a corresponding uncertainty in the observable parameters. 

(4) Calculate the tensor/scalar ratio \(r\) and scalar spectral index \(n_S\) at \(N = [46,60]\) by
\begin{equation}
r = 16 \epsilon\left(\phi_N\right),
\end{equation}
and
\begin{equation}
n_s = 1 - 4 \epsilon\left(\phi_N\right)  + 2 \eta\left(\phi_N\right),
\end{equation}
where the second slow roll parameter \(\eta\) is given by:
\begin{equation}
\eta\left(\phi_N\right) = \frac{m_{\rm Pl}^2}{8 \pi} \left[\frac{V^{\prime\prime}\left(\phi_N\right)}{V\left(\phi_N\right)} - \frac{1}{2} \left(\frac{V'\left(\phi_N\right)}{V\left(\phi_N\right)}\right)^2 \right].
\end{equation}

The key point is that the scalar power spectrum \(P_{\cal R}\) and the tensor power spectrum \(P_T\) are both completely determined by the choice of potential \(V\left(\phi\right)\).```Strictly speaking, this is true only for scalar fields with a canonical kinetic term, where the speed of sound of perturbations is equal to the speed of light. More complicated scenarios such as DBI inflation  (Silverstein & Tong (2004)  [[Scalar speed limits and cosmology: Acceleration from D-cceleration|http://inspirehep.net/search?ln=en&p=Silverstein:2003hf&of=hd]])  require specification of an extra free function, the speed of sound \(c_S\left(\phi\right)\), to calculate the power spectra. For constraints on this more general class of models, see Agarwal & Bean (2009)  [[Cosmological constraints on general, single field inflation|http://inspirehep.net/search?ln=en&p=Agarwal:2008ah&of=hd]].``` Therefore, if we measure the primordial perturbations in the universe accurately enough, we can in principle constrain the form of the inflationary potential. This is extremely exciting, because it gives us a very rare window into physics at extremely high energy, perhaps as high as the GUT scale or higher, far beyond the reach of accelerator experiments such as the Large Hadron Collider.

It is convenient to divide the set of possible single-field potentials into a few basic types  ```Dodelson, //et al.,// (1997)  [[Cosmic microwave background measurements can discriminate among inflation models|http://inspirehep.net/search?ln=en&p=Dodelson:1997hr&of=hd]]``` :

* //Large-field potentials//. These are the simplest potentials one might imagine, with potentials of the form \(V\left(\phi\right) = m^2 \phi^2\), or our example case, \(V\left(\phi\right) = \lambda \phi^4\). Another widely-noted example of this type of model is inflation on an exponential potential, \(V\left(\phi\right) = \Lambda^4 \exp{\left(\phi/\mu\right)}\), which has the useful property that both the background evolution and the perturbation equations are exactly solvable. In the large-field case, the field is displaced from the vacuum at the origin by an amount of order \(\phi \sim m_{\rm Pl}\) and rolls down the potential toward the origin. Large-field models are typically characterized by a "red" spectral index \(n_S < 1\), and a substantial gravitational wave contribution, \(r \sim 0.1\).

 <<tiddler [[fig:largefield]]>>

* //Small-field potentials//. These are potentials characteristic of spontaneous symmetry breaking phase transitions, where the field rolls off an unstable equilibrium with \(V'\left(\phi\right) = 0\) toward a displaced vacuum. Examples of small-field inflation include a quartic double-well potential, \(V\left(\phi\right) = \lambda \left(\phi^2 - \mu^2\right)^2\), inflation from a pseudo-Nambu-Goldstone boson or a shift symmetry in string theory (called //Natural Inflation//) with a potential typically of the form \(V\left(\phi\right) = \Lambda^4 \left[1 + \cos{\left(\phi / \mu\right)}\right]\), or Coleman-Weinberg potentials, \(V\left(\phi\right) = \lambda \phi^4 \ln\left(\phi\right)\). Small-field models are characterized by a red spectral index \(n < 1\), and a small tensor/scalar ratio, \(r \leq 0.01\).

<<tiddler [[fig:smallfield]]>>

* //Hybrid potentials//. A third class of models are potentials for which there is a residual vacuum energy when the field is at the minimum of the potential, for example a potential like \(V\left(\phi\right) = \lambda \left(\phi^2 + \mu^2\right)^2\). In this case, inflation will continue //forever//, so additional physics is required to end inflation and initiate reheating. The //hybrid// mechanism, introduced by Linde  ```Linde (1994)  [[Hybrid inflation|http://inspirehep.net/search?ln=en&p=Linde:1993cn&of=hd]]``` , solves this problem by adding a second field coupled to the inflaton which is stable for \(\phi\) large, but becomes unstable at a critical field value \(\phi_c\) near the minimum of \(V\left(\phi\right)\). During inflation, however, only \(\phi\) is dynamical, and these models are effectively single-field. Typical models of this type predict negligible tensor modes, \(r \ll 0.01\) and a "blue" spectrum, \(n_S > 1\), which is disfavored by the data, and we will not discuss them in more detail here. (Ref.  ```Komatsu, //et al.,// (2009)  [[Five-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Cosmological Interpretation|http://inspirehep.net/search?ln=en&p=Komatsu:2008hk&of=hd]]```  contains a good discussion of current limits on general hybrid models.) Note also that such potentials will also support large-field inflation if the field is displaced far enough from its minimum.

<<tiddler [[fig:hybrid]]>>

An important feature of all of these models is that each is characterized by two basic parameters, the "height" of the potential \(\Lambda^4\), which governs the energy density during inflation, and the "width" of the potential \(\mu\). (Hybrid models have a third free parameter \(\phi_c\) which sets the end of inflation.) In order to have a flat potential and a slowly rolling field, there must be a hierarchy of scales such that the width is larger than the height, \(\Lambda \ll \mu\). As we saw in the case of the \(\lambda \phi^4\) large-field model, typical inflationary potentials have widths of order the Planck scale \(\mu \sim m_{\rm Pl}\) and heights of order the scale of Grand Unification \(\Lambda \sim M_{\rm GUT} \sim 10^{15}\ {\rm GeV},\) although models can be constructed for which inflation happens at a much lower scale  ```Knox & Turner (1993)  [[Inflation at the electroweak scale|http://inspirehep.net/search?ln=en&p=Knox:1992iy&of=hd]]```  ```Linde (1994)  [[Hybrid inflation|http://inspirehep.net/search?ln=en&p=Linde:1993cn&of=hd]]```  ```Kinney & Mahanthappa (1996)  [[Inflation at low scales: General analysis and a detailed model|http://inspirehep.net/search?ln=en&p=Kinney:1995cc&of=hd]]``` .

The quantities we are interested in for constraining models of inflation are the primordial power spectra \(P_{\cal R}\) and \(P_T\), which are the underlying source of the CMB temperature anisotropy and polarization. However, the observed CMB  anisotropies depend on a handful of unrelated cosmological parameters, since the primordial fluctuations are processed through the complicated physics of acoustic oscillations. This creates uncertainties due to parameter degeneracies: our best-fit values for \(r\) and \(n_S\) will depend on what values we choose for the other cosmological parameters such as the baryon density \(\Omega_{\rm b}\) and the redshift of reionization \(z_{\rm ri}\). To accurately estimate the errors on \(r\) and \(n_S\), we must fit all the relevant parameters //simultaneously//, a process which is computationally intensive, and is typically approached using Bayesian Monte Carlo Markov Chain techniques  ```Lewis & Bridle (2002)  [[Cosmological parameters from CMB and other data: A Monte Carlo approach|http://inspirehep.net/search?ln=en&p=Lewis:2002ah&of=hd]]``` . Here we simply show the results as the regions of the \(r\), \(n_S\) parameter space allowed by the WMAP 5-year data set  ```Kinney, //et al.,// (2006)  [[Inflation model constraints from the Wilkinson Microwave Anisotropy Probe three-year data|http://inspirehep.net/search?ln=en&p=Kinney:2006qm&of=hd]]```  ```Kinney, //et al.,// (2008)  [[Latest inflation model constraints from cosmic microwave background measurements|http://inspirehep.net/search?ln=en&p=Kinney:2008wy&of=hd]]``` . We have fit over the parameters \(\Omega_{\rm CDM}\), \(\Omega_{\rm b}\), \(\Omega_\Lambda\), \(H_0\), \(P_{\cal R}\), \(z_{\rm ri}\), \(r\), and \(n_S\), with a constraint that the universe must be flat, as predicted by inflation, \(\Omega_{\rm b} + \Omega_{\rm CDM} + \Omega_\Lambda = 1\). 
We see that the data favor a red spectrum, \(n_S < 1\), although the scale-invariant limit \(n_S = 1\) is still within the 95\%-confidence region.  Our example inflation model \(V\left(\phi\right) = \lambda \phi^4\) is convincingly ruled out by WMAP, but the simple potential \(V\left(\phi\right) = m^2 \phi^2\) is nicely consistent with the data.```Liddle and Leach point out that \(\lambda \phi^4\) models are special because of their reheating properties, and should be more accurately evaluated at \(N = 64\)  (Liddle & Leach (2003)  [[How long before the end of inflation were observable perturbations produced?|http://inspirehep.net/search?ln=en&p=Liddle:2003as&of=hd]]) . However, this assumes that the potential has no other terms which might become dominant during reheating, and in any case is also ruled out by WMAP5.``` There is no evidence in the WMAP data for a nonzero tensor/scalar ratio \(r\), with a 95\%-confidence upper limit of \(r < 0.5\). It is possible to improve these constraints somewhat by adding other data sets, for example the ACBAR high-resolution CMB anisotropy measurement  ```Reichardt, //et al.,// (2009)  [[High resolution CMB power spectrum from the complete ACBAR data set|http://inspirehep.net/search?ln=en&p=Reichardt:2008ay&of=hd]]```  or the Sloan Digital Sky Survey  ```Loveday (2002)  [[The Sloan Digital Sky Survey|http://inspirehep.net/search?ln=en&p=Loveday:2002ax&of=hd]]```  ```Abazajian, //et al.,// (2009)  [[The Seventh Data Release of the Sloan Digital Sky Survey|http://inspirehep.net/search?ln=en&p=Abazajian:2008wr&of=hd]]``` , which improve the upper limit on the tensor/scalar ratio to \(r < 0.3\) or so. Current data are completely consistent with Gaussianity and adiabaticity, as expected from simple single-field inflation models. In the next section, we discuss the outlook for future observation. 

<<tiddler [[fig:WMAPrn]]>>

<<tiddler [[fig:WMAPrnlog]]>>
The basic hot Big Bang scenario, in which the universe arises out of a hot, dense, smooth initial state and cools through expansion, is now supported by a compelling set of observations, including the existence of the Cosmic Microwave Background, the primordial abundances of the elements, and the evolution of structure in the universe, all of which are being measured with unprecedented precision. However, this scenario leaves questions unanswered: Why is the universe so big and so old? Why is the universe so close to geometrically flat? What created the initial perturbations which later collapsed to form structure in the universe? The last of these questions is particularly interesting, because recent observations of the CMB, in particular the all-sky anisotropy map made by the landmark WMAP satellite, have directly measured the form of these primordial perturbations.  A striking property of these observed primordial perturbations is that they are correlated on scales larger than the cosmological horizon at the time of last scattering. Such apparently //acausal// correlations can only be produced in a few ways  ```Spergel & Zaldarriaga (1997)  [[CMB polarization as a direct test of inflation|http://inspirehep.net/search?ln=en&p=Spergel:1997vq&of=hd]]``` :

* Inflation.
* Extra dimensions  ```Khoury, //et al.,// (2001)  [[The Ekpyrotic universe: Colliding branes and the origin of the hot big bang|http://inspirehep.net/search?ln=en&p=Khoury:2001wf&of=hd]]``` . 
* A universe much older than \(H_0^{-1}\)  ```Khoury, //et al.,// (2004)  [[Designing cyclic universe models|http://inspirehep.net/search?ln=en&p=Khoury:2003rt&of=hd]]```  ```Brandenberger & Shuhmaher (2006)  [[The Confining heterotic brane gas: A Non-inflationary solution to the entropy and horizon problems of standard cosmology|http://inspirehep.net/search?ln=en&p=Brandenberger:2005qj&of=hd]]``` .
* A varying speed of light  ```Albrecht & Magueijo (1999)  [[A Time varying speed of light as a solution to cosmological puzzles|http://inspirehep.net/search?ln=en&p=Albrecht:1998ir&of=hd]]``` .
 

In addition, the WMAP data contain spectacular confirmation of the basic predictions of the inflationary paradigm: a geometrically flat universe with Gaussian, adiabatic, nearly scale-invariant perturbations. No other model explains these properties of the universe with such simplicity and economy, and much attention has been devoted to the implications of WMAP for inflation  ```Spergel, //et al.,// (2007)  [[Wilkinson Microwave Anisotropy Probe (WMAP) three year results: implications for cosmology|http://inspirehep.net/search?ln=en&p=Spergel:2006hy&of=hd]]```  ```Alabidi & Lyth (2006)  [[Inflation models after WMAP year three|http://inspirehep.net/search?ln=en&p=Alabidi:2006qa&of=hd]]```  ```Seljak, //et al.,// (2006)  [[Cosmological parameters from combining the Lyman-alpha forest with CMB, galaxy clustering and SN constraints|http://inspirehep.net/search?ln=en&p=Seljak:2006bg&of=hd]]```  ```Kinney, //et al.,// (2006)  [[Inflation model constraints from the Wilkinson Microwave Anisotropy Probe three-year data|http://inspirehep.net/search?ln=en&p=Kinney:2006qm&of=hd]]```  ```Martin & Ringeval (2006)  [[Inflation after WMAP3: Confronting the Slow-Roll and Exact Power Spectra to CMB Data|http://inspirehep.net/search?ln=en&p=Martin:2006rs&of=hd]]```  ```Lesgourgues, //et al.,// (2008)  [[What do WMAP and SDSS really tell about inflation?|http://inspirehep.net/search?ln=en&p=Lesgourgues:2007aa&of=hd]]```  ```Peiris & Easther (2008)  [[Primordial Black Holes, Eternal Inflation, and the Inflationary Parameter Space after WMAP5|http://inspirehep.net/search?ln=en&p=Peiris:2008be&of=hd]]```  ```Alabidi & Lidsey (2008)  [[Single-field inflation after the WMAP five-year data|http://inspirehep.net/search?ln=en&p=Alabidi:2008ej&of=hd]]```  ```Dunkley, //et al.,// (2009)  [[Five-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Likelihoods and Parameters from the WMAP data|http://inspirehep.net/search?ln=en&p=Dunkley:2008ie&of=hd]]```  ```Komatsu, //et al.,// 
(2009)  [[Five-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Cosmological Interpretation|http://inspirehep.net/search?ln=en&p=Komatsu:2008hk&of=hd]]```  ```Kinney, //et al.,// (2008)  [[Latest inflation model constraints from cosmic microwave background measurements|http://inspirehep.net/search?ln=en&p=Kinney:2008wy&of=hd]]```  ```Xia, //et al.,// (2008)  [[Determining Cosmological Parameters with Latest Observational Data|http://inspirehep.net/search?ln=en&p=Xia:2008ex&of=hd]]```  ```Hamann, //et al.,// (2008)  [[How to constrain inflationary parameter space with minimal priors|http://inspirehep.net/search?ln=en&p=Hamann:2008pb&of=hd]]```  ```Smith, //et al.,// (2008)  [[The inflationary gravitational-wave background and measurements of the scalar spectral index|http://inspirehep.net/search?ln=en&p=Smith:2008pf&of=hd]]```  ```Li, //et al.,// (2009)  [[Constraining Cosmological Parameters with Observational Data Including Weak Lensing Effects|http://inspirehep.net/search?ln=en&p=Li:2008vf&of=hd]]``` . Inflation also makes predictions which have not been well tested by current data but //can// be by future experiments, most notably a deviation from a scale-invariant spectrum and the production of primordial gravitational waves. A non-scale-invariant spectrum is weakly favored by the existing data, but constraints on primordial gravity waves are still quite poor. The outlook for improved data is promising: over the next five to ten years, there will be a continuous stream of increasingly  high-precision data made available which will allow constraint of cosmological parameters relevant for understanding the early universe. The most useful measurements for direct constraint of the inflationary parameter space are observations of the CMB, and current activity in this area is intense. 
The Planck satellite mission is scheduled to launch in 2009  ```Planck Collaboration (2006)  [[The Scientific programme of Planck|http://inspirehep.net/search?ln=en&p=:2006uk&of=hd]]```  ```Bouchet (2007)  [[The Planck satellite: Status & perspectives|http://inspirehep.net/search?ln=en&p=Bouchet:2007zz&of=hd]]``` , and will be complemented by ground- and balloon-based measurements using a variety of technologies and strategies  ```Leitch, //et al.,// (2005)  [[DASI three-year cosmic microwave background polarization results|http://inspirehep.net/search?ln=en&p=Leitch:2004gd&of=hd]]```  ```Kuo, //et al.,// (2007)  [[Improved Measurements of the CMB Power Spectrum with ACBAR|http://inspirehep.net/search?ln=en&p=Kuo:2006ya&of=hd]]```  ```Sievers, //et al.,// (2007)  [[Implications of the cosmic background imager polarization data|http://inspirehep.net/search?ln=en&p=Sievers:2005gj&of=hd]]```  ```Ruhl, //et al.,// (2004)  [[The South Pole Telescope|http://inspirehep.net/search?ln=en&p=Ruhl:2004kv&of=hd]]```  ```Yoon, //et al.,// (2006)  [[The Robinson Gravitational Wave Background Telescope (BICEP): A bolometric large angular scale CMB polarimeter|http://inspirehep.net/search?ln=en&p=Yoon:2006jc&of=hd]]```  ```Taylor (2006)  [[Clover: A B-mode polarization experiment|http://inspirehep.net/search?ln=en&p=Taylor:2006jw&of=hd]]```  ```Samtleben (2008)  [[QUIET - Measuring the CMB polarization with coherent detector arrays|http://inspirehep.net/search?ln=en&p=Samtleben:2008sj&of=hd]]```  ```Crill, //et al.,// (2008)  [[SPIDER: A Balloon-borne Large-scale CMB Polarimeter|http://inspirehep.net/search?ln=en&p=Crill:2008rd&of=hd]]``` .

At the same time, cosmological parameter estimation is a well-developed field. A set of standard cosmological parameters such as the baryon density \(\Omega_{\rm b} h^2\), the matter density \(\Omega_{\rm m} h^2\), the expansion rate \(H_0 \equiv 100 h\ {\rm km/sec/Mpc}\) are being measured with increasing accuracy.  The observable quantities most meaningful for constraining models of inflation are the ratio \(r\) of tensor to scalar fluctuation amplitudes, and the spectral index \(n_S\) of the scalar power spectrum. This kind of simple parameterization is at the moment sufficient to describe the highest-precision cosmological data sets. Furthermore, the simplest slow-roll models of inflation predict a nearly exact power-law perturbation spectrum. In this sense, a simple concordance cosmology is well-supported by both data and by theoretical expectation. It could be that the underlying universe really is that simple. However, the simplicity of concordance cosmology is at present as much a statement about the data as about the universe itself. Only a handful of parameters are required to explain existing cosmological data. 
Adding more parameters to the fit does no good: any small improvement in the fit of the model to the data is offset by the statistical penalty one pays for introducing extra parameters  ```Trotta (2007)  [[Applications of Bayesian model selection to cosmological parameters|http://inspirehep.net/search?ln=en&p=Trotta:2005ar&of=hd]]```  ```Magueijo & Sorkin (2007)  [[Occam's razor meets WMAP|http://inspirehep.net/search?ln=en&p=Magueijo:2006we&of=hd]]```  ```Parkinson, //et al.,// (2006)  [[A Bayesian model selection analysis of WMAP3|http://inspirehep.net/search?ln=en&p=Parkinson:2006ku&of=hd]]```  ```Liddle, //et al.,// (2006)  [[Cosmological model selection|http://inspirehep.net/search?ln=en&p=Liddle:2006tc&of=hd]]```  ```Liddle (2007)  [[Information criteria for astrophysical model selection|http://inspirehep.net/search?ln=en&p=Liddle:2007fy&of=hd]]```  ```Pahud, //et al.,// (2007)  [[When can the Planck satellite measure spectral index running?|http://inspirehep.net/search?ln=en&p=Pahud:2007gi&of=hd]]```  ```Linder & Miquel (2008)  [[Tainted Evidence: Cosmological Model Selection vs. Fitting|http://inspirehep.net/search?ln=en&p=Linder:2007fv&of=hd]]```  ```Liddle, //et al.,// (2007)  [[Comment on Tainted evidence: Cosmological model selection versus fitting, by Eric V. Linder and Ramon Miquel (astro-ph/0702542v2)|http://inspirehep.net/search?ln=en&p=Liddle:2007ez&of=hd]]```  ```Efstathiou (2008)  [[Limitations of Bayesian Evidence Applied to Cosmology|http://inspirehep.net/search?ln=en&p=Efstathiou:2008ed&of=hd]]``` . But the optimal parameter set is a moving target: as the data get better, we will be able to probe more parameters. It may be that a "vanilla" universe  ```Easther (2004)  [[Do we live in a vanilla universe? theoretical perspectives on wmap|http://inspirehep.net/search?ln=en&p=Easther:2003fy&of=hd]]```  of a half-dozen or so parameters will continue to be sufficient to explain observation. 
But it is reasonable to expect that, as  measurements improve in accuracy, we will see evidence of deviation from such a lowest-order expectation. This is where the interplay between theory and experiment gains the most leverage, because we must understand: (1) what deviations from a simple universe are predicted by models, and (2) how to look for those deviations in the data. It is of course impossible to predict which of the many possible signals (if any) will be realized in the universe in which we live. I discuss below four of the best motivated possibilities, in order of the quality of current constraints. (For a more detailed treatment of these issues, the reader is referred to the very comprehensive CMBPol Mission Concept Study  ```Baumann, //et al.,// (2009)  [[CMBPol Mission Concept Study: Probing Inflation with CMB Polarization|http://inspirehep.net/search?ln=en&p=Baumann:2008aq&of=hd]]``` .)

//Features in the density power spectrum//
Current data are consistent with a purely power-law spectrum of density perturbations, \(P(k) \propto k^{n_S - 1}\) with a "red" spectrum (\(n_S < 1\)) favored by the data at about a \(90\%\) confidence level, a figure which depends on the choice of parameter set and priors. Assuming it is supported by future data, the detection of a deviation from a scale-invariant (\(n_S=1\)) spectrum is a significant milestone, and represents a confirmation of one of the basic predictions of inflation. In slow-roll inflation, this power-law scale dependence is nearly exact, and any additional scale dependence is strongly suppressed. Therefore, detection of a nonzero "running" \(\alpha = d{n_S}/d\ln{k}\) of the spectral index would be an indication that slow roll is a poor approximation. There is currently no evidence for scale-dependence in the spectral index, but constraints on the overall shape of the power spectrum are likely to improve dramatically through measurements of the CMB anisotropy at small angular scales, improved polarization measurements, and better mapping of large-scale structure. Planck is expected to measure the shape of the spectrum with \(2 \sigma\) uncertainties of order \(\Delta n \sim 0.01\) and \(\Delta\alpha \sim 0.01\)  ```Kinney (1998)  [[Constraining inflation with cosmic microwave background polarization|http://inspirehep.net/search?ln=en&p=Kinney:1998md&of=hd]]```  ```Copeland, //et al.,// (1998)  [[Cosmological parameter estimation and the spectral index from inflation|http://inspirehep.net/search?ln=en&p=Copeland:1997mn&of=hd]]```  ```Colombo, //et al.,// (2009)  [[Cosmological parameters after WMAP5: forecasts for Planck and future galaxy surveys|http://inspirehep.net/search?ln=en&p=Colombo:2008ta&of=hd]]```  ```Adshead & Easther (2008)  [[Constraining Inflation|http://inspirehep.net/search?ln=en&p=Adshead:2008vn&of=hd]]``` . 
Over the longer term, measurements of 21cm radiation from neutral hydrogen promise to be a precise probe of the primordial power spectrum, and would improve these constraints significantly  ```Barger, //et al.,// (2009)  [[Inflationary Potential from 21 cm Tomography and Planck|http://inspirehep.net/search?ln=en&p=Barger:2008ii&of=hd]]``` .

//Primordial Gravitational Waves//  
In addition to a spectrum \(P_{\cal R}\) of scalar perturbations, inflation generically predicts a spectrum \(P_T\) of tensor perturbations. The relative amplitude of the two is determined by the equation of state of the fluid driving inflation,
\begin{equation}
r = 16 \epsilon
\end{equation}
Since the scalar amplitude is known from the COBE normalization to be \(P_{\cal R} \sim H^2 / \epsilon \sim 10^{-10}\), it follows that measuring the tensor/scalar ratio \(r\) determines the inflationary expansion rate \(H\) and the associated energy density \(\rho\).  Typical inflation models take place with an energy density of around \(\rho \sim (10^{15}\ {\rm GeV})^4\), which corresponds to a tensor/scalar ratio of \(r \sim 0.1\), although this figure is highly model-dependent. Single-field inflation does not make a definite prediction for the value of \(r\): while many choices of potential generate a substantial tensor component, other choices of potential result in an unobservably small tensor/scalar ratio, and there is no particular reason to favor one scenario over another.

There is at present no observational evidence for primordial gravitational waves: the current upper limit on the tensor/scalar ratio is around \(r \leq 0.3\). Detection of even a large primordial tensor signal requires extreme sensitivity. The crucial observation is detection of the odd-parity, or B-mode, component of the CMB polarization signal, which is suppressed relative to the temperature fluctuations, themselves at the \(10^{-4}\) level, by at least another four orders of magnitude. This signal is considerably below known foreground levels  ```Kogut, //et al.,// (2007)  [[Three-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Foreground Polarization|http://inspirehep.net/search?ln=en&p=Kogut:2007tq&of=hd]]``` , severely complicating data analysis. Despite the formidable challenges, the observational community has undertaken a broad-based effort to search for the B-mode, and a detection would be a boon for inflationary cosmology. Planck will be sensitive to a tensor/scalar ratio of around \(r \simeq 0.1\), and dedicated ground-based measurements can potentially reach limits of order \(r \simeq 0.01\). The proposed CMBPol polarization satellite would reach \(r\) of order \(10^{-3}\)  ```Baumann, //et al.,// (2009)  [[CMBPol Mission Concept Study: Probing Inflation with CMB Polarization|http://inspirehep.net/search?ln=en&p=Baumann:2008aq&of=hd]]```  ```Dunkley, //et al.,// (2008)  [[CMBPol Mission Concept Study: Prospects for polarized foreground removal|http://inspirehep.net/search?ln=en&p=Dunkley:2008am&of=hd]]``` , and direct detection experiments such as BBO could in principle detect \(r\) of order \(10^{-4}\)  ```Smith, //et al.,// (2006)  [[Direct detection of the inflationary gravitational wave background|http://inspirehep.net/search?ln=en&p=Smith:2005mm&of=hd]]``` .

//Primordial Non-Gaussianity//
In addition to a power-law power spectrum, inflation predicts that the primordial perturbations will be distributed according to Gaussian statistics. Like running of the power spectrum, non-Gaussianity is suppressed in slow-roll inflation  ```Maldacena (2003)  [[Non-Gaussian features of primordial fluctuations in single field inflationary models|http://inspirehep.net/search?ln=en&p=Maldacena:2002vr&of=hd]]``` . However, detection of even moderate non-Gaussianity is considerably more difficult. If the perturbations are Gaussian, the two-point correlation function completely describes the perturbations. This is not the case for non-Gaussian fluctuations: higher-order correlations contain additional information. However, higher-order correlations require more statistics and are therefore more difficult to measure, especially at large angular scales where cosmic variance errors are significant.  Current limits are extremely weak  ```Spergel, //et al.,// (2007)  [[Wilkinson Microwave Anisotropy Probe (WMAP) three year results: implications for cosmology|http://inspirehep.net/search?ln=en&p=Spergel:2006hy&of=hd]]```  ```Creminelli, //et al.,// (2006)  [[Limits on non-gaussianities from wmap data|http://inspirehep.net/search?ln=en&p=Creminelli:2005hu&of=hd]]``` , and future high angular resolution CMB maps will still fall well short of being sensitive to a signal from slow-roll inflation or even weakly //non-//slow-roll models  ```Liguori, //et al.,// (2007)  [[Temperature and Polarization CMB Maps from Primordial non-Gaussianities of the Local Type|http://inspirehep.net/search?ln=en&p=Liguori:2007sj&of=hd]]``` . It will take a strong deviation from the slow-roll scenario to generate observable non-Gaussianity. 
However, a measurement of non-Gaussianity would in one stroke rule out virtually all slow-roll inflation models and force consideration of more exotic scenarios such as DBI inflation  ```Silverstein & Tong (2004)  [[Scalar speed limits and cosmology: Acceleration from D-cceleration|http://inspirehep.net/search?ln=en&p=Silverstein:2003hf&of=hd]]``` , Warm Inflation  ```Moss & Xiong (2007)  [[Non-Gaussianity in fluctuations from warm inflation|http://inspirehep.net/search?ln=en&p=Moss:2007cv&of=hd]]``` , or curvaton scenarios  ```Sasaki, //et al.,// (2006)  [[Non-Gaussianity of the primordial perturbation in the curvaton model|http://inspirehep.net/search?ln=en&p=Sasaki:2006kq&of=hd]]``` .

//Isocurvature perturbations//
In a universe where the matter consists of multiple components, there are two general classes of perturbation about a homogeneous background: adiabatic, in which the perturbations in all of the fluid components are in phase, and isocurvature, in which the perturbations have independent phases. Single-field inflation predicts purely adiabatic primordial perturbations, for the simple reason that if there is a single field \(\phi\) responsible for inflation, then there is a single order parameter governing the generation of density perturbations. This is a nontrivial prediction, and the fact that current data are consistent with adiabatic perturbations is support for the idea of quantum generation of perturbations in inflation. However, current limits on the isocurvature fraction are quite weak  ```Moodley, //et al.,// (2004)  [[Constraints on isocurvature models from the WMAP first-year data|http://inspirehep.net/search?ln=en&p=Moodley:2004nz&of=hd]]```  ```Bean, //et al.,// (2006)  [[Constraining Isocurvature Initial Conditions with WMAP 3-year data|http://inspirehep.net/search?ln=en&p=Bean:2006qz&of=hd]]``` . If isocurvature modes are detected, it would rule out //all// single-field models of inflation. Multi-field models, on the other hand, naturally contain multiple order parameters and can generate isocurvature modes. Multi-field models are naturally motivated by the string "landscape", which is believed to contain an enormous number of degrees of freedom. Another possible mechanism for the generation of isocurvature modes is the curvaton mechanism, in which cosmological perturbations are generated by a field other than the inflaton  ```Lyth, //et al.,// (2003)  [[The Primordial density perturbation in the curvaton scenario|http://inspirehep.net/search?ln=en&p=Lyth:2002my&of=hd]]```  ```Lyth & Wands (2003)  [[The CDM isocurvature perturbation in the curvaton scenario|http://inspirehep.net/search?ln=en&p=Lyth:2003ip&of=hd]]``` .

The rich interplay between theory and observation that characterizes cosmology today is likely to continue for the foreseeable future. As measurements improve, theory will need to become more precise and complete than the simple picture of inflation that we have outlined in these lectures, and single-field inflation models could yet prove to be a poor fit to the data. However, at the moment, such models provide an elegant, compelling, and (most importantly) scientifically useful picture of the very early universe.
A bet with Cliff Burgess from McMaster University on whether or not the [[Planck Satellite|http://www.esa.int/Our_Activities/Space_Science/Planck]] will detect non-Gaussianity:

[img(100%,auto)[./images/KinneyBurgessBet.png]]

Math on this page is displayed using [[MathJax|http://www.mathjax.org/]], which runs using JavaScript and should work well without any additional configuration of your browser. 

Here is some math typesetting to test your browser with this Wiki:

\begin{equation}
{\cal L} = - f^{-1}\left(\phi\right) \sqrt{1 + f\left(\phi\right) g^{\mu\nu} \partial_\mu \phi \partial_\nu \phi} + f^{-1}\left(\phi\right) - V\left(\phi\right)
\end{equation}

\begin{equation}
{}^\ell \lambda\left(\phi\right) \equiv \left(\frac{2 M_P^2}{\gamma\left(\phi\right)}\right)^{\ell} \left(\frac{H'\left(\phi\right)}{H\left(\phi\right)}\right)^{\ell - 1} \frac{1}{H\left(\phi\right)} \frac{d^{\ell + 1} H\left(\phi\right)}{d \phi^{\ell + 1}}
\end{equation}

\begin{equation}
\eta_{\mu\nu} = \left(\begin{array}{cccc}
1& & & \\
 &-1& & \\
 & &-1& \\
 & & &-1
\end{array}\right)
\end{equation}

The above equations should look like this:

[img[Equation Test|./images/equationtest.png]]
[<img[Photo Credit: Doug Benz for the New York Times | WHKsmall.jpg]]

I am an Associate Professor of Physics at the [[University at Buffalo, The State University of New York|http://www.physics.buffalo.edu]]. 

I study the structure and origin of the universe. While this might seem to be a subject more suited to a philosopher than a physicist, an array of amazing new techniques in astronomy and particle physics have opened an unprecedented window onto the nature of the first moments of time.

Robotic telescopes have made possible surveys which are in the process of creating the first three-dimensional maps of the universe on scales of billions of light years. Super-cold satellite detectors in orbits beyond the moon are making high-precision measurements of patterns in the faint glow of light left over from the Big Bang, called the Cosmic Microwave Background. Hidden in these patterns of light and matter are clues to the nature of the universe at its very beginning, in the hot and very dense soup of the Big Bang. To understand the conditions near the Big Bang, we must understand physics at extreme energies. These conditions are studied in particle accelerators such as the Large Hadron Collider, which is beginning operation this year in Geneva, Switzerland.

Motivated by particle physics, the leading theory of the very early universe is known as inflation. Inflation proposes that about a trillionth of a trillionth of a trillionth of a second after the beginning of time, the universe underwent a period of geometrically multiplying expansion, so rapid that "virtual" particles were ripped out of the quantum vacuum of empty space and pulled apart faster than the speed of light. These quantum fluctuations in the earliest moments of the universe left behind echoes which we can measure today in the patterns of galaxies in space, and in the light left over from the Big Bang. This new cosmology is a bridge between the Outer Space of stars and galaxies, and the Inner Space of fundamental particles and forces. We are learning amazing things about both.

For contact information, see my [[University Home Page|http://www.physics.buffalo.edu/whkinney/]].
[img[./images/Inflation/nsf4c.jpg]]

This material is based upon work supported by the National Science Foundation under Grant No. 1066278. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation.
[[Ptolemy's|http://en.wikipedia.org/wiki/Ptolemy]] universe was one of order. Informed by Aristotle's formulation of physics, in which the natural state of a material body is at rest, Ptolemy placed the Earth at the center of the universe, a "bottom" at which all things material collect.

[img[Ptolemaic Universe|./images/PtolemaicSystem.jpg]]

Ptolemy himself was not Christian, but his model was adopted enthusiastically by the Roman Catholic Church, particularly in the writings of Thomas Aquinas. In Aquinas' vision, above the Earth is the realm of the celestial spheres, perfect, inhabited by God and by the angels. Below the earth is Hell, to which all things sullied and evil sink. In between are humans, material creatures in an imperfect world, torn between divinity and sin. Our position in the cosmos makes us //special// creatures, made in the image of God, and capable of transcending the imperfection of Earth and achieving the perfection of Heaven. The specialness of the Earth, and by extension of us, is not an accident. In Ptolemy's universe, our distinctive place is built into the structure of the universe itself, as Aquinas put it, "God-ordained and man-centered." The Earth is singular, the sum total of all that is material, since the laws of physics require that all material things fall to rest at the center of the cosmos. Ptolemy's universe is also inherently hierarchical: ethereal Heaven and material Earth are separate, and governed by different laws. God, in the perfect realm of Heaven, rules the imperfect Earth. I suspect that it is no accident that the social and political structures of the Middle Ages reflected the hierarchical cosmology of Ptolemy. The dominion of the Church and divine right of kings were mirrors of the structure of the cosmos itself. 

Copernican ideas challenged that order. In his 1584 work //On the Infinite Universe and Worlds//, the Copernican scientist and mystic [[Giordano Bruno|http://en.wikipedia.org/wiki/Giordano_Bruno]] wrote:

//God is infinite, so His universe must be too...He is glorified not in one, but in countless suns; not in a single earth, a single world, but in a thousand thousand, I say in an infinity of worlds.//

It is hard to imagine coming from a modern perspective how such a pious argument could have been considered so heretical, and dangerous. But if the social and political order in the material world derives its legitimacy from the inherent order of the universe itself, one world under God, then the idea of an //infinity// of worlds utterly destroys that order, and by extension the very foundation of medieval society. Copernicanism was in fact deeply radical, and Bruno was burned at the stake for heresy by the Inquisition in 1600. At his sentencing, understanding the situation perfectly, he [[told the judges|http://en.wikiquote.org/wiki/Giordano_Bruno]], "Perchance you who pronounce my sentence are in greater fear than I who receive it". Their world was about to collapse. 

If Bruno was right and we are one of an infinity of worlds, we are not special, since in an infinity of worlds, there will certainly be an infinity of civilizations, even an infinity of //other Earths//, identical to our own. In a truly infinite universe, we are as individuals not even unique manifestations of ourselves, but one of an infinity of copies. Bruno's infinity of worlds has a spectacular realization in modern cosmology in the idea of [[eternal inflation|http://en.wikipedia.org/wiki/Chaotic_inflation]]. The eternally inflating universe is something like a glass of beer, a smooth fluid full of tiny bubbles. Inside each bubble is an //infinite// universe. The fluid itself is infinitely large, and contains an infinite number of bubbles, an infinity of infinities. 

[img[Beer bubbles|./images/BeerBubbles.jpg]]

Everything we see, or ever //can// see, is inside one bubble, but there exist an infinity of other universes, each forever separate from ours. (Sometimes two of these bubbles can collide, and astronomers are now [[looking for evidence|http://arxiv.org/abs/1012.3667]] of such collisions!)

Even stranger, theory suggests that the laws of physics will be different in each bubble. Different "laws" of physics can come about because of variations of fundamental dimensionless numbers in physics, such as the [[fine structure constant|http://en.wikipedia.org/wiki/Fine-structure_constant]] governing the strength of electromagnetism, \(\alpha = 1/137\), or the value of the cosmological constant expressed in Planck units, \(\Lambda / M_{\rm P}^4 \sim 10^{-120}\), which is surely the weirdest small number in the history of physics. There are many other examples. The [[Standard Model of particle physics|http://en.wikipedia.org/wiki/Standard_Model]] contains 19 "free" parameters, which are not explained by the theory and must be determined by experiment. Extensions to the Standard Model based on [[supersymmetry|http://en.wikipedia.org/wiki/Supersymmetry]] contain hundreds. Nobody really knows why these fundamental numbers have the values they do. 

The so-called [[Anthropic Principle|http://www.anthropic-principle.com/]]  suggests that the values of fundamental constants are what they are because of a //selection effect//: if they were different, we would not be here to measure them. This idea is currently very fashionable in cosmology, being advocated by such notable scientists as [[Alan Guth|http://www.slac.stanford.edu/spires/find/hep/www?rawcmd=find+a+guth%2C+a&FORMAT=WWW&SEQUENCE=]], [[Alexander Vilenkin|http://www.slac.stanford.edu/spires/find/hep/www?rawcmd=FIND+A+vilenkin%2C+a&FORMAT=www&SEQUENCE=]], and [[Andre Linde|http://www.slac.stanford.edu/spires/find/hep/www?rawcmd=FIND+A+linde%2C+a&FORMAT=www&SEQUENCE=]]. Two developments in theoretical physics have led to a recent fad for the Anthropic Principle in physics:

(1) The concept of a //multiverse//, that what we call our universe is actually one of many, which is a natural consequence of cosmological inflation. 

(2) The //string landscape//, which is the realization that string theory contains an enormous number of stable states for the vacuum, estimated to be as large as \(10^{500}\). Each different stable vacuum in the landscape will have different symmetries and couplings for fundamental particles. 

If these two ideas are correct, then the consequence is that there is a huge population of universes out there, each with its own "fundamental" physical laws.  However, in that plethora of options, life will find itself existing only in universes with physical constants compatible with the existence of life. This will be true even if the probability of finding a universe compatible with life is vanishingly small. Hence we do not need to explain //why// the fine structure constant is what it is. In fact, there is no explanation. 

I take issue with the Anthropic argument for three reasons:

(1) It is not a scientific argument, because it is neither predictive nor testable. We cannot, even //in principle//, observe the distribution of fundamental constants in other universes, or even determine whether or not such universes  exist. 

(2) The Anthropic Principle contains no prescription for deciding which properties of the universe are anthropically selected, and which are not. Which constants do we allow to vary from universe to universe, and which are fundamental and do not vary?

(3) The Anthropic Principle is based on a very narrow-minded concept of "life", and of what physical conditions make life possible. We currently know of only one example of an environment containing life, which introduces a massive bias in how we conceive of what makes life possible. 

Let me discuss each of these objections in turn. 

First, the Anthropic Principle is not science. Science is the process of finding natural explanations for things we do not understand. By contrast, the Anthropic Principle is an assertion that no explanation of certain phenomena is possible, even in principle. In this sense, it is an abdication of science, and is a close cousin to the idea of [[Irreducible Complexity|Irreducible Complexity, Science and Atheism]]. The basic syllogisms of Irreducible Complexity and the Anthropic Principle are eerily identical, save for the final conclusion:

(1) I do not understand phenomenon //X//.

(2) Nobody will ever understand phenomenon //X//. 

(3) (a) Therefore God did it.
(3) (b) Therefore //X// was selected anthropically from a multiverse. 

Consider the fine structure constant again as an example: a devotee of Irreducible complexity might hold that the apparent fine-tuning of \(\alpha\) is evidence for deliberate design in the universe: it is as if the fundamental laws of physics were deliberately set to make our existence possible. The Anthropic Principle likewise links the value of the fine structure constant to our existence: because carbon-based life would not exist in a universe with a different value for \(\alpha\), the apparent fine-tuning is evidence for Anthropic selection in a multiverse. This is terrifyingly specious logic, and I am amazed that otherwise credible scientists take it seriously. To accept either syllogism requires us to be capable of inferring a  probability distribution from a single instance of that distribution. The only difference between Irreducible Complexity and the Anthropic Principle is what one assumes about the //shape// of the probability distribution. Irreducible Complexity assumes that the probability distribution is infinitely localized around a single value, which can only be explained by intelligent design. The Anthropic Principle assumes that the probability distribution is perfectly flat, and the observed value can only be explained by the existence of intelligent life to observe it. Neither assumption is based on anything except blind faith. 

It is often argued (for example by [[Steven Weinberg|http://arxiv.org/abs/hep-th/0511037]]) that the Anthropic Principle is in fact just Darwin's idea of Natural Selection writ large: just as life itself arises through random mutation, the laws of physics themselves are manifestations of a random process. It is true that Natural Selection, like the Anthropic Principle, contains randomness as a central feature. However, Natural Selection contains a crucial ingredient which the Anthropic Principle lacks: a measure of fitness. Successful traits reproduce more efficiently than unsuccessful ones. The efficiency of replication is a measure (in fact, the sole measure) of  fitness in evolution. By contrast, the Anthropic Principle proposes no such measure of the "fitness" of the universe. Quite the opposite: it proposes on fundamental grounds that there is no such principle, and grafts on a selection effect //a posteriori//. (Attempts have been made at constructing a pseudo-Darwinian measure of fitness for cosmology, most notably by [[Lee Smolin|http://www.edge.org/3rd_culture/smolin_susskind04/smolin_susskind.html]], but the results are less than compelling.) The Anthropic Universe is ultimately wasteful and undirected, and we occupy a vanishingly unlikely oasis in an inconceivably huge desert of mute, lifeless universes. Perhaps this is true, but when all we can see is that single oasis, it is an incredible stretch to conclude that the desert must exist. 

My second objection to the Anthropic Principle lies in the assumption of what is allowed to vary from universe to universe, and what is not. For example, why do the fine structure constant or the cosmological constant randomly vary from universe to universe, but \(\pi\) or Napier's constant do not? In a straw poll of my colleagues in cosmology, I have asked the question: "if you can imagine a universe in which the laws of physics are different, can you imagine a universe in which the laws of //mathematics// are different?" The answer I receive overwhelmingly is "No". Mathematics is believed to be universal in a way that physics is not. We can accept the idea of a universe in which the strength of electromagnetism is different, but we cannot even imagine a universe in which there are no prime numbers. The problem is that nobody really knows where to draw the line. Which properties of our universe are truly "universal", and true in every instance of the multiverse, and which are variable and subject to anthropic selection? What principle guides the distinction? This is no small problem. Consider the odd fact that 
\begin{equation}
9876543210/1234567890 = 8.0000000729
\end{equation}
Why is this odd fraction equal to an integer to eight significant digits?  How can we explain such a strange apparent "fine tuning" in the universe? Perhaps a proponent of Intelligent Design would consider this evidence for God. Of course there is a much more mundane mathematical explanation for this "coincidence", which I leave for the reader to discover.* The point is that things which might //appear// to be odd coincidences or fine-tuning may in fact have perfectly understandable explanations. The fact that we do not have a perfectly understandable explanation for a particular "cosmic coincidence" cannot be taken as evidence that no such explanation exists. 

My third objection to the Anthropic Principle concerns the underlying assumption about the conditions necessary for life. Ironically, although the Anthropic Principle relies on universes being inconceivably numerous, it simultaneously relies on life being exceptionally rare. If life were common, there would be no selection effect which would bias our observations toward universes with certain well-defined physical properties. The problem is that we have no idea how common life is in our own universe, much less how common it is likely to be in a multiverse with a wide distribution of physical laws. We have one example of one planet with life, and the logic of the Anthropic Principle requires the wild extrapolation that //any// life must be more-or-less similar to us. We are biochemical machines, built out of atoms, with structure determined primarily by electromagnetism. In a cosmic sense, we are incredibly fragile: we can only survive in a very special environment, with a very narrow range of temperature, protection from cosmic radiation, liquid water, and so forth. Life like us, even if it is widespread in the universe, will only be found on little rocks that happen to be just the right distance from stable, long-lived stars. In a universe with no stars, or no little rocks, or no complex molecules, there will be no life like us. 

But is "life like us" the only possibility for how one might build life? There is no particular reason to think so. Certainly Natural Selection does not contain biochemistry as a fundamental assumption. //Any// system with sufficient complexity to self-replicate will be subject to Natural Selection. Furthermore, Natural Selection tells us that life will automatically optimize itself to suit the environment in which it evolves. We are highly specialized to survive on a little ball of rock with liquid water because we //evolved// on a little ball of rock with liquid water. Life which evolves in radically different circumstances will be specialized to survive in those circumstances, and might not look anything even remotely like us. We simply have no data on the environments in which other life forms can (or do) exist. One thing, however, that is being realized in modern biology is that even our narrow, carbon-based definition of life is astoundingly flexible: so-called [[extremophile|http://en.wikipedia.org/wiki/Extremophile]] life exists in environments so harsh that they were previously supposed to be lifeless. Bacteria may well be widespread in the solar system, not just on earth. The one lesson we are finding over and over is that life is more adaptable and more ubiquitous than we ever previously expected. My guess (and it is only a guess) is that someday, when we understand life better, we will find that life in the universe is not rare, but is ubiquitous, and fills a huge variety of wildly implausible niches. Life on the Sun? It seems incredibly unlikely, but is it //impossible//? Maybe we just haven't looked for it in the correct way. How about organisms comprised only of dark matter or black holes, based on gravity instead of electromagnetism? Why not? How about organisms based on quantum gravity, capable of evolution into complex, even intelligent life forms in the span of a femtosecond? Why not? 
Such life forms could populate a universe with a lifetime so short that a naive application of the Anthropic Principle would rule it out as "suitable for life". If we embrace the idea of the multiverse, any environment with sufficient complexity to develop life will be replicated exponentially many times, and therefore if it is possible for Natural Selection to take hold in a given environment, it inevitably will. The idea of a multiverse is by its nature incompatible with the idea of the rarity of life, but the Anthropic Principle requires both in order to make any sense.

Put another way, the Anthropic principle depends crucially on the assumption that we are in some deep and fundamental way //special//. Only a vanishingly rare set of circumstances will create beings such as us, so that we must have a universe finely tailored to our existence. Not only is this an embarrassingly anthropocentric notion, it clashes with the other great idea in cosmology, which dates back in its earliest form to Copernicus: the Cosmological Principle. Copernicus' radical idea was that the Earth enjoys no special position in the cosmos, and is just one of many planets. Modern cosmology extends this by realizing that the Sun is but one of many stars in a galaxy which is one of many galaxies in a universe which may be just one of many universes. The Cosmological Principle, in a nutshell, is that we are //ordinary//, and it is this very ordinariness that forms the fundamental organizing idea of modern cosmology. 

* Thanks to Greg Landsberg from Brown University for this cool brain teaser. 
''Moon with Earthlight, 2017 April 29:''

Camera: Sony SLT-A33 w/300 mm lens, ISO 1600, F/4.5, 1/4 second exposure.

[img(100%,auto)[Image|./images/Astro/Moon2017.04.29.png]]

''Moon and Saturn, 2017 March 20:''

Camera: Sony SLT-A33 300mm F/4.5. 

[img(100%,auto)[Image|./images/Astro/MoonAndSaturn2017.03.20.png]]

''Sun Dog, 2017 March 5:''

Camera: Android Moto-X phone. 

[img(100%,auto)[Image|./images/Astro/SunDog.2017.03.05.jpg]]

''Orion Nebula, 2017 February 19:''

Telescope: Celestron 8 (2032mm F/10) with F/6.3 Focal Reducer
Camera: Sony SLT-A33 at prime focus with Astronomik CLS filter

[img(100%,auto)[Image|./images/Astro/Orion.2017.02.19.jpg]]

''Venus, 2017 February 19:''

Telescope: Celestron 8 (2032mm F/10) with F/6.3 Focal Reducer
Camera: Sony SLT-A33 (Eyepiece projection, 30mm ocular).

[img(100%,auto)[Image|./images/Astro/Venus2017.02.19.jpg]]

''22 Degree Halo around Moon, January 2017''

Camera: Sony SLT-A33 30-50mm Zoom (30mm)

[img(100%,auto)[Image|./images/Astro/MoonHalo2017.01.jpg]]



''Andromeda Rising, 2016 August 8:''

Camera: Sony SLT-A33 18-50mm Zoom (18mm)
ISO: 16500
Exposure: 24 frames, 1/30 second exposure each

[img(100%,auto)[Image|./images/Astro/Andromeda_2016.08.09.gif]]

''Transit of Mercury, 2016 May 9:''

Telescope: Celestron 8 (2032mm F/10) with F/6.3 Focal Reducer
Solar Filter: Baader full aperture
Camera: Sony SLT-A33 at prime focus
ISO: 100
Exposure: 28 frames, 1/800 second exposure each

[img(100%,auto)[Image|./images/Astro/MercuryTransit2016.png]]

''Lunar Eclipse, 2015 September 27:''

Telescope: Celestron 8 (2032mm F/10) with F/6.3 Focal Reducer
Camera: Sony SLT-A33 at prime focus
ISO: 12800
Exposure: 3 frames, 0.5 second exposure each

[img(100%,auto)[Image|./images/Astro/Eclipse2015.09.27.png]]

''Neptune, 2013 October 12:''

Telescope: Celestron 8 (2032mm F/10)
                  Eyepiece projection, 32mm Celestron Pl&ouml;ssl eyepiece.
Camera: Sony SLT-A33 
ISO: 800
Exposure: 4 frames averaged, 1/5 second exposure each

[img(100%,auto)[Image|./images/Astro/Neptune2013.10.12.jpg]]

''Planetary Nebula M57, 2013 September 24:''

Telescope: Celestron 8 (2032mm F/10) with F/6.3 Focal Reducer
Camera: Sony SLT-A33 at prime focus with Astronomik CLS filter
ISO: 1600
Exposure: 16 frames, 15 second exposure each

[img(100%,auto)[Image|./images/Astro/M57.2013.09.24.jpg]]

''Globular Cluster M13, 2013 September 23:''

Telescope: Celestron 8 (2032mm F/10) with F/6.3 Focal Reducer
Camera: Sony SLT-A33 at prime focus with Astronomik CLS filter
ISO: 1600
Exposure: 8 frames, 15 second exposure each

[img(100%,auto)[Image|./images/Astro/M13.2013.09.23.jpg]]

''Moon, 2012 November 17 :''

Telescope: Celestron 8 (2032mm F/10) with F/6.3 Focal Reducer
Camera: Sony SLT-A33 at prime focus
ISO: 100

This photo is unusual in that it was taken between the total solar eclipse of 2012 November 13 and the penumbral lunar eclipse of 2012 November 28. 

[img(100%,auto)[Image|./images/Astro/Moon2012.11.17.jpg]]

''Transit of Venus, 2012 June 5:''

Telescope: Celestron 8 (2032mm F/10)
Solar Filter: Baader full aperture
Camera: Sony SLT-A33 at prime focus
ISO: 100
Exposure: 1/250 sec
Latitude: 41.808047
Longitude: -87.935247

[img(100%,auto)[Image|./images/Astro/23.22.27.jpg]]

[img(100%,auto)[Image|./images/Astro/00.05.36.jpg]]

[img(100%,auto)[Image|./images/Astro/Transit2012Warhol.jpg]]

A python script for parsing LaTeX files and auto-generating BibTeX bibliography entries from SPIRES-format citations. If given an existing bibliography file, GenBib.py will append new references to the file.  Requires [[SPIRES.py|Retrieving Bibliography Data from SPIRES]] and [[pybtex|http://pybtex.sourceforge.net/]].

Usage:
{{{python ./GenBib.py -i <input TeX file> --bib <BibTeX bibliography file> [--verbose]}}}

GenBib.py:
{{{
#!/usr/bin/python

import sys, re
import StringIO
from optparse import OptionParser
from SPIRES import get_BibTeX, is_SPIRES_format, MakeBibEntry


#################################################################################################
#                                                                                               #
# Routines for extracting citations from a LaTeX file and generating a BibTeX-format            #
# bibliography. References are automatically looked up on SPIRES                                #
#                                                                                               #
# Author: Will Kinney (Univ. at Buffalo, SUNY)                                                  #
# License: Attribution-NonCommercial-ShareAlike 3.0 United States (CC BY-NC-SA 3.0)             #
#          <http://creativecommons.org/licenses/by-nc-sa/3.0/us/>                               #
#                                                                                               #
#################################################################################################


#
# Generates a BibTeX bibliography entry from a citation in the text. 
#
def AddToBib(ref,Bib=None,check_SPIRES=True,verbose=False):

	existing = get_BibTeX(ref,Bib,prefer_SPIRES=False)
	if existing == None:
		BibTeX_data = get_BibTeX(ref,None)
		if BibTeX_data: # Parse BibTeX entry
			if Bib:
				if verbose==True:
					print "Adding:\n",MakeBibEntry(ref,BibTeX_data)
				Bib.seek(0,2) # Seek to end of file for append
				Bib.write(MakeBibEntry(ref,BibTeX_data))
			else:
				print MakeBibEntry(ref,BibTeX_data)
		else:
			print "Unresolved reference: ",ref
	


#
# Extracts citations from a text stream and generates bibliography entries
#
def MakeBib(stream,Bib,verbose=True):

	text = stream.readline()
	while text:

		#
		# Parse citations in the paragraph. 
		#
		p = re.compile(r"\\cite\{([\w\:\+\=\-\_,]*?)\}")
		match=p.search(text)
		while match:
			ref = match.group(1)
			cites = ref.rsplit(',') # Split multiple references
			for cite in cites:
				AddToBib(cite,Bib,verbose=verbose) # Add citation to bibliography file
			text = p.sub(' ',text,1) # Overwrite citation with blank
			match=p.search(text) # Search for next

		text = stream.readline()
	
	if verbose==True:
		print "\nBibliography is up-to-date.\n"


#################################################################################################
#                                                                                               #
# Main module                                                                                   #
#                                                                                               #
#################################################################################################

#
# Parse LaTeX File into a BibTeX format bibliography, looking the references up on SPIRES.
#
def GenBib(InFile=None,BibFile=None,verbose=False,debug=False):

	if InFile==None:
		print "Must specify an input file."
		return(False)

	if verbose==True:
		print "Adding ",InFile," to bibliography",BibFile,"\n"

	in_stream=open(InFile)
				
	if BibFile:
		Bib=open(BibFile,'a+')
	else:
		Bib=None
	
	MakeBib(in_stream,Bib,verbose=verbose)

	if BibFile:
		Bib.close()

	in_stream.close()


	return(True)


if __name__ == "__main__":

	# Parse commandline arguments if this module is run as a script.
	parser = OptionParser()
	parser.add_option('-i', '--input',
						action='store', 
						dest='infile', 
						help='File containing TeX source from which to extract bibliography entries')
	# '-o/--output' and '-b/--bib' are synonyms: both store to the same
	# destination, 'BibFile'.
	parser.add_option('-o', '--output',
						action='store', 
						dest='BibFile',
						help='BibTeX file for output')
	parser.add_option('--bib', '-b', 
						action='store', 
						dest='BibFile', 
						default=None,
						help='BibTeX file for output')
	# BUG FIX: the defaults below were the *string* 'False', which is
	# truthy; use the boolean False so truth tests on the options behave.
	parser.add_option('--verbose', '-v',
						action='store_true', 
						dest='verbose', 
						default=False,
						help='Turns on verbose output')
	parser.add_option('--debug',
						action='store_true', 
						dest='debug', 
						default=False,
						help='Turns on debugging output')

	(options, args) = parser.parse_args()

	GenBib(InFile=options.infile,
			BibFile=options.BibFile,
			verbose=options.verbose,
			debug=options.debug)

}}}
This semester I am teaching an introductory physics course, which aims to teach the basics of physics without calculus. The students are pretty much all non-physics majors, many of whom are pre-med and pre-pharmacy who need a physics class to pass the MCAT. Many, if not most, of the students in the class find physics confusing and intimidating. Unfortunately, we do little to dispel that impression, and a lot to reinforce it. Here is one case in point, problem 6.11 from Giordano, //College Physics: Reasoning and Relationships//, First Edition (copyright 2010, Brooks Cole, Cengage Learning, reproduced under [[fair use|http://www.copyright.gov/fls/fl102.html]]):

//Two railroad cars, each of mass \(2.1 \times 10^4\  {\rm kg}\), are connected by a cable to each other, and the car in front is connected by a cable to the engine as shown in the figure below. The cars start from rest and accelerate to a speed of \(1.9\  {\rm m/s}\) after \(1 {\rm min}\). //

[img[Giordano 6.11|./images/Giordano6.11.gif]]

//(a) Find the work done by cable 1 on the car in the back. // ''(Answer:  37900 Joules)''
//(b) Find the work done by cable 1 on the car in front. //  ''(Answer:  -37900 Joules)''
//(c) Find the work done by cable 2 on the car in front. //  ''(Answer:  75800 Joules)''

As far as I can tell, the point of the problem is to apply the concept that the work is given by the force times the displacement,
\begin{equation}
W = F \Delta x
\end{equation}
So what's wrong with this problem? Everything. 

Note what is given in the problem is the change in velocity of the train. Since there is no change in potential energy in the problem, the total work done on each car is the same,
\begin{equation}
W = \Delta ({\rm K.E.}) = \frac{1}{2} m (\Delta v)^2 = 37900\ {\rm J}
\end{equation}
The total work done on the two cars together is twice that done on either car alone, \(W_{\rm tot} = 75800\ {\rm J}\).  The difficulty is that the exercise does not ask for the work done //on// the car, but for the work done //by// each of the cables. According to the given solutions,  the tension in cable 2 is twice as large as the tension in cable 1, and from \(W = F \Delta x\), it is therefore doing twice as much work, //i.e.// cable 2 is doing 75800 J of work on the car in front, and cable 1 is doing half as much, or 37900 J on the car in back. But this adds up to more than the total work done of 75800 J! What happened to conservation of energy?

The Physics 101 answer is that we must take into account the fact that cable 1 is pulling //backward// on the car in front, and is therefore "doing negative work" in an amount that exactly cancels the extra 37900 J, hence the answer to part (b) of the question. This is standard intro physics stuff.

It's also nonsense. 

The problem is that intro physics books //define// work by the formula \(W = F \Delta x\), and then prove the "work/energy theorem", which shows that the work is equal to the change in energy. The definition is abstract (and confusing), and it is only later that we relate work to a physically intuitive concept. Giordano is actually worse than this, because the work/energy theorem is initially stated so that the work is equal to the change in //kinetic// energy, not the total energy. Only later is potential energy introduced and the theorem generalized. It seems to me that a much, much, better way to approach this is to //define// work as the change in energy, and then //derive// the "work/energy theorem", \(W = F \Delta x\). This is the way physicists think about work. While the Physics 101 solution is in fact a way to apply the formula \(W = F \Delta x\) to the problem of the two train cars and get the correct answer for the total work done on the system, it is a //completely incorrect// way to think about the concept of work. Work happens when one system loses energy and another system gains energy. Since the cables themselves carry no energy, the //cables// aren't doing any work at all. Therefore, the strictly correct answer to the problem is that the work done by the cables is zero! The work is being done by the //locomotive//, which is extracting energy from burning fuel and turning that energy into kinetic energy and into heat. If we taught the students this, they would learn something about the physics of conservation of energy. Instead, what we give them is an empty exercise in accounting. 

All of this makes me think of Richard Feynman's lament about high-school physics textbooks in //Surely You're Joking, Mr. Feynman//:

//...something would look good at first and then turn out to be horrifying. For example, there was a book that started out with four pictures: first there was a windup toy; then there was an automobile; then there was a boy riding a bicycle; then there was something else. And underneath each picture it said, "What makes it go?"

I thought, "I know what it is: They're going to talk about mechanics, how the springs work inside the toy; about chemistry, how the engine of the automobile works; and biology, about how the muscles work."

It was the kind of thing my father would have talked about: "What makes it go? Everything goes because the sun is shining." And then we would have fun discussing it:

"No, the toy goes because the spring is wound up," I would say. "How did the spring get wound up?" he would ask.

"I wound it up."

"And how did you get moving?"

"From eating."

"And food grows only because the sun is shining. So it's because the sun is shining that all these things are moving." That would get the concept across that motion is simply the transformation of the sun's power.

I turned the page. The answer was, for the wind-up toy, "Energy makes it go." And for the boy on the bicycle, "Energy makes it go." For everything, "Energy makes it go."

Now that doesn't mean anything. Suppose it's "Wakalixes." That's the general principle: "Wakalixes makes it go." There's no knowledge coming in. The child doesn't learn anything; it's just a word!

What they should have done is to look at the wind-up toy, see that there are springs inside, learn about springs, learn about wheels, and never mind "energy." Later on, when the children know something about how the toy actually works, they can discuss the more general principles of energy.

It's also not even true that "energy makes it go," because if it stops, you could say, "energy makes it stop" just as well. What they're talking about is concentrated energy being transformed into more dilute forms, which is a very subtle aspect of energy. Energy is neither increased nor decreased in these examples; it's just changed from one form to another. And when the things stop, the energy is changed into heat, into general chaos.

But that's the way all the books were: They said things that were useless, mixed-up, ambiguous, confusing, and partially incorrect. How anybody can learn science from these books, I don't know, because it's not science. //

(The full text of the chapter from //SYJMF// can be found at [[The Textbook League|http://www.textbookleague.org/103feyn.htm]])
//Climate Change//

''-noun''
Used to mean "Global Warming", now means nothing. 
[img(100%,auto)[Image|./images/MengerSponge/LegoLevel3MengerSponge.jpg]]

Parts List:

Layers 1,3,7,9,19,21,25,27:
1x2: 40
1x4: 20
2x2: 54
2x4: 16
Total: 130 x 8 layers = 1040

Layers 2,8,20,26:
1x1: 40
1x2: 76
2x2: 16
Total: 132 x 4 layers = 528

Layers 4,6,22,24:
1x2: 72
1x4: 12
2x2: 16
Total: 100 x 4 layers = 400

Layers 5, 23:
1x1: 72
1x2: 28
Total: 100 x 2 layers = 200

Layers 10,12,16,18:
1x2: 32
1x4: 16
2x2: 32
Total: 80 x 4 layers = 320

Layers 11,17:
1x1: 32
2x2: 48
Total: 80 x 2 layers = 160

Layers 13, 15:
2x2: 64
Total 64 x 2 layers = 128

Layer 14:
1x1: 64
Total: 64 x 1 layer = 64

Total: Parts: 2840

[img(100%,auto)[Image|./images/MengerSponge/Level2Pattern.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer1.2.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer1.3.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer1.4.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer1.6.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer1.9.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer2.6.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/CompletedAssemblies.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer2.14.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer2.17.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/Layer2.18.jpg]]

[img(100%,auto)[Image|./images/MengerSponge/LegoLevel3MengerSponge.jpg]]
A set of scripts for parsing LaTeX files and converting them to TiddlyWiki format. Requires [[SPIRES.py|Retrieving Bibliography Data from SPIRES]], [[LaTeXBlock.py|Parsing LaTeX Files]], and [[addtiddler.py|http://scienceoss.com/insert-content-into-tiddlywikis-with-this-python-script/]].

tex2wiki.py:
{{{
#!/usr/bin/python

import sys, re
import StringIO
from optparse import OptionParser
from addtiddler import addtiddler
from LaTeXBlock import LaTeXBlock
from SPIRES import get_BibTeX, is_SPIRES_format



#
# Global variables
#
imagedir='./'
TiddlerTitle="NewTiddler"




#################################################################################################
#                                                                                               #
# Routines for converting LaTeX to Wiki format                                                  #
#                                                                                               #
# Author: Will Kinney (Univ. at Buffalo, SUNY)                                                  #
# License: Attribution-NonCommercial-ShareAlike 3.0 United States (CC BY-NC-SA 3.0)             #
#          <http://creativecommons.org/licenses/by-nc-sa/3.0/us/>                               #
#                                                                                               #
#################################################################################################

#
# Routines to convert inline LaTeX tokens to TiddlyWiki format.
#
def dummy(match):
	"""Placeholder handler: consumes the match and returns None."""
	return None

def null(match):
	"""Handler that deletes the matched token from the output."""
	return ''

def inline_math(match):
	"""Convert inline LaTeX math ($...$) into MathJax \\( ... \\) form.

	match.group(1) is the text between the dollar signs.  The
	repr()/.strip() idiom quotes the captured string with repr() and then
	strips repr()'s own quote character from both ends.
	"""

	# Equations with primes in them will be double-quote delimited, otherwise single-quote!

	st = "\(" + repr(match.group(1)).strip(repr(match.group(1))[0]) + "\)"

	#
	# Check for text formatting tags and replace with math equivalents.
	# NOTE(review): the replacement strings ('\\mathbf' etc.) contain
	# escapes like \m that Python 2's re leaves intact but Python 3's re
	# rejects -- this module is Python 2 code.
	#
	p = re.compile(r"\\bf")
	m = p.search(st)
	while m:
		st = p.sub('\\mathbf', st,1)
		m = p.search(st)

	p = re.compile(r"\\it")
	m = p.search(st)
	while m:
		st = p.sub('\\mathit', st,1)
		m = p.search(st)

	# \em is also mapped to math italics
	p = re.compile(r"\\em")
	m = p.search(st)
	while m:
		st = p.sub('\\mathit', st,1)
		m = p.search(st)

	p = re.compile(r"\\cal")
	m = p.search(st)
	while m:
		st = p.sub('\\mathcal', st,1)
		m = p.search(st)

	# Workaround for Mathjax bug with representing double-primes
	st = re.subn("'","' ",st)[0]

	# Workaround for Mathjax bug with representing double-brackets
	st = re.subn("{{","{ {",st)[0]

	# Workaround for Mathjax bug with representing \hdots tag
	# NOTE(review): pattern "\hdots" relies on Python 2 re accepting the
	# unknown escape \h; Python 3 raises "bad escape" here.
	st = re.subn("\hdots","\cdots",st)[0]


	return st
	
def title(match):
	"""Convert \\title{...}: record the title globally and emit a Wiki heading."""
	global TiddlerTitle
	raw = repr(match.group(1))
	heading = raw.strip(raw[0])
	TiddlerTitle = heading
	return "!" + heading + "\n"

def author(match):
	"""Convert \\author{...} into a Wiki author line."""
	raw = repr(match.group(1))
	return "\nAuthor: " + raw.strip(raw[0])

def affiliation(match):
	"""Convert \\affiliation{...} into a Wiki affiliation line."""
	raw = repr(match.group(1))
	return "\nAffiliation: " + raw.strip(raw[0])

def email(match):
	"""Convert \\email{...} into a TiddlyWiki mailto link."""
	raw = repr(match.group(1))
	addr = raw.strip(raw[0])
	return "<[[" + addr + "|mailto:" + addr + "]]>"

def section(match):
	"""Convert \\section{...} to a level-2 Wiki heading."""
	raw = repr(match.group(1))
	return "\n!!" + raw.strip(raw[0]) + "\n"

def subsection(match):
	"""Convert \\subsection{...} to a level-3 Wiki heading."""
	raw = repr(match.group(1))
	return "\n!!!" + raw.strip(raw[0]) + "\n"

def subsubsection(match):
	"""Convert \\subsubsection{...} to a level-4 Wiki heading."""
	raw = repr(match.group(1))
	return "\n!!!!" + raw.strip(raw[0]) + "\n"

def italic(match):
	"""Wrap the matched text in TiddlyWiki italic markers."""
	raw = repr(match.group(1))
	return "//" + raw.strip(raw[0]) + "//"

def boldface(match):
	"""Wrap the matched text in TiddlyWiki bold markers."""
	raw = repr(match.group(1))
	return "''" + raw.strip(raw[0]) + "''"

def emphasis(match):
	"""Render \\em emphasis as TiddlyWiki italics."""
	raw = repr(match.group(1))
	return "//" + raw.strip(raw[0]) + "//"

def quote(match):
	"""Convert LaTeX ``...'' quoting into plain double quotes."""
	raw = repr(match.group(1))
	return '"' + raw.strip(raw[0]) + '"'

def ref(match):
	"""Convert \\ref{label} into an arrow link to the label's tiddler."""
	raw = repr(match.group(1))
	return "[[[&rarr;]|" + raw.strip(raw[0]) + "]]"

def url(match):
	"""Convert \\url{...} into a TiddlyWiki external link."""
	raw = repr(match.group(1))
	return "[[" + raw.strip(raw[0]) + "]]"

def caption(match):
	"""Render a figure caption in italics."""
	raw = repr(match.group(1))
	return "//" + raw.strip(raw[0]) + "//"

def item(match):
	"""Convert \\item{...} into a Wiki bullet entry."""
	raw = repr(match.group(1))
	return "* " + raw.strip(raw[0]) + '\n\n'

def footnote(match):
	"""Render a \\footnote{...} inline, wrapped in parentheses."""
	raw = repr(match.group(1))
	return "(" + raw.strip(raw[0]) + ")"

def includegraphics(match):
	"""Convert \\includegraphics[opts]{file} into a Wiki image tag.

	match.group(1) is the (ignored) options string; match.group(2) is the
	image path, prefixed with the module-level `imagedir`.
	"""
	raw = repr(match.group(2))
	# Strip the quote character that repr() itself added.  (The original
	# stripped with the first character of group(1)'s repr; when the two
	# reprs choose different quote characters -- e.g. group(1) contains a
	# single quote -- the quotes leaked into the output.)
	return "[img(100%,auto)[" + imagedir + raw.strip(raw[0]) + "]]"

def longhyphen(match):
	"""Replace a LaTeX em-dash (---) with a Wiki double hyphen."""
	return "-" * 2

def cite(match):
	# Convert a \cite{...} token into inline citation links.
	# NOTE(review): gen_cite_link is not defined anywhere in this file
	# (only gen_cite_foot exists), so calling this handler raises
	# NameError.  It is not wired into rule_list; \cite tokens are
	# handled directly in parse_text() via gen_cite_foot() instead.
	# (The loop variable below also shadows this function's own name.)
	ref = match.group(1)
	cites = ref.rsplit(',')
	s = ""
	for cite in cites:
		s = s + " ```" + gen_cite_link(cite) + "``` "
	return(s)

def hardspace(match):
	"""Replace a LaTeX hard space (\\  or ~) with an ordinary space."""
	return " "

def smallskip(match):
	"""Replace \\smallskip with a single paragraph break."""
	return "\n" * 2

def bigskip(match):
	"""Replace \\bigskip with a wider paragraph break."""
	return "\n" * 3

def percent(match):
	"""Convert an escaped percent sign (\\%) back to a literal '%'.

	(A leftover debugging print statement that wrote "in percent" to
	stdout has been removed.)
	"""
	return ('%')

#
# Table of (regex, handler) pairs mapping LaTeX tokens to the converter
# routines above.  parse_text() applies these in order, re-matching and
# substituting one occurrence at a time.
#
rule_list = [
# Inline math and front matter
(r"\$(.*?)\$",inline_math),
(r"\\title\{(.*?)\}",title),
(r"\\author\{(.*?)\}",author),
(r"\\affiliation\{(.*?)}",affiliation),
(r"\\email\{(.*?)\}",email),
# List items and links
(r"\\item\{(.*?)\}",item),
(r"\\url\{(.*?)\}",url),
# NOTE(review): the greedy (.*) here (and in caption below) would swallow
# everything up to the LAST closing brace if two such tokens share a line.
(r"\\footnote\{(.*)\}",footnote),
# Sectioning commands (starred sections are treated the same as plain ones)
(r"\\section\{(.*?)\}",section),
(r"\\section\*\{(.*?)\}",section),
(r"\\subsection\{(.*?)\}",subsection),
(r"\\subsubsection\{(.*?)\}",subsubsection),
(r"\\caption\{(.*)\}",caption),
(r'\\"',null), # Umlaut
# Text styling
(r"\`\`(.*?)\'\'",quote),
(r"\{\\it(.*?)\}",italic),
(r"\{\\bf(.*?)\}",boldface),
(r"\\textit\{(.*?)\}",italic),
(r"\{\\em(.*?)}",emphasis),
# Double-backslash variants of the styling tags
(r"\{\\\\it(.*?)\}",italic),
(r"\{\\\\bf(.*?)\}",boldface),
(r"\\\\textit\{(.*?)\}",italic),
(r"\{\\\\em (.*?)}",emphasis),
# Cross-references and labels
(r"\(\\ref\{([\w\:\+\=\-\_]*?)\}\)",ref),
(r"\\ref\{([\w\:\+\=\-\_]*?)\}",ref),
(r"\\label\{([\w\:\+\=\-\_]*?)\}",null),
# Unescaped % starts a LaTeX comment: delete to end of line
(r"(?<!\\)\%(.*)",null),
(r"\\includegraphics\[(.*?)\]\{(.*?)\}",includegraphics),
# Tokens that are simply dropped
(r"\\begin\{itemize\}",null),
(r"\\end\{itemize\}",null),
(r"---",longhyphen),
(r"\\bigskip",bigskip),
(r"\\smallskip",smallskip),
(r"\\noindent",null),
(r"\\documentclass.*",null),
(r"\\pdfoutput.*",null),
(r"\\usepackage{.*}",null),
(r"\\pacs{.*}",null),
(r"\\maketitle",null),
# Spacing
(r"\\ ",hardspace),
(r"~",hardspace),
(r"\\\%",percent)
]

#
# Generates an embedded tiddler reference
#
def tiddlerref(st):
	"""Wrap a tiddler name in an HTML-escaped <<tiddler ...>> transclusion."""
	return "&lt;&lt;tiddler [[" + st + "]]&gt;&gt;"

#
# Generates a footnote from a citation in the text. 
#
def gen_cite_foot(ref,Bib=None,check_SPIRES=True):
	"""Generate a Wiki footnote for one citation key.

	ref:          BibTeX/SPIRES citation key.
	Bib:          optional bibliography stream, passed to get_BibTeX().
	check_SPIRES: accepted for interface compatibility; not used here.

	Returns the footnote wrapped in " ``` ... ``` " markers, or an
	"Unresolved Reference" note when the key cannot be resolved.
	"""

	BibTeX_data = get_BibTeX(ref,Bib)
	if BibTeX_data: # Parse BibTeX entry

		title = BibTeX_data.fields['title']
		year = BibTeX_data.fields['year']
		author = BibTeX_data.persons['author']
		# SPIRES-style keys get a direct link to the inspirehep record
		if (is_SPIRES_format(ref)):
			citelink = "http://inspirehep.net/search?ln=en&p=" + ref + "&of=hd"
		else:
			citelink=''

		#
		# Parse BibTeX data into a footnote: one author, two authors
		# (possibly the SPIRES "others" placeholder), or "et al.".
		#
		if len(author) == 1:
			s = author[0].last()[0]
		elif len(author) == 2:
			# Handle SPIRES-format multiple author entry
			if author[1].last()[0] == 'others':
				s = author[0].last()[0] + ", //et al.,//"
			else:
				s = author[0].last()[0] + " & " + author[1].last()[0] 
		else:
			s = author[0].last()[0] + ", //et al.,//"

		if citelink: # We have a verified good link to a SPIRES record
			s = s + ", [["+ title.strip("{}") + "|" + citelink + "]] (" + year + ")"
		else:
			# No record link: cite journal, volume and pages instead
			journal = BibTeX_data.fields['journal']
			volume = BibTeX_data.fields['volume']
			pages = BibTeX_data.fields['pages']
			s = s +", //" + title.strip("{}") + "//, " + journal + " ''" + volume + "'', " + pages + " (" + year + ")"
	else:
		s = "Unresolved Reference: " + ref
	
	# Run the footnote itself through the LaTeX-to-Wiki converter
	return(" ```" + parse_text(StringIO.StringIO(s.rstrip())) + "``` ")



#
# Reads a blank line delimited paragraph from an input stream.
#
def read_par(stream):
	"""Read the stream, joining lines into blank-line-delimited paragraphs.

	Lines within a paragraph are joined with single spaces; blank lines
	become paragraph breaks ("\\n\\n", or a bare "\\n" when extending an
	existing break).  Unescaped LaTeX comments ("%" to end of line) are
	stripped before lines are joined.  Reads until the stream is
	exhausted and returns the accumulated text.
	"""
	text = ""

	st = stream.readline()
	while (st):

		#
		# Strip newlines unless we're at a paragraph boundary
		#
		strip = True
		if (not st.rstrip()): # Blank line
			if (text == ""):
				st = ""       # Ignore leading blank lines
			elif (text[-1] == '\n'):
				st = '\n'     # Extend an existing paragraph break
			else:
				st = "\n\n"   # Start a paragraph break
			strip = False

		#
		# Strip comments: an unescaped "%" kills the rest of the line.
		# The negative lookbehind keeps literal "\%" intact.  (The
		# original pattern, r"(!\\\\)\%(.*)", required a literal "!\\"
		# before the "%" and so never matched; comments survived line
		# joining, and the comment rule in rule_list then deleted real
		# text that had been joined onto the same line.)
		#
		p = re.compile(r"(?<!\\)\%(.*)")
		match = p.search(st)
		if match:
			st = p.sub("",st,1)


		#
		# Append new line to text block
		#
		if (strip): # Only include newlines at paragraph boundaries
			if (text == ""):
				text = text + st.rstrip()
			elif (text[-1] == '\n'):
				text = text + st.rstrip()
			else:
				text = text + " "  + st.rstrip()
		else:
			text = text + st

		st = stream.readline() # Read the next line

	return (text)



#
# Routine to parse equations. 
#
def parse_equation(stream,tag):
	"""Parse a LaTeX equation/eqnarray body into MathJax-ready text.

	stream: stream holding the environment's body.
	tag:    environment name ('equation' or 'eqnarray').

	Returns (label, text): label is the \\label name (or None), text is
	the body re-wrapped in \\begin{tag}...\\end{tag}.
	"""

	retstr = "\\begin{" + tag + "}\n" # Return standardized equation delimiters
	labelreg = r"\\label\{([\w\:\+\=\-\_]*)\}"
	label = None
	st = stream.readline()
	while (st):
		p = re.compile(labelreg)
		match = p.search(st)

		if match:
			# repr()/strip() removes the quote characters repr() adds
			label = repr(match.group(1)).strip(repr(match.group(1))[0])
			spl = re.split(labelreg,st,1) # In case label is on line with other text
			st = spl[0] + spl[-1]

		# Workaround for Mathjax bug with representing double-primes
		st = re.subn("'","' ",st)[0]

		# Workaround for Mathjax bug with representing double-brackets
		st = re.subn("{{","{ {",st)[0]

		# Workaround for Mathjax bug with representing \hdots tag
		# NOTE(review): pattern "\hdots" relies on Python 2 re accepting
		# the unknown escape \h; Python 3 raises "bad escape" here.
		st = re.subn("\hdots","\cdots",st)[0]
		
			
		retstr = retstr + st

		st = stream.readline()

	# st is "" at loop exit, so st.rstrip() contributes nothing here.
	retstr = retstr + st.rstrip() + "\\end{" + tag + "}"
	return((label,retstr))


#
# Routine to parse figures. 
#
def parse_figure(stream):
	"""Convert a LaTeX figure environment to Wiki text.

	Returns (label, text): label is the figure's \\label name (or ""),
	text is the parsed body surrounded by blank lines.
	"""
	label = ""
	body = ""

	par = read_par(stream)
	while par:
		found = re.search(r"\\label\{([\w\:\+\=\-\_]*)\}", par)
		if found:
			raw = repr(found.group(1))
			label = raw.strip(raw[0])

		body = body + parse_text(StringIO.StringIO(par))
		par = read_par(stream)

	return((label, '\n\n' + body + '\n'))


#
# Routine to parse itemized lists. 
#
def parse_itemize(stream):
	"""Convert a LaTeX itemize environment to a Wiki bulleted list.

	Returns ("", text); itemized lists carry no label.
	"""
	out = "\n"
	item_re = re.compile(r"\\item\{(.*?)\}")

	par = read_par(stream)
	found = item_re.search(par)
	while (found):
		raw = repr(found.group(1))
		entry = raw.strip(raw[0])
		out = out + "* " + parse_text(StringIO.StringIO(entry)) + '\n'
		par = re.split(r"\\item\{(.*?)\}", par, 1)[-1]
		found = item_re.search(par)

	return(("", out))

#
# Routine to parse enumerated lists. 
#
def parse_enumerate(stream):
	"""Convert a LaTeX enumerate environment to a Wiki numbered list.

	Returns ("", text); enumerated lists carry no label.
	"""
	out = "\n"
	item_re = re.compile(r"\\item\{(.*?)\}")

	par = read_par(stream)
	found = item_re.search(par)
	while (found):
		raw = repr(found.group(1))
		entry = raw.strip(raw[0])
		out = out + "# " + parse_text(StringIO.StringIO(entry)) + '\n'
		par = re.split(r"\\item\{(.*?)\}", par, 1)[-1]
		found = item_re.search(par)

	return(("", out))


#
# Routine to parse generic text blocks.
#
def parse_text(stream,Bib=None):
	"""Convert a block of LaTeX text to TiddlyWiki markup.

	stream: input stream, consumed as one paragraph via read_par().
	Bib:    optional bibliography stream used to resolve \\cite keys.
	"""

	#
	# Read a paragraph.
	#
	text = read_par(stream)

	#
	# Parse inline LaTeX tokens in paragraph, one substitution at a
	# time, re-searching after each so repeated tokens are all handled.
	#
	for reg in rule_list:
		p = re.compile(reg[0])
	
		match = p.search(text)
		while match:
			# NOTE(review): the handler's return value is used as a
			# sub() replacement template, so backslashes in it (e.g.
			# "\(" from inline_math) undergo escape processing --
			# Python 2 re leaves unknown escapes intact.
			text = p.sub(reg[1](match), text,1)
			match = p.search(text)

	#
	# Parse citations in paragraph. 
	#
	p = re.compile(r"\\cite\{([\w\:\+\=\-\_,]*?)\}")
	match=p.search(text)
	while match:
		ref = match.group(1)
		cites = ref.rsplit(',') # Split multiple references
		s = ""
		for cite in cites:
			s = s +  gen_cite_foot(cite,Bib) # Generate a citation footnote 
		text = p.sub(s, text, 1)
		match=p.search(text)

	return(text)


#
# Converts a LaTeXBlock to an array of tiddlers
#
def Convert_LaTeX(block,DocTitle,Bib=None):
	"""Convert a LaTeXBlock tree into a list of (title, text) tiddlers.

	block:    top-level LaTeXBlock whose sub-blocks are converted in order.
	DocTitle: title for the main tiddler, appended last.
	Bib:      optional external bibliography file object; when None, a
	          'thebibliography' block in the document is used instead.

	Labeled blocks (equations, figures) become their own tiddlers and are
	transcluded into the main text; unlabeled output is inlined.
	"""
	tids = []

	#
	# Any commands to appear before the text of the document.
	#
	#DocHeader="''Table Of Contents:''\n<<showtoc>>\n"
	DocHeader=""

	#
	# First process a bibliography, if any. 
	#
	if Bib==None:
		for bl in block.GetBlocks():
			if bl.type=='thebibliography':
				Bib=StringIO.StringIO(bl.source)
	else:
		#
		# Read file into buffer and create StringIO object.  This
		# prevents multiple file reads by lower-level routines.
		#
		buf = Bib.read()
		Bib=StringIO.StringIO(buf)

	# Dispatch each sub-block to the appropriate parser.
	MainText=DocHeader
	for bl in block.GetBlocks():
		st = StringIO.StringIO(bl.source)

		if bl.type == 'Text':
			Wiki = (None,parse_text(st,Bib))
		elif bl.type == 'equation':
			Wiki = parse_equation(st,bl.type)
		elif bl.type == 'eqnarray':
			Wiki = parse_equation(st,bl.type)
		elif bl.type == 'figure':
			Wiki = parse_figure(st)
		elif bl.type == 'itemize':
			Wiki = parse_itemize(st)
		elif bl.type == 'enumerate':
			Wiki = parse_enumerate(st)
		elif bl.type == 'abstract':
			Wiki = (None,"!!Abstract\n" + parse_text(StringIO.StringIO(bl.source),Bib) + '\n\n')
		elif bl.type == 'thebibliography':
			Wiki=None # Already consumed above; produces no tiddler
		else:
			#
			# Default for unknown block types is to just pass the source
			# as plain text.
			#
			Wiki = (None,'\n' + parse_text(StringIO.StringIO(bl.source),Bib))

		if Wiki:
			label = Wiki[0]
			text = Wiki[1]
			if label:
				# Labeled block: separate tiddler, transcluded in place
				tids.append((label,text))
				MainText = MainText + '\n' + tiddlerref(label) + '\n'
			else:
				MainText = MainText + "" + text


	tids.append((DocTitle,MainText))
	return tids
			
	


#################################################################################################
#                                                                                               #
# Main module                                                                                   #
#                                                                                               #
#################################################################################################

#
# Parse LaTeX File into a set of TiddlyWiki tiddlers. 
#
def tex2wiki(InFile=None,WikiFile=None,ImageDir='./',DocTitle=None,BibFile=None,Author='',Tags='',verbose=False,debug=False):
	"""Convert a LaTeX file into tiddlers and add them to a TiddlyWiki file.

	InFile:   path to the LaTeX source (required).
	WikiFile: path to the TiddlyWiki file to update (required).
	ImageDir: directory prefix used by \\includegraphics conversion.
	DocTitle: overrides the document's \\title{} as main-tiddler title.
	BibFile:  optional external BibTeX bibliography file path.
	Author, Tags: metadata applied to every tiddler created.
	verbose, debug: diagnostic output flags.

	Returns True on success, False when a required argument is missing.
	"""

	global imagedir
	global TiddlerTitle

	if InFile==None or WikiFile==None:
		print "Must specify an input file and an output file."
		return(False)

	imagedir=ImageDir	# Set global variable

	if verbose==True:
		print "Adding ",InFile," to Wiki",WikiFile

	#	
	# Read file into a LaTeXBlock object.
	#
	TeX = LaTeXBlock('Text','Top')
	in_stream=open(InFile)
	TeX.Read_Stream(in_stream)
	in_stream.close()
	
	if debug==True:
		print TeX.Flatten()
		for block in TeX.GetBlocks():
			print "=========================================="
			print "Type = ",block.type
			print "Source = ",block.source
			print "==========================================\n\n"
			
	#
	# Convert LaTeXBlock to an array of Tiddlers
	#
	if DocTitle is not None:
		TiddlerTitle = DocTitle # Allow override of document title in text

	if BibFile: # External bibliography file
		Bib=open(BibFile)
	else:
		Bib=None
	
	tiddlers = Convert_LaTeX(TeX,TiddlerTitle,Bib)

	if Bib: 
		Bib.close()

	#
	# Add tiddlers to Wiki file.
	#
	for tid in tiddlers:
		if verbose==True:
			print "Adding new tiddler: ",tid[0]
		addtiddler(WikiFile,title=tid[0],description=tid[1],replace=True,author=Author,tags=Tags)


	return(True)


if __name__ == "__main__":

	# Parse command-line arguments when this module is run as a script.
	parser = OptionParser()
	parser.add_option('-i', '--input',
						action='store',
						dest='infile',
						help='File containing TeX source for conversion to wiki format')
	parser.add_option('-o', '--output',
						action='store',
						dest='outfile',
						help='Wiki file for output')
	parser.add_option('--image',
						action='store',
						dest='image_dir',
						default='./',
						help='Directory in which to store images (defaults to ./)')
	parser.add_option('--bib', '-b',
						action='store',
						dest='BibFile',
						default=None,
						help='Name of BibTeX bibliography file')
	parser.add_option('--title', '-t',
						action='store',
						dest='title',
						default=None,
						help='Title of main tiddler')
	parser.add_option('--author', '-a',
						action='store',
						dest='author',
						default='',  # was missing: None leaked into addtiddler()
						help='Author')
	parser.add_option('--tags',
						action='store',
						dest='tags',
						default='',  # was missing: None leaked into addtiddler()
						help='Tags for new tiddlers')
	parser.add_option('--verbose', '-v',
						action='store_true',
						dest='verbose',
						default=False,  # was the string 'False', which is truthy
						help='Turns on verbose output')
	parser.add_option('--debug',
						action='store_true',
						dest='debug',
						default=False,  # was the string 'False', which is truthy
						help='Turns on debugging output')

	(options, args) = parser.parse_args()

	tex2wiki(InFile=options.infile,
			WikiFile=options.outfile,
			ImageDir=options.image_dir,
			BibFile=options.BibFile,
			DocTitle=options.title,
			Author=options.author,
			Tags=options.tags,
			verbose=options.verbose,
			debug=options.debug)
}}}

The universe is [[expanding|Cosmological Expansion and the Big Bang]], which means that as time goes forward, everything moves further apart from everything else. This also means that in the past, when the universe was younger, everything was closer together than it is now. And what happens when you take a gas and compress it? It gets warmer. If you compress it a lot, it gets hot. Therefore, the Big Bang theory predicts that the early universe must have been hot, and the very early universe must have been very, very hot. This simple prediction of the Big Bang has an interesting consequence: if the early universe was hot, there must be a remnant "haze" of light left over from these early times. 

Here's how this works: not long after the Big Bang, the universe was hot enough that all of the hydrogen gas in space was [[ionized|http://en.wikipedia.org/wiki/Ionized]], with the electrons in the hydrogen separated from the protons. This happens at a temperature of about 3000 [[Kelvin|http://en.wikipedia.org/wiki/Kelvin]]. The universe is mostly hydrogen today, but at this early time, it was almost entirely hydrogen, with about 25% helium by weight, and trace amounts of Lithium. In such a [[plasma|http://en.wikipedia.org/wiki/Plasma_%28physics%29]] of ionized hydrogen, photons (//i.e.// light) scatter rapidly off the free electrons in the plasma, and the gas is therefore //opaque// to light. The universe at this time would have looked like a dense fog, glowing at about the same temperature as the surface of the sun. However, as the universe expanded, the gas gradually cooled, until its temperature dropped below 3000 K, which is cool enough that the protons capture the free electrons in the plasma, forming electrically neutral atoms of hydrogen. Neutral hydrogen is //transparent// to light, so the formation of neutral hydrogen in the early universe was like the sudden clearing of a fog. Once the universe became transparent, photons ceased to scatter and were free to travel through space in all directions unimpeded, shown schematically in the figure below. 

[img[Giordano 6.11|./images/CMB/recombfigure.jpg]]

Since the Big Bang happened everywhere in space at once (see [[Cosmological Expansion and the Big Bang]]), this process of recombination of atoms and release of photons likewise happened uniformly everywhere in the universe. The result is a so-called //background// of relict photons from the early universe, filling space uniformly and traveling in every direction. This is a fundamental and nontrivial prediction of the Big Bang model of cosmology. 

What do we observe today? Since the photons were initially interacting rapidly with the matter in the universe, they were in thermal equilibrium with the matter, and therefore had a [[black body spectrum|https://secure.wikimedia.org/wikipedia/en/wiki/Black_body]] with a temperature equal to the recombination temperature, about 3000 K. As the universe expanded, these photons [[increased in wavelength|https://secure.wikimedia.org/wikipedia/en/wiki/Redshift]], which for a black body spectrum can also be thought of as //cooling//. Expansion of the universe changed the temperature of the relict photon gas, and today we measure the temperature of the photon background to be about 2.7 Kelvin. This means that the universe has expanded by a factor of 3000 / 2.7, or about 1100, since recombination. A black body spectrum at 2.7 Kelvin peaks in the microwave band, so these leftover photons from the Big Bang are called the //Cosmic Microwave Background//.
The name "Big Bang" for the beginning of the universe is in some ways a very misleading term, since it immediately brings to mind an explosion, with debris flying out in all directions from the center of the explosion. This is not at all how the Big Bang works.

The basic principle behind a Big Bang (or //Friedmann-Robertson-Walker//) spacetime is: the universe has no center, and no edge. Another way to state this [[Cosmological Principle|http://en.wikipedia.org/wiki/Cosmological_principle]] is that no place in the universe is special: every observer in the universe sees, on average, the same thing.  If the universe had a center or an edge, not all observers would see the same thing, since some would be closer to the center (or to the edge) than others. This is a generalization of the [[Copernican Principle|http://en.wikipedia.org/wiki/Copernican_principle]], which is the realization that the earth is not in a special place in the universe. The Cosmological Principle makes this democratic: //nobody// is in a special place in the universe. 

The simplest example of a space with no center and no edge is an infinite, [[geometrically flat|Curved Spaces]] space. It is easy to imagine such a space in two dimensions: it is just a plane extending infinitely in all directions. How can such a space expand? Imagine a flat sheet of rubber. Draw a grid of squares on the sheet, and then pull the sheet evenly in all directions. The squares will get bigger as the rubber stretches, but will still stay square, like this:

[img[Cosmological Expansion|./images/Expansion/grid.gif]]

You can make this stretching sheet as big as you want. You can even make the sheet //infinitely// large, extending forever in all directions. It is only the squares on the sheet which get larger with time. Such an infinite, stretching sheet obeys the Cosmological Principle: every square is like every other. There is no center, and there is no edge. The generalization to three spatial dimensions is trivial: imagine a 3-D space sliced up into cubes, with the cubes growing in time just like the squares in the 2-D figure above. Such a space can also extend infinitely in every direction. 

What does an observer in such a space see? Imagine Bob is sitting on the black dot in the figure below, looking at Eve and Alice in two distant galaxies, represented by green and blue dots:

[img[Hubble Law 1|./images/Expansion/Bob1.jpg]]

Let's say the squares are 10 million light years on a side, so that Eve is 10 million light years away from Bob, and Alice is 20 million light years away. Now let the space expand until all the squares have doubled in size, so it looks like this:

[img[Hubble Law 2|./images/Expansion/Bob2.jpg]]

The squares are now 20 million light years on a side. Bob hasn't changed position: he sees himself at rest relative to the cosmological expansion. Eve and Alice have changed position, so Bob sees them as moving away from him. How far have they moved? We can just count the squares. Since the squares are now 20 million light years on a side, Eve is now 20 million light years away: her distance from Bob has doubled. Alice, separated by two squares, is now 40 million light years away: her distance has also doubled, which means that she has traveled twice as far as Eve in the same amount of time. The further galaxy is moving away faster! The reason for this is that, in any given period of time, each square increases in size by the same amount, so the change in the distance between any two galaxies in the same period of time is proportional to the number of squares (the distance) between the two galaxies. Another way to say this is that the recession velocity is proportional to the distance, called the //Hubble Law//:
\begin{equation}
v = H d
\end{equation}
where the proportionality factor \(H\) is called the //Hubble constant//. In the real universe, the Hubble constant is 22 kilometers per second (km/s) for every million light years. That is, we see a galaxy 10 million light years away from us receding at 220 km/s, a galaxy 20 million light years away is receding at 440 km/s, and a galaxy 30 million light years away is receding at 660 km/s. 

But isn't Bob then in some sense in the "center" of the expansion, since he is stationary and everybody else is moving away from him? Let's examine the situation from Eve's perspective. From Eve's point of view,  Bob and Alice are initially equal distances away:

[img[Hubble Law 3|./images/Expansion/Eve1.jpg]]

At the later time, Eve sees herself in the same spot, but the distances to Bob and Alice have each doubled:

[img[Hubble Law 4|./images/Expansion/Eve2.jpg]]

In exactly the same way, Alice sees herself at rest,

[img[Hubble Law 5|./images/Expansion/Alice1.jpg]]

with Bob and Eve both moving away from her:

[img[Hubble Law 6|./images/Expansion/Alice2.jpg]]

All three points of view, Bob's, Eve's, and Alice's, are consistent with one another. The distance between Bob and Eve is always one square, and between Bob and Alice, two squares. Each observer sees the universe from a different reference frame, but, via relativity, each reference frame is equally valid. All observers in the spacetime see themselves at rest, with the rest of the universe expanding away from them. Furthermore, this simple expanding space model predicts that we should see recession velocity proportional to distance, which is what is observed in the actual universe. The figure below, from [[this 2003 paper|http://arxiv.org/abs/astro-ph/0308418]] by Wendy Freedman and Michael Turner, shows data measuring cosmological expansion using various techniques:

[img[Hubble Law 7|./images/Expansion/HubbleLawData.jpg]]

The vertical axis in the top panel is the recession velocity of different galaxies in km/s, and the horizontal axis is the distance to the galaxies in Megaparsec (Mpc), where 1 Mpc is equal to 3.26 million light years. The bottom panel shows the Hubble constant inferred from the different data, showing that all of the measurements are consistent with a linear relation between recession velocity and distance. This linear relation is not perfect: in fact, if we look at distant enough galaxies, the linear relationship breaks down. This is evidence that the expansion of the universe is not happening at a constant rate, but is speeding up slightly with time.

What does this observed expansion mean about the history of the universe? Since the space is expanding as we go forward in time, that must mean that as we go backward in time, the grid we have drawn on our stretchable sheet must get smaller. If we could run time in reverse, the universe would look like this:

[img[Big Bang|./images/Expansion/backward_small.gif]]

The squares get smaller and smaller as we go backward in time. If we solve the equations from Einstein's General Theory of Relativity, we find that all of the squares go to //zero// size at a finite time in the past: the universe had a beginning! This beginning is a state of infinite density, since all of the matter in the universe is packed together infinitely closely, and is called the //Big Bang//. Note that the universe itself does //not// go to zero size at the Big Bang: all of the squares become infinitely small, but the //grid// of squares extends infinitely in all directions at all times. There is no "center", and the Big Bang did not happen in one place like an explosion. Rather, the Big Bang  happened //everywhere// in an infinite space at the same time. 

Is the universe //really// spatially infinite? Nobody can say for sure, because it is impossible to observe the entirety of the space. We can only see a small piece of the whole space, called our //horizon//. (For further reading on this, see [[Cosmological Horizons]].) All we can say for sure from observations is that the universe must be much larger than our horizon, so that for all practical purposes we can treat it as infinite. What we do know from observation is that the space really is Euclidean, like the flat grid in the description above. The most convincing data supporting this come from observations of the Cosmic Microwave Background like that of the [[WMAP satellite|http://map.gsfc.nasa.gov/universe/uni_shape.html]]. 
One of the most remarkable and profound results of science in the 20th Century was the discovery that the universe has a finite age: there was a beginning to space and time. This conclusion is a straightforward consequence of the observed [[expansion of the universe|Cosmological Expansion and the Big Bang]]. The [[WMAP satellite|http://map.gsfc.nasa.gov/]] has made an amazingly precise measurement of the age of the universe, \(13.73\pm0.12\) billion years. You should be impressed by this: human beings have pinpointed the moment of creation to an accuracy of \(1\%\)! (And, no, it was not [[six thousand years ago|http://creationwiki.org/Biblical_age_of_the_Earth]].)

If the universe has a finite age, then the universe we can see must have a finite //size// as well. Why? Because the speed of light is finite. When we look out in space with a telescope, we do not see the universe as it is today, but instead we see it as it //was//, because light takes time to travel to us. It takes light eight minutes to get from the Sun to the Earth, so when we look at the sun, we see it as it was eight minutes ago. When we look at the nearest star, four light years away, we see it as it was four years ago. When we look at the Andromeda galaxy, we see it as it was two //million// years ago. The further out in space we look, the further back in time we see. As scientists who study the history of the world, cosmologists are uniquely fortunate, because we do not have to piece together fossils of the past. All we have to do is look: if we build a telescope that can see out a billion light years, we can see the universe as it was a billion years ago. The entire history of the universe is laid bare for us to see. 

However, this comes with a downside of sorts. Even though the universe itself may well be infinite, we can only observe a small patch of the larger universe, called our //horizon//. Imagine a beacon that turns on at the moment of the Big Bang, and sends out a signal in all directions. That signal takes time to travel through space: one year after the Big Bang, the signal has only had time to travel one light year. A million years after the Big Bang, the signal has traveled a million light years, and so forth. Meanwhile, other parts of the universe are moving away from the beacon because of [[cosmological expansion|Cosmological Expansion and the Big Bang]]. If we follow the signal through the expanding universe, it will look something like this:

[img[Cosmological Expansion|./images/Expansion/grid_md.gif]]

Note that the signal from the beacon, seen in the figure above as a green circle, catches up to the expanding spacetime (for example an observer at the position of the blue dot on the right.) Suppose our position in the universe is marked by the dot at the center of the figure. The green circle represents how far light has traveled moving away from us, starting from the Big Bang. Therefore, it also represents how far light can have traveled //toward// us since the Big Bang: it's as far out as we can see in the universe. This is our horizon. Even if the universe as a whole is infinite, we can only see the small patch inside our horizon, which moves away from us at the speed of light. If, as in the figure above, our horizon catches up to the expanding spacetime, we see more and more of the universe as time goes along. 

The universe in the Big Bang model is completely uniform: every point is exactly the same as every other point. But that is not what we see when we look at the sky. Because the speed of light is finite, we see ourselves sitting at rest in the exact center of a spherical region of space with everything moving away from us, a situation weirdly reminiscent of [[Aristotle's cosmology|http://classics.mit.edu/Aristotle/heavens.html]]. However, since every point is the same as every other point, //every// observer in the universe must see exactly the same thing. (This is not a contradiction: see [[Cosmological Expansion and the Big Bang]] for an explanation.) Also because the speed of light is finite, the further out we look in space, the further back we see in time. Curiously, if we could build a telescope which could see out 13.7 billion light years, we could look in any direction and see the universe as it was 13.7 billion years ago. If we look out far enough //in any direction//, we will see the Big Bang! Contrary to the popular picture of the Big Bang as a center from which everything in the universe expands, in the real universe every observer sees herself in the center, at rest, with the Big Bang as a spherical surface receding from her at the speed of light. 

In practice, we cannot build a telescope which sees all the way back to the moment of the Big Bang, because the young universe was opaque to light. However, the universe became transparent at an age of about 300,000 years, so we //can// build telescopes to look out just 300,000 light years short of that 13.7 billion, to image the primordial "flash" from the hot early universe. This is not science fiction. That primordial flash is the [[Cosmic Microwave Background]], and it has been observed in exquisite detail, most recently by the  [[WMAP satellite|http://map.gsfc.nasa.gov/]], and new observations are in progress by the [[Planck satellite|http://www.esa.int/SPECIALS/Planck/index.html]]. Because looking back in space is looking back in time, we can look out and actually //see// the Big Bang, or at least its immediate aftermath. 

One recent discovery about cosmological expansion changes our picture of the horizon a bit: observations of [[Type Ia Supernovae|http://www.supernova.lbl.gov/]] indicate that the expansion of the universe is not slowing down as was expected by cosmologists. The expansion of the universe is actually //speeding up//. This is very counterintuitive: since gravity is an attractive force, one would expect that the collective gravitational pull between galaxies in an expanding universe would cause the expansion to slow down. For some mysterious (and not well understood) reason,  the opposite is happening. When the expansion of the universe is speeding up instead of slowing down, our horizon no longer "catches up" to the expanding space. This is because things that are moving away from us at less than the speed of light now will be moving away from us at //greater// than the speed of light in the future because of the accelerating expansion. Our horizon always increases in size at the speed of light, so the ever-accelerating expansion "outruns" the horizon, as in the figure below:

[img[Cosmological Expansion|./images/Expansion/grid_infl.gif]]

Note that in this figure, the point represented by the blue dot on the right is pulling away from the horizon because of the accelerating expansion. Instead of seeing more and more of the universe with time, we see less and less of it! Everything that is not gravitationally bound to us will sooner or later be moving away from us at faster than the speed of light and will be outside of our observable universe. The Milky Way galaxy and a few other members of the [[Local Group|http://en.wikipedia.org/wiki/Local_Group]] of galaxies will be alone in an empty, dark universe. Cosmology will be impossible, because there will be no way for future civilizations to measure the expansion of the universe. A very strange end of the world indeed!
Under construction.
/***
|Name|DcTableOfContentsPlugin|
|Author|[[Doug Compton|http://www.zagware.com/tw/plugins.html#DcTableOfContentsPlugin]]|
|Contributors|[[Lewcid|http://lewcid.org]], [[FND|http://devpad.tiddlyspot.com]], [[ELS|http://www.tiddlytools.com]]|
|Source|[[FND's DevPad|http://devpad.tiddlyspot.com#DcTableOfContentsPlugin]]|
|Version|0.4.1|
|~CoreVersion|2.2|
<<showtoc>>
!Description
This macro will insert a table of contents reflecting the headings that are used in a tiddler and will be automatically updated when you make changes.  Each item in the table of contents can be clicked on to jump to that heading.  It can be used either inside of select tiddlers or inside a system wide template.

A parameter can be used to show the table of contents of a separate tiddler, &lt;<showtoc tiddlerTitle>&gt;

It will also place a link beside each header which will jump the screen to the top of the current tiddler.  This will only be displayed if the current tiddler is using the &lt;<showtoc>&gt; macro.

The appearance of the table of contents and the link to jump to the top can be modified using CSS.  An example of this is given below.

!Usage
!!Only in select tiddlers
The table of contents above is an example of how to use this macro in a tiddler.  Just insert &lt;<showtoc>&gt; in a tiddler on a line by itself.

It can also display the table of contents of another tiddler by using the macro with a parameter, &lt;<showtoc tiddlerTitle>&gt;
!!On every tiddler
It can also be used in a template to have it show on every tiddler.  An example ViewTemplate is shown below.

//{{{
<div class='toolbar' macro='toolbar -closeTiddler closeOthers +editTiddler permalink references jump'></div>
<div class='title' macro='view title'></div>
<div class='subtitle'>Created <span macro='view created date DD-MM-YY'></span>, updated <span macro='view modified date DD-MM-YY'></span></div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class="toc" macro='showtoc'></div>
<div class='viewer' macro='view text wikified'></div>
<div class='tagClear'></div>
//}}}

!Examples
If you had a tiddler with the following headings:
{{{
!Heading1a
!!Heading2a
!!Heading2b
!!!Heading3
!Heading1b
}}}
this table of contents would be automatically generated:
* Heading1a
** Heading2a
** Heading2b
*** Heading3
* Heading1b
!Changing how it looks
To modify the appearance, you can use CSS similar to the example below.
//{{{
.dcTOC ul {
	color: red;
	list-style-type: lower-roman;
}
.dcTOC a {
	color: green;
	border: none;
}

.dcTOC a:hover {
	background: white;
	border: solid 1px;
}
.dcTOCTop {
	font-size: 2em;
	color: green;
}
//}}}

!Revision History
!!v0.1.0 (2006-04-07)
* initial release
!!v0.2.0 (2006-04-10)
* added the [top] link on headings to jump to the top of the current tiddler
* appearance can now be customized using CSS
* all event handlers now return false
!!v0.3.0 (2006-04-12)
* added the ability to show the table of contents of a separate tiddler
* fixed an error when a heading had a ~WikiLink in it
!!v0.3.5 (2007-10-16)
* updated formatter object for compatibility with TiddlyWiki v2.2 (by Lewcid)
!!v0.4.0 (2007-11-14)
* added toggle button for collapsing/expanding table of contents element
* refactored documentation
!To Do
* code sanitizing/rewrite
* documentation refactoring
* use shadow tiddler for styles
!Code
***/
//{{{

// Register plugin version info with TiddlyWiki's extension registry.
// Revision bumped to 1 to match the |Version|0.4.1| declared in the
// plugin header table above (the code previously still said 0.4.0).
version.extensions.DcTableOfContentsPlugin= {
	major: 0, minor: 4, revision: 1,
	type: "macro",
	source: "http://devpad.tiddlyspot.com#DcTableOfContentsPlugin"
};

// Replace the core 'heading' wiki formatter with a wrapped version that,
// in addition to rendering the heading element as usual, appends a small
// "[top]" link to each heading — but only in tiddlers that use <<showtoc>>.
for (var n=0; n<config.formatters.length; n++) {
	var format = config.formatters[n];
	if (format.name == 'heading') {
		format.handler = function(w) {
			// following two lines is the default handler
			var e = createTiddlyElement(w.output, "h" + w.matchLength);
			w.subWikifyTerm(e, this.termRegExp); //updated for TW 2.2+

			// Only show [top] if current tiddler is using showtoc
			// (isTOCInTiddler is set by the showtoc macro handler below)
			if (w.tiddler && w.tiddler.isTOCInTiddler == 1) {
				// Create a container for the default CSS values
				var c = createTiddlyElement(e, "div");
				c.setAttribute("style", "font-size: 0.5em; color: blue;");
				// Create the link to jump to the top of the tiddler
				createTiddlyButton(c, " [top]", "Go to top of tiddler", window.scrollToTop, "dcTOCTop", null, null);
			}
		}
		// Only one 'heading' formatter exists; stop after replacing it
		break;
	}
}

// <<showtoc>> macro: builds a clickable table of contents from the "!"
// heading lines of the current tiddler (or of the tiddler named in the
// first parameter), and inserts it with a show/collapse toggle button.
config.macros.showtoc = {
	handler: function(place, macroName, params, wikifier, paramString, tiddler) {
		var text = "";
		var title = "";
		var myTiddler = null;

		// Did they pass in a tiddler?
		if (params.length) {
			title = params[0];
			myTiddler = store.getTiddler(title);
		} else {
			// No parameter: build the TOC for the tiddler being rendered
			myTiddler = tiddler;
		}

		if (myTiddler == null) {
			wikify("ERROR: Could not find " + title, place);
			return;
		}

		// Scan the raw wiki text line by line for headings
		var lines = myTiddler .text.split("\n");
		// Flag read by the overridden heading formatter to show [top] links
		myTiddler.isTOCInTiddler = 1;

		// Create a parent container so the TOC can be customized using CSS
		var r = createTiddlyElement(place, "div", null, "dcTOC");
		// create toggle button; it collapses/expands its next sibling (the TOC)
		createTiddlyButton(r, "toggle", "show/collapse table of contents",
			function() { config.macros.showtoc.toggleElement(this.nextSibling); },
			"toggleButton")
		// Create a container so the TOC can be customized using CSS
		var c = createTiddlyElement(r, "div");

		if (lines != null) {
			for (var x=0; x<lines.length; x++) {
				var line = lines[x];
				if (line.substr(0,1) == "!") {
					// Find first non ! char; i ends up as the heading level
					for (var i=0; i<line.length; i++) {
						if (line.substr(i, 1) != "!") {
							break;
						}
					}
					var desc = line.substring(i);
					// Remove WikiLinks so the TOC entry is plain text
					desc = desc.replace(/\[\[/g, "");
					desc = desc.replace(/\]\]/g, "");

					// Map heading level to a bullet-list level ("!!" -> "**")
					text += line.substr(0, i).replace(/[!]/g, '*');
					// NOTE(review): title and desc are interpolated into the
					// onClick string unescaped — a quote in either would break
					// the generated HTML; verify against expected tiddler names.
					text += '<html><a href="javascript:;" onClick="window.scrollToHeading(\'' + title + '\', \'' + desc+ '\', event)">' + desc+ '</a></html>\n';
				}
			}
		}
		// Render the accumulated bullet list into the TOC container
		wikify(text, c);
	}
}

// Toggle the visibility of a DOM element (used by the TOC's toggle button
// to collapse or expand the table-of-contents container).
config.macros.showtoc.toggleElement = function(e) {
	if (!e)
		return;
	// Hide when visible; restore default display when hidden
	e.style.display = (e.style.display != "none") ? "none" : "";
};

// Click handler for the "[top]" heading links: scrolls the window so the
// tiddler containing the clicked element is brought into view.
// Always returns false to suppress the default link action.
window.scrollToTop = function(evt) {
	var e = evt || window.event;

	var containing = story.findContainingTiddler(resolveTarget(e));
	if (containing)
		window.scrollTo(0, ensureVisible(containing));

	return false;
};

// Scroll the window to the heading whose text equals anchorName, inside
// the tiddler named `title` (opening/displaying it first), or inside the
// tiddler containing the event target when title is empty.
// Returns false so the generated javascript: link does nothing further.
window.scrollToHeading = function(title, anchorName, evt) {
	var tiddler = null;

	if (! evt)
		var evt = window.event;

	if (title) {
		// Make sure the target tiddler is displayed, then grab its DOM node
		story.displayTiddler(store.getTiddler(title), title, null, false);
		tiddler = document.getElementById(story.idPrefix + title);
	} else {
		var target = resolveTarget(evt);
		tiddler = story.findContainingTiddler(target);
	}

	if (tiddler == null)
		return false;
	
	// Collect heading elements h1..h5 (h6 is intentionally not scanned here)
	var children1 = tiddler.getElementsByTagName("h1");
	var children2 = tiddler.getElementsByTagName("h2");
	var children3 = tiddler.getElementsByTagName("h3");
	var children4 = tiddler.getElementsByTagName("h4");
	var children5 = tiddler.getElementsByTagName("h5");

	// Note: concat does not flatten NodeLists — this produces an array of
	// five NodeLists, hence the nested i/j loops below.
	var children = new Array();
	children = children.concat(children1, children2, children3, children4, children5);

	for (var i = 0; i < children.length; i++) {
		for (var j = 0; j < children[i].length; j++) {
			var heading = children[i][j].innerHTML;

			// Remove all HTML tags
			while (heading.indexOf("<") >= 0) {
				heading = heading.substring(0, heading.indexOf("<")) + heading.substring(heading.indexOf(">") + 1);
			}

			// Cut off the code added in showtoc for TOP (" [top]" = 6 chars).
			// NOTE(review): assumes every heading carries that suffix, i.e.
			// that the heading formatter above appended it — confirm for
			// TOCs generated for a tiddler other than the current one.
			heading = heading.substr(0, heading.length-6);

			if (heading == anchorName) {
				var y = findPosY(children[i][j]);
				window.scrollTo(0,y);
				return false;
			}
		}
	}
	return false
};
//}}}
[[Welcome!]]
//Ekpyrosis//

''-noun''
A theory of cosmology consisting of a collapsing universe, followed by a magical event, followed by whatever you want. An alternative to [[Inflation]].
A basic concept in statistical physics is the idea of //entropy//, or disorder. A low-entropy system is said to be in an  //ordered// state, and a high-entropy system is said to be in a //disordered// state. The Second Law of Thermodynamics states that any isolated system is overwhelmingly likely to evolve toward increasing disorder, or increasing entropy, with time. What does this mean?

Entropy comes from randomness. As an example of a random system, consider standard, six-sided dice. If I roll //one// die, there are six possible outcomes:

[img[Single Die Outcomes|./images/Dice/singledie.jpg]]

The die is equally likely to land with any of its faces upward, //i.e.// the numbers 1-6 have equal odds, a one-in-six chance, which we call the //probability//, and represent with a fraction 

\[P = \frac{1}{6}\].

Now if I roll //two// dice, there are \(6 \times 6 = 36\) possible outcomes:

[img[Two Dice Outcomes|./images/Dice/dicetable.jpg]]

The probability of any number showing on any one of the dice is the same as before, \(1/6\). So what is the probability of rolling two threes? Of the 36 possible outcomes for rolling two dice, only //one// of them is two threes:

[img[Two Threes|./images/Dice/twothrees.jpg]]

Since each of the thirty-six possible outcomes is equally likely, the probability of rolling two threes is one in thirty-six:

\[P = \frac{1}{36}.\]

Let's ask another question: what is the probability that one of the two dice (we don't care which) is a three, and the other is a four? It is not hard to see that there are two ways for this to happen:

[img[Three Four|./images/Dice/threefour.jpg]]

Since there are two ways to achieve the same outcome, and each outcome is equally likely, there is a probability of two in thirty-six, or one in eighteen, of getting one three and one four:

\[P = 2 \times \frac{1}{36} = \frac{1}{18}.\]

The number of different ways to achieve the same result is called the //multiplicity//. In our example, a result of two threes has a multiplicity of one, and one three and one four has a multiplicity of two. 

Let us ask yet another question: if I roll two dice, what is the probability that both dice will read the //same// number?  There are six ways to do this:

[img[Three Four|./images/Dice/same.jpg]]

The total probability is therefore six out of thirty-six, or one in six:

\[P= 6 \times \frac{1}{36} = \frac{1}{6}.\]

There are two rules:
''(1)  Any roll of the dice has the same probability as any other.''
''(2) The total probability of a given result is the probability of any particular roll times the number of different ways to achieve the result (the //multiplicity//).''

Results with a higher multiplicity are therefore more probable than results with lower multiplicity: the more ways there are for something to happen, the more likely it //will// happen. This, in a nutshell, is the idea of entropy. 

Suppose I roll three dice: what is the probability that all three dice will show the same number? This is easy to figure out. For three dice, the number of possible outcomes is \(6 \times 6 \times 6 = 6^{3} = 216\) (too many to diagram!). The probability of any particular outcome is one out of the number of possible outcomes:

\[P = \frac{1}{216}\]. 

How many different ways can I roll three dice the same? There are still only six:

[img[Three the same|./images/Dice/threesame.jpg]]

Therefore, rolling three dice the same has a probability of 6 out of 216:

\[P = 6 \times \frac{1}{216} = \frac{1}{36}.\]

What are the odds of //twelve// dice coming up the same number? The number of possible outcomes is \(6 \times 6 \times 6 \times 6 \times 6 \times 6 \times 6 \times 6 \times 6 \times 6 \times 6 \times 6 = 6^{12} = 2,176,782,336\): more than two //billion// possibilities! However, there are still only six ways for all of the dice to come up the same number:

[img[Twelve the same|./images/Dice/twelvesame.jpg]]

The probability of twelve dice coming up the same is

\[P = 6 \times \frac{1}{6^{12}} =  6 \times \frac{1}{2,176,782,336} = \frac{1}{362,797,056},\]

or around one in 360 million. Notice that there is no difference probability-wise between rolling twelve dice, and rolling one die twelve times, so from now on we will think of sequences of dice rolls. The numbers get very big very fast: the number of possible outcomes for sixty dice rolls is

\[6^{60} = 48,873,677,980,689,257,489,322,752,273,774,603,865,660,850,176\].

Note that each realization of a random process is //unique// and //equally unlikely//. If you roll sixty dice, //something// will happen, but the probability that you will get any particular outcome is the astonishingly low number of one in \(6^{60}\). Everything that happens in the world is a small miracle, fabulously improbable if looked at in enough detail. 

However, this does not mean that nature is hopelessly unpredictable! We may not be able to predict the specific outcome of the roll of sixty dice, but we can say with near-certainty that the dice will //not// come up all the same number.  If I roll sixty dice (or one die sixty times -- it doesn't matter), they will almost always come out more-or-less evenly distributed among each number, //i.e.// 10 ones (give or take a couple), 10 twos (give or take), and so on, arranged in some random order. The probability of any particular outcome is astronomically small, but there are an astronomically huge //number// of ways to roll 10 ones, 10 twos, and so forth. The probability of rolling ten of each number is then astronomically larger than the probability of  rolling the same number sixty times in a row. The difference is the multiplicity: the number of different ways to roll the same number sixty times is:

\[N = 6\].

Therefore, the probability that I will roll sixty in a row of the same number is

\[P = 6 \times \frac{1}{6^{60}},\]

or one chance in 8,145,612,996,781,542,914,887,125,378,962,433,977,610,141,696! You can roll dice from now until all the stars in the universe burn out, and it is still safe to say that you will //never// roll sixty in a row of the same number. 

The case where the numbers on the dice are about evenly distributed is very different. [[The number of different ways to roll ten of each number]] is:

\[N = 3,644,153,415,887,633,116,359,073,848,179,365,185,734,400\].

This means that, although the probability of any one outcome for 60 dice rolls is one in \(6^{60}\), the number of different ways to roll ten of each number is also astronomically large, so the total probability is

\[P = \frac{3,644,153,415,887,633,116,359,073,848,179,365,185,734,400}{48,873,677,980,689,257,489,322,752,273,774,603,865,660,850,176} \simeq  \frac{1}{13,412}.\]

A probability of about one in 13,000 is still pretty small odds, but this is //vastly// larger than the probability of rolling the same number 60 times. And remember that this is the probability of getting //exactly// ten of each number on the dice. Even if this exact outcome is unlikely, it is //very// likely you will get something close to this, //i.e.// a result more-or-less evenly divided among ones, twos, threes, etc.  

All of these big numbers are fun to write down, but they are a bit cumbersome. It is much more tractable to work with the logarithms of the big numbers, and this is exactly what the entropy is. The //entropy//, denoted by the symbol \(S\), is the logarithm of the multiplicity:

\[S = \ln{N}\],

where by convention we use the [[natural logarithm|https://en.wikipedia.org/wiki/Natural_logarithm]]. The multiplicity is then just the exponential of the entropy

\[N = e^{S}\], 

where the number \(e \approx 2.7182818\) is [[Napier's constant|https://en.wikipedia.org/wiki/Napier%27s_constant]]. Using this definition, the entropy of a set of sixty dice with all the same number is 

\[S = \ln{(6)} = 1.8\]

and the entropy of a set of sixty dice with 10 of each number is:

\[S = \ln{\left(\frac{60!}{(10!)^{6}}\right)} = 98\]. 

The higher the entropy, the higher the probability. Since the probability is proportional to the multiplicity, it is proportional to the //exponential// of the entropy:

\[P = P_0 e^{S}\],

where \(P_0\) is the probability of any specific outcome, such as a roll of the dice. 

Now the rule that entropy in the universe always increases, called the //Second Law of Thermodynamics//, is not so mysterious. It is simply a statement about probabilities. Suppose I arrange sixty dice in a box so that all have the number three showing:

[img[Sixty Threes|./images/Dice/sixtythrees.jpg]]

Any specific arrangement of the dice is called a //state// of the system. Sixty threes is a state of minimum entropy, since there is only one way to do it, and therefore the entropy is zero:

\[S = \ln{(1)} = 0\]. 

A state with low entropy (or, equivalently low multiplicity) is also referred to as an //ordered// state. Now suppose I close the box with the neatly arranged dice, and give it a shake. What will be the result?  The probability of any outcome is proportional to the exponential of the entropy of that outcome,

\[P = \frac{1}{6^{60}} e^S.\]

This means that, if I start in a low-entropy state, the system is //exponentially// likely to evolve to a high-entropy state at some later time. A high-entropy state is referred to as a //disordered// state. Systems evolve from order to disorder because disorder is astronomically more probable than order.  

This property of evolution from order (low entropy) to disorder (high entropy) doesn't just apply to dice, of course. It applies to //anything// involving random behavior.  The numbers involved in real physical systems are unimaginably large. Instead of sixty dice, there are around \(100,000,000,000,000,000,000,000\) gas molecules in a child's balloon, and the number of possible ways these molecules can move, spin, or vibrate is huge. And when the numbers involved get big enough, statements about probability come within a razor's edge of being statements about //certainty//. 

How many different ways are there to arrange a set of items, for example books on a bookshelf? There is only one way to arrange one book:

A

There are two ways to arrange two books:

A B
B A

How many ways are there to arrange three books? If I add a book "C" to my shelf, there are three possible locations: third, second, or first. For each of these possibilities, there are two ways to arrange the remaining books, giving six in total:

A B C
B A C

A C B
B C A

C A B
C B A



If I add a fourth book, "D", I can put it in any one of four places (first through fourth). For each of those choices, there are then //six// ways to arrange the remaining three books, so the total number of possible arrangements is

$N = 4 \times 6 = 24$. 

A B C D
B A C D                           
A C B D                           
B C A D                           
C A B D                           
C B A D                           

A B D C 
B A D C
A C D B 
B C D A
C A D B
C B D A

A D B C
B D A C
A D C B
B D C A
C D A B
C D B A

D A B C
D B A C
D A C B
D B C A
D C A B
D C B A

There is a pattern here: for one book, there is 

$N = 1$

way to arrange it. For two books, there are

$N = 2 \times 1 = 2$

ways to arrange them. For three books, there are 

$N = 3 \times 2 \times 1 = 6$

ways.  For four books, the number of possibilities is

$N = 4 \times 3 \times 2 \times 1 = 24.$

This is called a //factorial//, and is denoted by an exclamation point ($!$). The number of ways to arrange $n$ unique objects is given by $n!$ (read "n factorial"), defined as

$n! = n \times (n - 1) \times (n - 2) \times \cdots \times 3 \times 2 \times 1.$

For example, the number of ways to arrange six objects is 

$6! = 6 \times 5 \times 4 \times 3 \times 2 \times 1 = 720$. 

The exclamation point is appropriate, for the factorial is a wonderful and terrifying thing: the number of different ways to arrange 70 books on a shelf is approximately

$70! = 70 \times 69 \times 68 \times \cdots \times 2 \times 1 \simeq 10^{100},$

that is a one followed by 100 zeros, far greater than the number of atoms in the universe! 
[img(100%,auto)[./images/Inflation/MinkowskiLightcone.png]]

//Light cones in Minkowski Space. The past light cone defines the causal past of the event \(P\), and the future light cone defines the causal future of \(P\).//
/***
|''Name:''|FootnotesPlugin|
|''Description:''|Create automated tiddler footnotes.|
|''Author:''|Saq Imtiaz ( lewcid@gmail.com )|
|''Source:''|http://tw.lewcid.org/#FootnotesPlugin|
|''Code Repository:''|http://tw.lewcid.org/svn/plugins|
|''Version:''|2.01|
|''Date:''|10/25/07|
|''License:''|[[Creative Commons Attribution-ShareAlike 3.0 License|http://creativecommons.org/licenses/by-sa/3.0/]]|
|''~CoreVersion:''|2.2.2|

!!Usage:
*To create a footnote, just put the footnote text inside triple backticks.
*Footnotes are numbered automatically, and listed at the bottom of the tiddler.
*{{{Creating a footnote is easy. ```This is the text for my footnote```}}}
*[[Example|FootnotesDemo]]
***/
// /%
//!BEGIN-PLUGIN-CODE
// User-visible strings for the footnotes plugin:
// backLabel = text of the "[back]" link after each footnote,
// prompt    = tooltip on the in-text footnote number.
config.footnotesPlugin = {
	backLabel: "back",
	prompt:"show footnote"
};

// Wiki formatter that turns ```footnote text``` into a numbered,
// clickable footnote marker; the footnote bodies are collected on the
// tiddler element and rendered later by the refreshTiddler override.
config.formatters.unshift( {
    name: "footnotes",
    match: "```",
    // Non-greedy match across newlines between the triple-backtick fences
    lookaheadRegExp: /```((?:.|\n)*?)```/g,
    handler: function(w)
    {
        this.lookaheadRegExp.lastIndex = w.matchStart;
        var lookaheadMatch = this.lookaheadRegExp.exec(w.source);
        if(lookaheadMatch && lookaheadMatch.index == w.matchStart )
            {
			// NOTE(review): findContainingTiddler can return null (e.g. when
			// wikifying outside a tiddler) — this would then throw; confirm.
			var tiddler = story.findContainingTiddler(w.output);
			if (!tiddler.notes)
				tiddler.notes = [];
			var title = tiddler.getAttribute("tiddler");
			// pushUnique: identical footnote text shares one number
			tiddler.notes.pushUnique(lookaheadMatch[1]);
			var pos = tiddler.notes.indexOf(lookaheadMatch[1]) + 1;
			// The marker button scrolls to the matching list item rendered
			// by the refreshTiddler override (id = title + "ftn" + pos)
			createTiddlyButton(w.output,pos,config.footnotesPlugin.prompt,function(){var x = document.getElementById(title+"ftn"+pos);window.scrollTo(0,ensureVisible(x)+(ensureVisible(x)<findScrollY()?(findWindowHeight()-x.offsetHeight):0));return false;},"ftnlink",title+"ftnlink"+pos);			
			// Skip past the whole ```...``` span in the source
			w.nextMatch = lookaheadMatch.index + lookaheadMatch[0].length;
            }
    }
});

// Hijack Story.refreshTiddler to append the footnotes collected by the
// "footnotes" formatter (tiddler.notes) as an ordered list at the bottom
// of the tiddler's viewer element. Returns the refreshed tiddler element.
old_footnotes_refreshTiddler = Story.prototype.refreshTiddler;
Story.prototype.refreshTiddler = function(title,template,force)
{
    var tiddler = old_footnotes_refreshTiddler.apply(this,arguments);
	if (tiddler && tiddler.notes && tiddler.notes.length)
	{
		// Build the footnote list in a detached holder element
		var holder = createTiddlyElement(null,"div",null,"footnoteholder");
		var list = createTiddlyElement(holder,"ol",title+"footnoteholder");
		for (var i=0; i<tiddler.notes.length; i++)
		{
			// List-item id matches the in-text marker's scroll target
			var ftn = createTiddlyElement(list,"li",title+"ftn"+(i+1),"footnote");
			wikify(tiddler.notes[i]+" ",ftn);
			// "[back]" button scrolls back up to the in-text marker
			createTiddlyButton(ftn,"["+config.footnotesPlugin.backLabel+"]",config.footnotesPlugin.backLabel,function(){window.scrollTo(0,ensureVisible(document.getElementById(this.parentNode.id.replace("ftn","ftnlink"))));return false;},"ftnbklink");
		}
		// Locate the viewer element among the tiddler's children
		var viewer = null;
		var count = tiddler.childNodes.length;
		for (var j=0; j<count; j++){
			if(hasClass(tiddler.childNodes[j],"viewer")){
				viewer = tiddler.childNodes[j];	
			}
		}
		// Bug fix: only append when a viewer child actually exists — the
		// original dereferenced `viewer` unconditionally and threw a
		// TypeError for templates without a "viewer" element.
		if (viewer)
			viewer.appendChild(holder);
		// Reset so notes are re-collected on the next refresh
		tiddler.notes = [];
	}
    return tiddler;
};

// CSS for the footnote markers and the footnote list.
// Fixes: ".tiddler footnoteholder ol" was missing the class dot (every
// sibling rule uses ".footnoteholder"), and the div.footnoteholder rule
// was missing its "\n" separator.
setStylesheet(
".tiddler a.ftnlink {vertical-align: super; font-size: 0.8em; color:red;}\n"+
".tiddler a.ftnlink:hover, .tiddler .footnoteholder a.ftnbklink:hover{color:#fff;background:red;}\n"+
".tiddler div.footnoteholder{margin:1.8em 1.0em; padding:0.1em 1.0em 0.1em 1.0em ;border-left: 1px solid #ccc;}\n"+
".tiddler .footnoteholder ol {font-size: 0.9em; line-height: 1.2em;}\n"+
".tiddler .footnoteholder li.footnote {margin: 0 0 5px 0;}\n"+
".tiddler .footnoteholder a.ftnbklink{color:red;}\n","FootNotesStyles");
//!END-PLUGIN-CODE
// %/
/***
|Name|GATrackerPlugin|
|Description|Google Analytics tracker|
|Author|Julien Coloos|
|Version|1.2.0|
|Date|2011-05-18|
|Status|stable|
|Source|http://julien.coloos.free.fr/TiddlyWiki-dev/#GATrackerPlugin|
|License|[img[CC BY-SA 3.0|http://i.creativecommons.org/l/by-sa/3.0/80x15.png][http://creativecommons.org/licenses/by-sa/3.0/]]|
|CoreVersion|2.6.2|
|Documentation|http://julien.coloos.free.fr/TiddlyWiki-dev/#GATrackerPlugin|

!Description
This plugin enables Google Analytics tracking inside TiddlyWiki.

The version used is the asynchronous one ({{{ga.js}}}).
The plugin comes with its own configuration, which is stored persistently inside the (hidden) [[SystemSettings]] tiddler.
The configuration has to be set before being effective: it can be done in the plugin tiddler (see below) if TiddlyWiki is not in read-only mode. Tracking works if an account ID has been set, tracking has been enabled, and TiddlyWiki access is non-local.

Tracking can be reported as either:
* page views
** pages are named {{{/#Tiddler name}}}
* events
** Category: {{{Tiddlers}}}
** Action: {{{Open}}}, {{{Refresh}}}, {{{Edit}}}, {{{Search}}}, {{{Close}}} or {{{CloseAll}}}
** Label
*** for {{{CloseAll}}} action: excluded tiddler
*** for {{{Search}}} action: searched text
*** otherwise, tiddler name on which action is performed
** Value: for the {{{CloseAll}}} action, the number of closed tiddlers
** Note: Google Analytics script limits the number of events (1 every 5 seconds, with a burst limit of 10)
Tracking can be globally disabled, or enabled per action on each tiddler:
* //Open//: when tiddler was not yet displayed
** Note: default tiddlers do not trigger this action when accessing TiddlyWiki
* //Refresh//: when tiddler was already displayed
** Note: this action is automatically triggered after editing a tiddler
* //Edit//: when editing (or viewing in read-only mode) the tiddler
* //Close//: when tiddler was displayed
** this action is never tracked in //pages views// tracking
** the //CloseAll// action is triggered by the TiddyWiki links //close all// and //close others// if at least one tiddler was closed; individual tiddlers closed are not tracked as //Close// actions
* //Search//: when searching in tiddlers
** this action is never tracked in //pages views// tracking
** {{{CloseAll}}} and {{{Open}}} actions are not taken into account while search is performed: TiddlyWiki automically closes opened tiddlers before searching and opens tiddler that match the searched text


!Configuration
<<GATrackerConfig>>


!Revision History
!!v1.2.0 (2011-05-18)
Enhancements:
* do not trigger {{{CloseAll}}} and {{{Open}}} actions when search is performed
* added the {{{Search}}} action

!!v1.1.0 (2011-05-17)
Enhancements:
* do not trigger {{{Open}}} action when displaying default tiddlers
* added the {{{CloseAll}}} action

!!v1.0.0 (2011-05-14)
Initial release.


!Code
***/
//{{{
/* Google Analytics queue object. Needs to be global. */
/* Re-uses an existing _gaq if another script already created one. */
var _gaq = _gaq || [];

if (!config.extensions.GATracker) {(function($) {

version.extensions.GATrackerPlugin = {major: 1, minor: 2, revision: 0, date: new Date(2011, 5, 18)};

/* Prepare overridden TiddlyWiki displaying */
var trackOptions = {};
var displayDefault = 0, closingAll = 0, searching = 0;
var pl = config.extensions.GATracker = {
// Read a persisted option; returns null unless the option was saved as a
// "setting" (i.e. stored in SystemSettings rather than a cookie default).
getOption: function(optKey) {
	return (config.optionsSource && (config.optionsSource[optKey] == "setting")) ? config.options[optKey] : null;
},
// Persist an option value into TiddlyWiki's settings store.
setOption: function(optKey, value) {
	config.options[optKey] = value;
	config.optionsSource[optKey] = "setting";
	saveOption(optKey);
},
loadOptions: function() {
	var gaTrack = (pl.getOption("txt_GATracker_track") || "1,0,1,1,1,0,0").split(",");
	trackOptions = {
		id: pl.getOption("txt_GATracker_id"),
		enabled: parseInt(gaTrack[0] || "1"),
		type: parseInt(gaTrack[1] || "0"),
		events: {
			open: parseInt(gaTrack[2] || "1"),
			refresh: parseInt(gaTrack[3]) || "1",
			edit: parseInt(gaTrack[4] || "1"),
			close: parseInt(gaTrack[5] || "0"),
			search: parseInt(gaTrack[6] || "0")
		}
	};
	if (trackOptions.id && !trackOptions.id.length) {
		trackOptions.id = null;
	}
},
// Serialize trackOptions back into the persisted comma-separated form.
// NOTE(review): relies on for-in enumerating the events object in
// insertion order (open,refresh,edit,close,search) to match loadOptions —
// true in practice for string keys, but worth confirming.
saveOptions: function() {
	var opts = trackOptions.enabled && "1" || "0";
	opts += "," + trackOptions.type;
	for (var ev in trackOptions.events) {
		opts += "," + (trackOptions.events[ev] && "1" || "0");
	}
	pl.setOption("txt_GATracker_id", trackOptions.id || "");
	pl.setOption("txt_GATracker_track", opts);
},
// Queue one or more Google Analytics commands (each argument is an array
// in the classic _gaq command form).
track: function() {
	_gaq.push.apply(_gaq, arguments);
},
// Wrapper around story.displayDefaultTiddlers: sets a flag so the default
// tiddlers shown on startup are not tracked as "Open" actions.
trackAndDisplayDefaultTiddlers: function() {
	displayDefault = 1;
	try { pl.displayDefaultTiddlers.apply(this, arguments) } catch(e){};
	displayDefault = 0;
},
// Wrapper around story.displayTiddler: classifies the display as an
// Open / Refresh / Edit action and tracks it (as an event or a pageview,
// per trackOptions.type), then delegates to the original implementation.
trackAndDisplayTiddler: function(srcElement, tiddler, template, animate, unused, customFields, toggle, animationSrc) {
	if (!displayDefault) {
		var trackEvent, title = (tiddler instanceof Tiddler) ? tiddler.title : tiddler;
		if (story.getTiddler(title)) {
			/* Tiddler is already displayed */
			if (toggle === true) {
				/* Closing tiddler: tracked in separate function */
			}
			else if (template === DEFAULT_EDIT_TEMPLATE) {
				if (trackOptions.events.edit) trackEvent = "Edit";
			}
			else if (trackOptions.events.refresh) trackEvent = "Refresh";
		}
		// Tiddlers opened by a search are not tracked as "Open"
		else if (trackOptions.events.open && !searching) trackEvent = "Open";

		if (trackEvent) pl.track(trackOptions.type ? ["_trackPageview", "/#" + title] : ["_trackEvent", "Tiddlers", trackEvent, title]);
	}
	pl.displayTiddler.apply(this, arguments);
},
// Wrapper around story.closeTiddler: tracks an individual "Close" event,
// except while a close-all is in progress (then it only counts closures).
trackAndCloseTiddler: function(title, animate, unused) {
	if (closingAll) closingAll++;
	else pl.track(["_trackEvent", "Tiddlers", "Close", title]);
	pl.closeTiddler.apply(this, arguments);
},
// Wrapper around story.closeAllTiddlers: tracks one "CloseAll" event with
// the number of tiddlers actually closed (closingAll - 1), skipping the
// implicit close-all that TiddlyWiki performs before a search.
trackAndCloseAllTiddlers: function(excluded) {
	closingAll = 1;
	try { pl.closeAllTiddlers.apply(this, arguments) } catch(e){};
	if ((closingAll > 1) && !searching) pl.track(["_trackEvent", "Tiddlers", "CloseAll", excluded, closingAll - 1]);
	closingAll = 0;
},
// Wrapper around story.search: tracks the searched text as a "Search"
// event (events mode only) and flags the search so the resulting
// close-all/open operations are not tracked separately.
trackAndSearch: function(text, useCaseSensitive, useRegExp) {
	if (!trackOptions.type && trackOptions.events.search) pl.track(["_trackEvent", "Tiddlers", "Search", text]);
	searching = 1;
	try { pl.search.apply(this, arguments) } catch(e){};
	searching = 0;
}
};

// Plugin start-up: load persisted config, then install hooks when tracking
// is configured and the wiki is not being viewed from a local file.
pl.loadOptions();

/* Only track in non-local mode */
var local = "file:" == document.location.protocol;
if (!local && trackOptions.id && trackOptions.enabled) {
	/* Insert script tag to load GA (async, protocol-matched ga.js) */
	$("head").eq(0).prepend($("<script/>").attr({type: "text/javascript", async: "true", src: ("https:" == document.location.protocol ? "https://ssl" : "http://www") + ".google-analytics.com/ga.js"}));

	/* Override TiddlyWiki display; originals are kept on pl.* */
	pl.displayTiddler = story.displayTiddler;
	story.displayTiddler = pl.trackAndDisplayTiddler;
	pl.displayDefaultTiddlers = story.displayDefaultTiddlers;
	story.displayDefaultTiddlers = pl.trackAndDisplayDefaultTiddlers;
	// Close hooks only make sense in events mode with Close enabled
	if (!trackOptions.type && trackOptions.events.close) {
		pl.closeTiddler = story.closeTiddler;
		story.closeTiddler = pl.trackAndCloseTiddler;
		pl.closeAllTiddlers = story.closeAllTiddlers;
		story.closeAllTiddlers = pl.trackAndCloseAllTiddlers;
	}
	pl.search = story.search;
	story.search = pl.trackAndSearch;

	/* Initialize tracking */
	pl.track(["_setAccount", trackOptions.id], ["_trackPageview"]);
}

/* Macro <<GATrackerConfig>>: shows the plugin's tracking status and, unless
   the wiki is read-only, a configuration form for the GA account ID, the
   tracking mode (events vs. pages), and the per-event tracking toggles.
   Applying the form saves options and refreshes the containing tiddler. */
config.macros.GATrackerConfig = {
handler: function(place, macroName, params, wikifier, paramString, tiddler) {
	/* status line: enabled/disabled and local/non-local */
	$(createTiddlyElement(place, "div")).html("Tracking status: <span style='color:" + (trackOptions.id && trackOptions.enabled ? "green'>enabled" : "red'>disabled") + "</span> and <span style='color:" + (local ? "red'>" : "green'>non-") + "local</span>");
	if (readOnly) {
		$(createTiddlyElement(place, "div")).html("Configuration is not available in read-only mode");
		return;
	}
	/* build the configuration form and populate it from current options */
	var formNode = $(createTiddlyElement(place, "div")).html("<div>Google Analytics plugin configuration:</div><table><tr><td>Account ID:</td><td><input id='ga_id' type='text'/></td></tr><tr><td>Tracking:</td><td><input id='ga_enabled' type='checkbox'/>Enabled<br/><br/>How: <select id='ga_track'><option value='0'>Events</option><option value='1'>Pages</option></select><br/><br/>What:<br/><input id='ga_track_open' type='checkbox'/>Open<br/><input id='ga_track_refresh' type='checkbox'/>Refresh<br/><input id='ga_track_edit' type='checkbox'/>Edit<br/><input id='ga_track_close' type='checkbox'/>Close<br/><input id='ga_track_search' type='checkbox'/>Search<br/></td></tr></table><input id='ga_action_submit' type='submit' value='Apply'/>");
	$("#ga_id", formNode).val(trackOptions.id);
	$("#ga_enabled", formNode)[0].checked = trackOptions.enabled;
	$("#ga_track option", formNode).eq(trackOptions.type)[0].selected = true;
	for (var ev in trackOptions.events) {
		$("#ga_track_" + ev, formNode)[0].checked = trackOptions.events[ev];
	}
	/* Apply button: read the form back into trackOptions and persist */
	$("#ga_action_submit", formNode).click(function() {
		trackOptions.id = $("#ga_id", formNode).val();
		if (!trackOptions.id.length) trackOptions.id = null;
		trackOptions.enabled = $("#ga_enabled", formNode)[0].checked;
		/* explicit radix 10: avoids legacy octal interpretation in old engines */
		trackOptions.type = parseInt($("#ga_track", formNode).val(), 10);
		for (var ev in trackOptions.events) {
			trackOptions.events[ev] = $("#ga_track_" + ev, formNode)[0].checked;
		}
		pl.saveOptions();

		/* refresh the tiddler containing the form so the status line updates */
		var nodeDisplay = story.findContainingTiddler(place);
		var tiddlerDisplay;
		if (nodeDisplay) tiddlerDisplay = store.getTiddler(nodeDisplay.getAttribute("tiddler"));
		story.refreshTiddler(tiddlerDisplay ? tiddlerDisplay.title : tiddler.title, null, true);
	});
}
};

})(jQuery);}
//}}}
//Generic//

''-adj.''
Something I would like to prove is always true, but can't. 
I am amazed and confounded by the existence of state lotteries, such as the New York State Lottery (motto, "[[Hey, you never know|http://nylottery.ny.gov/]].") The money raised by the lottery is [[supposedly|http://www.osc.state.ny.us/reports/schools/1998/4-98.htm]] spent on education. This clearly does not work, since if children were actually being educated by the money spent, none of them would play the lottery when they grow up. To make matters worse, lottery sales are measured to be [[correlated with poverty|http://www.sciencetime.org/blog/?page_id=377]], which makes the lottery an incredibly regressive tax on people who are already financially desperate. 

The next time you stop into a convenience store to buy a lottery ticket, ask yourself the following question: "Would I be willing to bet on [1,2,3,4,5,6]?" If the answer is "no", then you shouldn't be playing the lottery. Let me explain why. 

The multi-state [[Mega Millions|http://www.megamillions.com/]] game works like this: the player chooses five different numbers from 1 to 56, and a "mega ball" number from 1 to 46. The mega ball number does not have to be different from the other five numbers chosen. If you match all five numbers (in any order) but not the mega number, you win $250,000. If you match all six numbers, you win an unspecified jackpot, with the largest cash jackpot in history being [[$240 million|http://en.wikipedia.org/wiki/Mega_Millions#Record_jackpots_.28listed_by_cash_value.29]]. 

What are the odds of winning? The first five numbers from 1 to 56 have to be five //different// numbers. The first choice has 56 possibilities. Once that number is chosen, the second choice has 55 possibilities, and so on. Therefore, the number of different ways one can choose five unique numbers from 1 to 56 is

$$N = 56 \times 55 \times 54 \times 53 \times 52 = 458,377,920$$. 

However, we don't care about the //order// of the numbers: the choice [1, 17, 22, 35, 40] is no different with respect to winning than [40, 1, 22, 17, 35]. So the total number of distinct lottery picks is the number of different five number sequences divided by the number of ways to rearrange any particular set of five different numbers, which is given by the [[factorial|Factorial]], \(5! = 5 \times 4 \times 3 \times 2 \times 1 = 120\):

$$N = \frac{56 \times 55 \times 54 \times 53 \times 52}{5 \times 4 \times 3 \times 2 \times 1} = 3,819,816$$. 

Therefore, the odds of winning the $250,000 jackpot are one in 3.8 million. Absolutely terrible odds! On average, you will have to spend $15 to win $1. (In actuality, it's not quite this bad, since matches of four or three numbers have smaller payouts and [[increase the expected return|http://www.durangobill.com/MegaMillionsOdds.html]]. Even taking this into account, the odds still suck.) What about with the sixth, "mega" ball? Since the mega ball doesn't have to be different from the first five numbers chosen, we just multiply by 46, and the number of different lottery picks, including the mega ball, is 

$$N = \frac{56 \times 55 \times 54 \times 53 \times 52}{5 \times 4 \times 3 \times 2 \times 1}  \times 46 = 175,711,536$$. 

The odds of winning the Mega Millions jackpot are about one in 176 million. By comparison, the [[odds of being struck by lightning|http://www.lightningsafety.noaa.gov/medical.htm]] in the next year are about one in 775,000, more than 200 times as likely as winning the lottery.  Nonetheless, if the payout is //bigger// than $176 million, spending a dollar on a lottery ticket is actually a pretty reasonable bet. Remember that you have to take [[taxes|http://taxfoundation.org/article/lottery-tax-rates-vary-greatly-state]] into account! For a winner in New York State, if you take the cash payout (and virtually everyone does), you will pay 25% Federal tax and 8.97% State tax on your winnings. That means that you will have to win at least $267 million to keep $176 million after taxes. 

Except there is one more catch: if more than one person picks the winning number, the jackpot is split, and the expected payout is correspondingly reduced. For $1 bet on a two-way split to be a good bet, the jackpot would have to be at least $534 million, which has never happened. The [[record Mega Millions jackpot|https://en.wikipedia.org/wiki/Mega_Millions#Record_jackpots]] paid $462 million on March 30, 2012, and was split three ways, which came to $154 million per winner before taxes. The [[total amount spent|http://articles.latimes.com/2012/mar/31/local/la-me-mega-millions-20120331]] on $1 tickets to win that $462 million jackpot was $1.5 billion, which meant that for each winning ticket there were //500 million// losing tickets, and the state governments cleared a cool billion dollars in revenue. Keep in mind that there were 1.5 billion tickets sold, but only 176 million possible numbers, so that on average every possible number was bet on more than 8 times during the five-week period between March 30 and the previous jackpot win of January 24. Any winner was virtually certain to have to split the jackpot.

Is there a way to minimize the likelihood of having to split the lottery jackpot? The key is to realize that //any// set of numbers is equally likely to win, so the chance of winning does not depend in any way on which numbers you select. To minimize your chance of having to split the pot, bet on a set of numbers that only an idiot would pick. Which brings us to the [[Spaceballs Strategy|http://www.youtube.com/watch?v=a6iW-8xPw3k]]: bet on the combination [1,2,3,4,5].  Add a "6" for the mega ball. Really. Your odds of hitting the jackpot on a Spaceballs bet are exactly the same as for any other bet you could make, but the odds of anybody else being crazy enough to make the same bet are most likely very, very small. The Spaceballs bet (or one like it) maximizes your chance of being the sole winner of the jackpot. 

"But," you say, "that sequence will never happen!" True enough. For a lottery held once a week, one would on average have to wait 3.4 million years for [1,2,3,4,5,6] to come up. However, the //same is true for any other set of six numbers//. If you think it's dumb to use the Spaceballs bet, then it's dumb to bet on any number in the lottery. Spend the dollar on a newspaper instead. Then the money really will go to education. 



(A short essay I wrote for the University at Buffalo Office of International Admissions' [[UBelong Summer Seminar|http://ubelongclub.wordpress.com/vip-circle/]]. The 2012 seminar discussion centers on the movie [[I Am|http://iamthedoc.com]].)

In the first moments of his documentary of personal transformation //I Am//, Tom Shadyac gets it wrong. It happens when author [[Lynne McTaggart|http://www.lynnemctaggart.com/]] says “science is just another story.” This simple sentence forms the intellectual heart of Shadyac’s 80-minute film: if science is just another story, then like any story we are free to re-write it any way we find comforting or appealing. 

Is science really just another story? No. If it were, science would be no more useful than the mysticism and myth that it replaced. Unlike a story, science is based on two ideas: first, that facts exist in the world, and second, that it is possible through systematic investigation to understand these facts. As a result, it is possible to actually know things about the world. The Nobel laureate  Richard Feynman explained beautifully the difficulty of this in Volume II of his //Lectures on Physics// (emphasis his): 

“The whole question of imagination in science is often misunderstood by people in other disciplines ... They overlook the fact that whatever we are //allowed// to imagine in science must be //consistent with everything else we know//: that the electric fields and the waves we talk about are not just some happy thoughts which we are free to make as we wish, but ideas which must be consistent with all the laws of physics we know. We can’t allow ourselves to seriously imagine things which are obviously in contradiction to the known laws of nature. And so our kind of imagination is quite a difficult game. One has to have the imagination to think of something that has never been seen before, never been heard of before. At the same time the thoughts are restricted in a strait jacket, so to speak, limited by the conditions that come from our knowledge of the way nature really is. The problem of creating something which is new, but which is consistent with everything that has been seen before, is one of extreme difficulty.”

Shadyac misunderstands this basic concept, and this misunderstanding makes him easy prey for new-age crackpots like McTaggart and [[Rollin McCraty|http://www.thelivingmatrixmovie.com/rollin-mcCraty]] of the [[Institute of HeartMath|http://www.heartmath.org/]] (the guy with the psychic yogurt.) What they do isn’t science, it is only science-like, and here Shadyac takes the Dalai Lama’s exhortation to “think critically, take action” and ignores the first half. The great physicist Wolfgang Pauli coined a phrase for stuff like psychic yogurt that is so crazy that it ceases to be engaged with facts at all: “not even wrong.” Nothing in the film exemplifies “not even wrong” better than the way Shadyac (following McTaggart’s book //The Field//) adopts the [[Einstein-Podolsky-Rosen|https://en.wikipedia.org/wiki/EPR_paradox]] (EPR) effect in quantum mechanics as the physical basis for a universal “field” that connects all consciousness. Einstein actually thought that EPR disproved quantum physics, but he was incorrect: modern experiments confirm that the quantum world really does exhibit what Einstein dismissively called //spukhafte Fernwirkung//, or “spooky action at a distance.” But it does so in a very subtle way, which makes it impossible – fundamentally – to use that “spooky action” as a way to communicate. It just doesn’t work that way. 

Shadyac completely misses the point and pushes his half-baked understanding to ridiculous conclusions, for example the idea that random number generators around the world suddenly became correlated on September 11, 2001. This claim actually comes from a Princeton, New Jersey paranormal researcher named [[Roger Nelson|http://noosphere.princeton.edu/]]. (Nelson isn’t at Princeton University, he just lives in Princeton.) I was able to download and read Nelson’s research paper on the subject, which by some terrible error was actually published in a peer-reviewed scientific journal [1]. It’s total nonsense. On September 11 2001, Nelson claims to have measured (and I have no doubt that he did) a correlation in 35 random number generators located around the world that would only occur by chance with a probability of about one in a billion. Impressive, right? The problem is that those random number generators produce about a billion random numbers per year. This means that one-in-a-billion anomalies are certain to be fairly common in Nelson’s data, and if you look hard enough you will certainly find some of these anomalies occurring at the same time as major world events. Particle physicists call this the “look elsewhere effect,” and a proper statistical treatment accounts for it. It is sadly telling that the correlation in Nelson’s random number generators actually began a few hours //before// the attacks of September 11. Nelson, based on no evidence at all, interprets this as some kind of precognition, but there is a simpler explanation: the whole thing was a coincidence, and a pretty predictable one at that. It certainly has absolutely nothing to do with quantum entanglement. 

There are a few things this movie gets right: all life on this planet is connected, because all life on the planet is related through the marvelous tree of life that Charles Darwin first explained a hundred and fifty years ago. Human beings (and many other animals) really are genetically hard-wired for empathy and altruism [2]. And just like the argon atoms in the film, there are infinitely more molecules in a single cup of water than there are cups of water in all the oceans of the world, so that any drink you take will almost certainly contain a few molecules that once existed in the physical person of the Buddha, seasoned with the tiniest drop of Cleopatra’s sweat. This is poetic, but so what? It’s also all dinosaur piss. We really are all connected, not by some pseudo-mystical quantum field, but by plain cause and effect. This rather mundane conclusion doesn’t diminish our responsibility to act as stewards of the world we live in, but it does mean that we don’t change //anything// by intention alone. 

Shadyac gives himself far too much credit, both for being the problem and for being the solution. When I stood on the bank of the Hudson River just above Chambers Street in Manhattan on a beautiful fall day in 2001 and saw a thousand lives extinguished in an instant, it was quite clear that negative energy from Hollywood was not the responsible party. Six months later when I stood outside Nelson Mandela’s [[tiny cell|https://en.wikipedia.org/wiki/File:Nelson_Mandela%27s_prison_cell,_Robben_Island,_South_Africa.jpg]] at Robben Island Prison in South Africa, it was equally clear that he did not survive twenty-seven years of hard labor because of quantum fairy dust. He did it with blood and bone and the force of human will, and the inspiration he provided helped liberate the nation he loved. 

It is in a way more narcissistic of Shadyac to think that living in a trailer park and riding his bike to work has any effect on this kind of world event than it was for him to live in a mansion and fly on private jets in the first place. For their part, McTaggart and McCraty exploit that singular Western combination of narcissism and guilt by feeding a privileged American like Shadyac the fantasy that he is actually a little god, capable of changing the world with his thoughts. He’s not. 



[1] R. D. Nelson, D. I. Radin, R. Shoup and P. A. Bancel, Foundations of Physics Letters, Volume 15, Number 6, 537 (2002). 

[2] Anyone interested in the evolutionary biology of altruism should read Richard Dawkins’ amazing book [[The Selfish Gene|http://www.amazon.com/The-Selfish-Gene-Edition-Introduction/dp/0199291152]], which provides a beautiful, cogent explanation of how altruism arises through natural selection. It is odd that the film dismisses this book out of hand, because it actually supports the thesis of the movie. 
/***
|Name|ImageSizePlugin|
|Source|http://www.TiddlyTools.com/#ImageSizePlugin|
|Version|1.2.3|
|Author|Eric Shulman|
|License|http://www.TiddlyTools.com/#LegalStatements|
|~CoreVersion|2.1|
|Type|plugin|
|Description|adds support for resizing images|
This plugin adds optional syntax to scale an image to a specified width and height and/or interactively resize the image with the mouse.
!!!!!Usage
<<<
The extended image syntax is:
{{{
[img(w+,h+)[...][...]]
}}}
where ''(w,h)'' indicates the desired width and height (in CSS units, e.g., px, em, cm, in, or %). Use ''auto'' (or a blank value) for either dimension to scale that dimension proportionally (i.e., maintain the aspect ratio). You can also calculate a CSS value 'on-the-fly' by using a //javascript expression// enclosed between """{{""" and """}}""". Appending a plus sign (+) to a dimension enables interactive resizing in that dimension (by dragging the mouse inside the image). Use ~SHIFT-click to show the full-sized (un-scaled) image. Use ~CTRL-click to restore the starting size (either scaled or full-sized).
<<<
!!!!!Examples
<<<
{{{
[img(100px+,75px+)[images/meow2.jpg]]
}}}
[img(100px+,75px+)[images/meow2.jpg]]
{{{
[<img(34%+,+)[images/meow.gif]]
[<img(21% ,+)[images/meow.gif]]
[<img(13%+, )[images/meow.gif]]
[<img( 8%+, )[images/meow.gif]]
[<img( 5% , )[images/meow.gif]]
[<img( 3% , )[images/meow.gif]]
[<img( 2% , )[images/meow.gif]]
[img(  1%+,+)[images/meow.gif]]
}}}
[<img(34%+,+)[images/meow.gif]]
[<img(21% ,+)[images/meow.gif]]
[<img(13%+, )[images/meow.gif]]
[<img( 8%+, )[images/meow.gif]]
[<img( 5% , )[images/meow.gif]]
[<img( 3% , )[images/meow.gif]]
[<img( 2% , )[images/meow.gif]]
[img(  1%+,+)[images/meow.gif]]
{{tagClear{
}}}
<<<
!!!!!Revisions
<<<
2011.09.03 [1.2.3] bypass addStretchHandlers() if no '+' suffix is used (i.e., not resizable)
2010.07.24 [1.2.2] moved tip/dragtip text to config.formatterHelpers.imageSize object to enable customization
2009.02.24 [1.2.1] cleanup width/height regexp, use '+' suffix for resizing
2009.02.22 [1.2.0] added stretchable images
2008.01.19 [1.1.0] added evaluated width/height values
2008.01.18 [1.0.1] regexp for "(width,height)" now passes all CSS values to browser for validation
2008.01.17 [1.0.0] initial release
<<<
!!!!!Code
***/
//{{{
version.extensions.ImageSizePlugin= {major: 1, minor: 2, revision: 3, date: new Date(2011,9,3)};
//}}}
//{{{
// Locate the core "image" wiki formatter and extend its syntax with an
// optional "(width,height)" size clause after the img keyword.
var f=config.formatters[config.formatters.findByField("name","image")];
f.match="\\[[<>]?[Ii][Mm][Gg](?:\\([^,]*,[^\\)]*\\))?\\[";
// capture groups: 1=float-left '<', 2=float-right '>', 3=width, 4=height,
// 5=tooltip, 6=image source, 7=optional link target
f.lookaheadRegExp=/\[([<]?)(>?)[Ii][Mm][Gg](?:\(([^,]*),([^\)]*)\))?\[(?:([^\|\]]+)\|)?([^\[\]\|]+)\](?:\[([^\]]*)\])?\]/mg;
// Formatter handler for the extended image syntax:
//   [img(w,h)[tooltip|src][link]]
// Parses the optional (width,height) clause, creates the <img> element
// (optionally wrapped in a link), applies scaling and stretch handlers,
// and resolves the image source.
f.handler=function(w) {
	this.lookaheadRegExp.lastIndex = w.matchStart;
	var lookaheadMatch = this.lookaheadRegExp.exec(w.source)
	if(lookaheadMatch && lookaheadMatch.index == w.matchStart) {
		var floatLeft=lookaheadMatch[1];
		var floatRight=lookaheadMatch[2];
		var width=lookaheadMatch[3];
		var height=lookaheadMatch[4];
		var tooltip=lookaheadMatch[5];
		var src=lookaheadMatch[6];
		var link=lookaheadMatch[7];

		// Simple bracketed link
		var e = w.output;
		if(link) { // LINKED IMAGE
			if (config.formatterHelpers.isExternalLink(link)) {
				if (config.macros.attach && config.macros.attach.isAttachment(link)) {
					// see [[AttachFilePluginFormatters]]
					e = createExternalLink(w.output,link);
					e.href=config.macros.attach.getAttachment(link);
					e.title = config.macros.attach.linkTooltip + link;
				} else
					e = createExternalLink(w.output,link);
			} else 
				e = createTiddlyLink(w.output,link,false,null,w.isStatic);
			addClass(e,"imageLink");
		}

		var img = createTiddlyElement(e,"img");
		if(floatLeft) img.align="left"; else if(floatRight) img.align="right";
		if(width||height) {
			var x=width.trim(); var y=height.trim();
			// a trailing '+' on either dimension enables interactive resizing
			var stretchW=(x.substr(x.length-1,1)=='+'); if (stretchW) x=x.substr(0,x.length-1);
			var stretchH=(y.substr(y.length-1,1)=='+'); if (stretchH) y=y.substr(0,y.length-1);
			// values wrapped in {{...}} are evaluated as javascript expressions
			if (x.substr(0,2)=="{{")
				{ try{x=eval(x.substr(2,x.length-4))} catch(e){displayMessage(e.description||e.toString())} }
			if (y.substr(0,2)=="{{")
				{ try{y=eval(y.substr(2,y.length-4))} catch(e){displayMessage(e.description||e.toString())} }
			img.style.width=x.trim(); img.style.height=y.trim();
			if (stretchW||stretchH) config.formatterHelpers.addStretchHandlers(img,stretchW,stretchH);
		}
		if(tooltip) img.title = tooltip;

		// GET IMAGE SOURCE
		if (config.macros.attach && config.macros.attach.isAttachment(src))
			src=config.macros.attach.getAttachment(src); // see [[AttachFilePluginFormatters]]
		else if (config.formatterHelpers.resolvePath) { // see [[ImagePathPlugin]]
			if (config.browser.isIE || config.browser.isSafari) {
				// IE/Safari: retry with a resolved path only if the raw src fails to load
				img.onerror=(function(){
					this.src=config.formatterHelpers.resolvePath(this.src,false);
					return false;
				});
			} else
				src=config.formatterHelpers.resolvePath(src,true);
		}
		img.src=src;
		w.nextMatch = this.lookaheadRegExp.lastIndex;
	}
}

// Customizable tooltip text for resizable images; 'dragtip' is prepended
// to 'tip' when drag-resizing is enabled on a dimension.
config.formatterHelpers.imageSize={
	tip: 'SHIFT-CLICK=show full size, CTRL-CLICK=restore initial size',
	dragtip: 'DRAG=stretch/shrink, '
}

// Attach mouse handlers that let the user stretch/shrink an image by
// dragging inside it. stretchW/stretchH select which dimensions may change.
config.formatterHelpers.addStretchHandlers=function(e,stretchW,stretchH) {
	e.title=((stretchW||stretchH)?this.imageSize.dragtip:'')+this.imageSize.tip;
	e.statusMsg='width=%0, height=%1';
	e.style.cursor='move';
	// remember the scaled starting size so CTRL-click can restore it
	e.originalW=e.style.width;
	e.originalH=e.style.height;
	// lower bound: 1/20th of the rendered size, but never below 10px
	e.minW=Math.max(e.offsetWidth/20,10);
	e.minH=Math.max(e.offsetHeight/20,10);
	e.stretchW=stretchW;
	e.stretchH=stretchH;
	// begin a drag-resize: record the anchor point and starting size
	e.onmousedown=function(ev) { var ev=ev||window.event;
		this.sizing=true;
		this.startX=!config.browser.isIE?ev.pageX:(ev.clientX+findScrollX());
		this.startY=!config.browser.isIE?ev.pageY:(ev.clientY+findScrollY());
		this.startW=this.offsetWidth;
		this.startH=this.offsetHeight;
		return false;
	};
	// while dragging, scale each enabled dimension proportionally to the
	// mouse position relative to the drag anchor, and show the new size
	e.onmousemove=function(ev) { var ev=ev||window.event;
		if (this.sizing) {
			var s=this.style;
			var currX=!config.browser.isIE?ev.pageX:(ev.clientX+findScrollX());
			var currY=!config.browser.isIE?ev.pageY:(ev.clientY+findScrollY());
			var newW=(currX-this.offsetLeft)/(this.startX-this.offsetLeft)*this.startW;
			var newH=(currY-this.offsetTop )/(this.startY-this.offsetTop )*this.startH;
			if (this.stretchW) s.width =Math.floor(Math.max(newW,this.minW))+'px';
			if (this.stretchH) s.height=Math.floor(Math.max(newH,this.minH))+'px';
			clearMessage(); displayMessage(this.statusMsg.format([s.width,s.height]));
		}
		return false;
	};
	// end a drag; SHIFT-click=full size (clear CSS), CTRL-click=initial size
	e.onmouseup=function(ev) { var ev=ev||window.event;
		if (ev.shiftKey) { this.style.width=this.style.height=''; }
		if (ev.ctrlKey)  { this.style.width=this.originalW; this.style.height=this.originalH; }
		this.sizing=false;
		clearMessage();
		return false;
	};
	// leaving the image cancels any in-progress resize
	e.onmouseout=function(ev) { var ev=ev||window.event;
		this.sizing=false;
		clearMessage();
		return false;
	};
}
//}}}
The plugins in this package provide interactive functionality for importing/exporting tiddlers to/from other TiddlyWiki documents.  Additional plugins provide enhanced local/remote file I/O features, including "save as", "save from web" and "upload" functionality.
/***
|Name|ImportTiddlersPlugin|
|Source|http://www.TiddlyTools.com/#ImportTiddlersPlugin|
|Documentation|http://www.TiddlyTools.com/#ImportTiddlersPluginInfo|
|Version|4.6.2|
|Author|Eric Shulman|
|License|http://www.TiddlyTools.com/#LegalStatements|
|~CoreVersion|2.1|
|Type|plugin|
|Description|interactive controls for import/export with filtering.|
Combine tiddlers from any two TiddlyWiki documents.  Interactively select and copy tiddlers from another TiddlyWiki source document.  Includes prompting for skip, rename, merge or replace actions when importing tiddlers that match existing titles.  When done, a list of all imported tiddlers is written into [[ImportedTiddlers]].
!!!!!Documentation
<<<
see [[ImportTiddlersPluginInfo]] for details
<<<
!!!!!interactive control panel
<<<
<<importTiddlers inline>>
{{clear{
^^(see also: [[ImportTiddlers]] shadow tiddler)^^}}}
<<<
!!!!!Revisions
<<<
2011.02.14 4.6.2 fix OSX error: use picker.file.path
2009.10.10 4.6.1 in createImportPanel, Use {{{window.Components}}} instead of {{{config.browser.isGecko}}} to avoid applying FF3 'file browse' fixup in Chrome.
2009.10.06 4.6.0 added createTiddlerFromFile (import text files)
|please see [[ImportTiddlersPluginInfo]] for additional revision details|
2005.07.20 1.0.0 Initial Release
<<<
!!!!!Code
***/
//{{{
// Version stamp. NOTE(review): JS Date months are 0-based, so (2011,2,14) is
// March 14, 2011 even though the revision header says 2011.02.14 — confirm.
version.extensions.ImportTiddlersPlugin= {major: 4, minor: 6, revision: 2, date: new Date(2011,2,14)};

// IE needs explicit global scoping for functions/vars called from browser events
// (the functions are declared later in this plugin; function hoisting applies)
window.onClickImportButton=onClickImportButton;
window.refreshImportList=refreshImportList;

// default cookie/option values
if (!config.options.chkImportReport) config.options.chkImportReport=true;

// default shadow definition
config.shadowTiddlers.ImportTiddlers='<<importTiddlers inline>>';

// use shadow tiddler content in backstage panel
if (config.tasks) config.tasks.importTask.content='<<tiddler ImportTiddlers>>' // TW2.2 or above
//}}}
//{{{
// backward-compatibility for TW2.0.x and TW1.2.x
if (config.macros.importTiddlers==undefined) config.macros.importTiddlers={};
if (typeof merge=='undefined') {
	// fallback merge(): copy properties of src into dst; when preserveExisting
	// is set, only properties that dst does not already define are copied
	function merge(dst,src,preserveExisting) {
		for(var i in src) { if(!preserveExisting || dst[i] === undefined) dst[i] = src[i]; }
		return dst;
	}
}
if (config.browser.isGecko===undefined)
	config.browser.isGecko=(config.userAgent.indexOf('gecko')!=-1);
//}}}
//{{{
// UI strings and persistent state for the importTiddlers macro
merge(config.macros.importTiddlers,{
	$: function(id) { return document.getElementById(id); }, // abbreviation
	label: 'import tiddlers',
	prompt: 'Copy tiddlers from another document',
	openMsg: 'Opening %0',
	openErrMsg: 'Could not open %0 - error=%1',
	readMsg: 'Read %0 bytes from %1',
	foundMsg: 'Found %0 tiddlers in %1',
	filterMsg: "Filtered %0 tiddlers matching '%1'",
	summaryMsg: '%0 tiddler%1 in the list',
	summaryFilteredMsg: '%0 of %1 tiddler%2 in the list',
	plural: 's are',
	single: ' is',
	countMsg: '%0 tiddlers selected for import',
	processedMsg: 'Processed %0 tiddlers',
	importedMsg: 'Imported %0 of %1 tiddlers from %2',
	loadText: 'please load a document...',
	closeText: 'close',
	doneText: 'done',
	startText: 'import',
	stopText: 'stop',
	local: true,		// default to import from local file
	src: '',		// path/filename or URL of document to import (retrieved from SiteUrl)
	proxy: '',		// URL for remote proxy script (retrieved from SiteProxy)
	useProxy: false,	// use specific proxy script in front of remote URL
	inbound: null,		// hash-indexed array of tiddlers from other document
	newTags: '',		// text of tags added to imported tiddlers
	addTags: true,		// add new tags to imported tiddlers
	listsize: 10,		// # of lines to show in imported tiddler list
	importTags: true,	// include tags from remote source document when importing a tiddler
	keepTags: true,		// retain existing tags when replacing a tiddler
	sync: false,		// add 'server' fields to imported tiddlers (for sync function)
	lastFilter: '',		// most recent filter (URL hash) applied
	lastAction: null,	// most recent collision button performed
	index: 0,		// current processing index in import list
	sort: ''		// sort order for imported tiddler listbox
});
//}}}
//{{{
// hijack core macro handler: save the core <<importTiddlers>> handler once,
// then dispatch on the first macro parameter:
//   (none)/'core' -> core handler (or a fallback menu button)
//   'link'        -> button that toggles the floating panel
//   'inline'      -> panel embedded directly in the tiddler
//   anything else -> delegate to the loadTiddlers macro, if present
if (config.macros.importTiddlers.coreHandler==undefined)
	config.macros.importTiddlers.coreHandler=config.macros.importTiddlers.handler;

config.macros.importTiddlers.handler = function(place,macroName,params,wikifier,paramString,tiddler) {
	if (!params[0] || params[0].toLowerCase()=='core') { // default to built in
		if (config.macros.importTiddlers.coreHandler)
			config.macros.importTiddlers.coreHandler.apply(this,arguments);
		else 
			createTiddlyButton(place,this.label,this.prompt,onClickImportMenu);
	} else if (params[0]=='link') { // show link to floating panel
		createTiddlyButton(place,params[1]||this.label,params[2]||this.prompt,onClickImportMenu);
	} else if (params[0]=='inline') {// show panel as INLINE tiddler content
		createImportPanel(place);
		this.$('importPanel').style.position='static';
		this.$('importPanel').style.display='block';
	} else if (config.macros.loadTiddlers)
		config.macros.loadTiddlers.handler(place,macroName,params); // any other params: loadtiddlers
}
//}}}
//{{{
// Click handler for the 'import tiddlers' link/button: lazily (re)builds the
// floating control panel next to the clicked element and toggles it
// open/closed, animating the transition when chkAnimate is set.
function onClickImportMenu(e) { var e=e||window.event;
	var parent=resolveTarget(e).parentNode;
	var panel=document.getElementById('importPanel');
	var needNewPanel=(panel==undefined || panel.parentNode!=parent);
	if (needNewPanel) panel=createImportPanel(parent);
	var isOpen=(panel.style.display=='block');
	if (config.options.chkAnimate) {
		anim.startAnimating(new Slider(panel,!isOpen,false,'none'));
	} else {
		panel.style.display=isOpen?'none':'block';
	}
	// stop the event so the click doesn't bubble into other handlers
	e.cancelBubble = true;
	if (e.stopPropagation) e.stopPropagation();
	return(false);
}
//}}}
//{{{
// Create control panel: HTML, CSS
// (Re)builds the import panel inside 'place' from the ##css and ##html
// sections of this plugin tiddler, then populates the form controls from
// the saved macro state. Returns the panel element.
function createImportPanel(place) {
	var cmi=config.macros.importTiddlers; // abbrev
	var panel=cmi.$('importPanel');
	if (panel) { panel.parentNode.removeChild(panel); } // discard any old panel
	setStylesheet(store.getTiddlerText('ImportTiddlersPlugin##css'),'importTiddlers');
	panel=createTiddlyElement(place,'span','importPanel',null,null)
	panel.innerHTML=store.getTiddlerText('ImportTiddlersPlugin##html');
	refreshImportList();
	// default source URL/proxy from the SiteUrl/SiteProxy tiddlers when unset
	if (!cmi.src.length) cmi.src=store.getTiddlerText('SiteUrl')||'';
	cmi.$('importSourceURL').value=cmi.src;
	if (!cmi.proxy.length) cmi.proxy=store.getTiddlerText('SiteProxy')||'SiteProxy';
	cmi.$('importSiteProxy').value=cmi.proxy;
	if (window.Components) { // FF3 FIXUP
		cmi.$('fileImportSource').style.display='none';
		cmi.$('importLocalPanelFix').style.display='block';
	}
	cmi.$('chkSync').checked=cmi.sync;
	cmi.$('chkImportTags').checked=cmi.importTags;
	cmi.$('chkKeepTags').checked=cmi.keepTags;
	cmi.$('chkAddTags').checked=cmi.addTags;
	cmi.$('txtNewTags').value=cmi.newTags;
	cmi.$('txtNewTags').style.display=cmi.addTags?'block':'none';
	cmi.$('chkSync').checked=cmi.sync; // NOTE(review): duplicate of the chkSync line above — harmless but redundant
	cmi.$('chkImportReport').checked=config.options.chkImportReport;
	return panel;
}
//}}}
//{{{
// process control interactions
// Central click handler for every button/control in the import panel.
// 'which' is the clicked DOM element (dispatched on which.id);
// 'event' is the triggering DOM event (only used by 'importSelectFeed').
// Returns false when it has consumed the event.
function onClickImportButton(which,event) {
	var cmi=config.macros.importTiddlers; // abbreviation
	var list=cmi.$('importList'); if (!list) return false;
	var thePanel=cmi.$('importPanel');
	var theCollisionPanel=cmi.$('importCollisionPanel');
	var theNewTitle=cmi.$('importNewTitle');
	var count=0;
	switch (which.id)
		{
		case 'importFromFile':	// show local panel
		case 'importFromWeb':	// show HTTP panel
			cmi.local=(which.id=='importFromFile');
			cmi.showPanel('importLocalPanel',cmi.local);
			cmi.showPanel('importHTTPPanel',!cmi.local);
			break;
		case 'importOptions':	// show/hide options panel
			cmi.showPanel('importOptionsPanel',cmi.$('importOptionsPanel').style.display=='none');
			break;
		case 'fileImportSource':
		case 'importLoad':		// load import source into hidden frame
			importReport();		// if an import was in progress, generate a report
			cmi.inbound=null;	// clear the imported tiddler buffer
			refreshImportList();	// reset/resize the listbox
			if (cmi.src=='') break;
			// Load document, read its DOM and fill the list
			cmi.loadRemoteFile(cmi.src,cmi.filterTiddlerList);
			break;
		case 'importSelectFeed':	// select a pre-defined systemServer feed URL
			// create popup with feed list; on select, insert feed URL
			// into the input field and immediately trigger a load
			var p=Popup.create(which); if (!p) return false;
			var tids=store.getTaggedTiddlers('systemServer');
			if (!tids.length)
				createTiddlyText(createTiddlyElement(p,'li'),'no pre-defined server feeds');
			for (var t=0; t<tids.length; t++) {
				var u=store.getTiddlerSlice(tids[t].title,'URL');
				var d=store.getTiddlerSlice(tids[t].title,'Description');
				if (!d||!d.length) d=store.getTiddlerSlice(tids[t].title,'description');
				if (!d||!d.length) d=u;
				createTiddlyButton(createTiddlyElement(p,'li'),tids[t].title,d,
					function(){
						var u=this.getAttribute('url');
						document.getElementById('importSourceURL').value=u;
						config.macros.importTiddlers.src=u;
						document.getElementById('importLoad').onclick();
					},
					null,null,null,{url:u});
			}
			Popup.show();
			event.cancelBubble = true;
			if (event.stopPropagation) event.stopPropagation();
			return false;
			// NOTE(review): the two comment lines and 'break' below are
			// unreachable (the 'return false' above always exits this case)
			// create popup with feed list
			// onselect, insert feed URL into input field.
			break;
		case 'importSelectAll':		// select all tiddler list items (i.e., not headings)
			importReport();		// if an import was in progress, generate a report
			for (var t=0,count=0; t < list.options.length; t++) {
				if (list.options[t].value=='') continue;
				list.options[t].selected=true;
				count++;
			}
			clearMessage(); displayMessage(cmi.countMsg.format([count]));
			cmi.$('importStart').disabled=!count;
			break;
		case 'importSelectNew':		// select tiddlers not in current document
			importReport();		// if an import was in progress, generate a report
			for (var t=0,count=0; t < list.options.length; t++) {
				list.options[t].selected=false;
				if (list.options[t].value=='') continue;
				list.options[t].selected=!store.tiddlerExists(list.options[t].value);
				count+=list.options[t].selected?1:0;
			}
			clearMessage(); displayMessage(cmi.countMsg.format([count]));
			cmi.$('importStart').disabled=!count;
			break;
		case 'importSelectChanges':		// select tiddlers that are updated from existing tiddlers
			importReport();		// if an import was in progress, generate a report
			for (var t=0,count=0; t < list.options.length; t++) {
				list.options[t].selected=false;
				if (list.options[t].value==''||!store.tiddlerExists(list.options[t].value)) continue;
				// linear scan; 'inbound' deliberately leaks out of this loop.
				// NOTE(review): if no inbound title matches, 'inbound' is left
				// holding the LAST buffer entry -- assumed unreachable because
				// list items are built from cmi.inbound; confirm before relying on it
				for (var i=0; i<cmi.inbound.length; i++) // find matching inbound tiddler
					{ var inbound=cmi.inbound[i]; if (inbound.title==list.options[t].value) break; }
				list.options[t].selected=(inbound.modified-store.getTiddler(list.options[t].value).modified>0); // updated tiddler
				count+=list.options[t].selected?1:0;
			}
			clearMessage(); displayMessage(cmi.countMsg.format([count]));
			cmi.$('importStart').disabled=!count;
			break;
		case 'importSelectDifferences':		// select tiddlers that are new or different from existing tiddlers
			importReport();		// if an import was in progress, generate a report
			for (var t=0,count=0; t < list.options.length; t++) {
				list.options[t].selected=false;
				if (list.options[t].value=='') continue;
				if (!store.tiddlerExists(list.options[t].value)) { list.options[t].selected=true; count++; continue; }
				for (var i=0; i<cmi.inbound.length; i++) // find matching inbound tiddler
					{ var inbound=cmi.inbound[i]; if (inbound.title==list.options[t].value) break; }
				list.options[t].selected=(inbound.modified-store.getTiddler(list.options[t].value).modified!=0); // changed tiddler
				count+=list.options[t].selected?1:0;
			}
			clearMessage(); displayMessage(cmi.countMsg.format([count]));
			cmi.$('importStart').disabled=!count;
			break;
		case 'importApplyFilter':	// filter list to include only matching tiddlers
			importReport();		// if an import was in progress, generate a report
			clearMessage();
			if (!cmi.all) // no tiddlers loaded = '0 selected'
				{ displayMessage(cmi.countMsg.format([0])); return false; }
			var hash=cmi.$('importLastFilter').value;
			cmi.inbound=cmi.filterByHash('#'+hash,cmi.all);
			refreshImportList();	// reset/resize the listbox
			break;
		case 'importStart':		// initiate the import processing
			importReport();		// if an import was in progress, generate a report
			cmi.$('importApplyToAll').checked=false;
			cmi.$('importStart').value=cmi.stopText;
			// cmi.index>0 means a previous run is paused on a collision: stop it;
			// otherwise start processing from the top of the list
			if (cmi.index>0) cmi.index=-1; // stop processing
			else cmi.index=importTiddlers(0); // or begin processing
			importStopped();
			break;
		case 'importClose':		// unload imported tiddlers or hide the import control panel
			// if imported tiddlers not loaded, close the import control panel
			if (!cmi.inbound) { thePanel.style.display='none'; break; }
			importReport();		// if an import was in progress, generate a report
			cmi.inbound=null;	// clear the imported tiddler buffer
			refreshImportList();	// reset/resize the listbox
			break;
		case 'importSkip':	// don't import the tiddler
			cmi.lastAction=which;
			var theItem	= list.options[cmi.index];
			for (var j=0;j<cmi.inbound.length;j++)
			if (cmi.inbound[j].title==theItem.value) break;
			var theImported = cmi.inbound[j];
			theImported.status='skipped after asking';			// mark item as skipped
			theCollisionPanel.style.display='none';
			cmi.index=importTiddlers(cmi.index+1);	// resume with NEXT item
			importStopped();
			break;
		case 'importRename':		// change name of imported tiddler
			cmi.lastAction=which;
			var theItem		= list.options[cmi.index];
			for (var j=0;j<cmi.inbound.length;j++)
			if (cmi.inbound[j].title==theItem.value) break;
			var theImported		= cmi.inbound[j];
			theImported.status	= 'renamed from '+theImported.title;	// mark item as renamed
			theImported.set(theNewTitle.value,null,null,null,null);		// change the tiddler title
			theItem.value		= theNewTitle.value;			// change the listbox item text
			theItem.text		= theNewTitle.value;			// change the listbox item text
			theCollisionPanel.style.display='none';
			cmi.index=importTiddlers(cmi.index);	// resume with THIS item
			importStopped();
			break;
		case 'importMerge':	// join existing and imported tiddler content
			cmi.lastAction=which;
			var theItem	= list.options[cmi.index];
			for (var j=0;j<cmi.inbound.length;j++)
			if (cmi.inbound[j].title==theItem.value) break;
			var theImported	= cmi.inbound[j];
			var theExisting	= store.getTiddler(theItem.value);
			// append imported text after the existing text, separated by a
			// horizontal rule and a 'merged from' provenance line
			var theText	= theExisting.text+'\n----\n^^merged from: ';
			theText		+='[['+cmi.src+'#'+theItem.value+'|'+cmi.src+'#'+theItem.value+']]^^\n';
			theText		+='^^'+theImported.modified.toLocaleString()+' by '+theImported.modifier+'^^\n'+theImported.text;
			var theDate	= new Date();
			var theTags	= theExisting.getTags()+' '+theImported.getTags();
			theImported.set(null,theText,null,theDate,theTags);
			theImported.status   = 'merged with '+theExisting.title;	// mark item as merged
			theImported.status  += ' - '+theExisting.modified.formatString('MM/DD/YYYY 0hh:0mm:0ss');
			theImported.status  += ' by '+theExisting.modifier;
			theCollisionPanel.style.display='none';
			cmi.index=importTiddlers(cmi.index);	// resume with this item
			importStopped();
			break;
		case 'importReplace':		// substitute imported tiddler for existing tiddler
			cmi.lastAction=which;
			var theItem		  = list.options[cmi.index];
			for (var j=0;j<cmi.inbound.length;j++)
			if (cmi.inbound[j].title==theItem.value) break;
			var theImported     = cmi.inbound[j];
			var theExisting	  = store.getTiddler(theItem.value);
			theImported.status  = 'replaces '+theExisting.title;		// mark item for replace
			theImported.status += ' - '+theExisting.modified.formatString('MM/DD/YYYY 0hh:0mm:0ss');
			theImported.status += ' by '+theExisting.modifier;
			theCollisionPanel.style.display='none';
			cmi.index=importTiddlers(cmi.index);	// resume with THIS item
			importStopped();
			break;
		case 'importListSmaller':		// decrease current listbox size, minimum=5
			if (list.options.length==1) break;
			list.size-=(list.size>5)?1:0;
			cmi.listsize=list.size;
			break;
		case 'importListLarger':		// increase current listbox size, maximum=number of items in list
			if (list.options.length==1) break;
			list.size+=(list.size<list.options.length)?1:0;
			cmi.listsize=list.size;
			break;
		case 'importListMaximize':	// toggle listbox size between current and maximum
			if (list.options.length==1) break;
			list.size=(list.size==list.options.length)?cmi.listsize:list.options.length;
			break;
		}
}
//}}}
//{{{
// Show or hide a panel element (by DOM element or element ID).
// 'show' selects the target state; 'skipAnim' forces an instant change.
config.macros.importTiddlers.showPanel=function(place,show,skipAnim) {
	var el=(typeof place=='string')?document.getElementById(place):place;
	if (!el||!el.style) return;
	var animate=!skipAnim&&anim&&config.options.chkAnimate;
	if (animate)
		anim.startAnimating(new Slider(el,show,false,'none'));
	else
		el.style.display=show?'block':'none';
}
//}}}
//{{{
// Rebuild the import listbox contents and enable/disable panel controls
// to match the current state (no tiddlers loaded vs. inbound tiddlers
// present).  'selectedIndex' is the listbox row that triggered the
// refresh: rows 0-3 are headings that select the sort order (title/
// modified/tags); rows >3 are tiddler items, for which only the
// selection count is updated (no list rebuild).
function refreshImportList(selectedIndex) {
	var cmi=config.macros.importTiddlers; // abbrev
	var list=cmi.$('importList'); if (!list) return;
	// if nothing to show, reset list content and size
	if (!cmi.inbound) {
		while (list.length > 0) { list.options[0] = null; }
		list.options[0]=new Option(cmi.loadText,'',false,false);
		list.size=cmi.listsize;
		cmi.$('importLoad').disabled=false;
		cmi.$('importLoad').style.display='inline';
		cmi.$('importStart').disabled=true;
		cmi.$('importOptions').disabled=true;
		cmi.$('importOptions').style.display='none';
		cmi.$('fileImportSource').disabled=false;
		cmi.$('importFromFile').disabled=false;
		cmi.$('importFromWeb').disabled=false;
		cmi.$('importStart').value=cmi.startText;
		cmi.$('importClose').value=cmi.doneText;
		cmi.$('importSelectPanel').style.display='none';
		cmi.$('importOptionsPanel').style.display='none';
		return;
	}
	// there are inbound tiddlers loaded...
	cmi.$('importLoad').disabled=true;
	cmi.$('importLoad').style.display='none';
	cmi.$('importOptions').style.display='inline';
	cmi.$('importOptions').disabled=false;
	cmi.$('fileImportSource').disabled=true;
	cmi.$('importFromFile').disabled=true;
	cmi.$('importFromWeb').disabled=true;
	cmi.$('importClose').value=cmi.closeText;
	if (cmi.$('importSelectPanel').style.display=='none')
		cmi.showPanel('importSelectPanel',true);

	// get the sort order
	if (!selectedIndex)   selectedIndex=0;
	if (selectedIndex==0) cmi.sort='title';		// heading
	if (selectedIndex==1) cmi.sort='title';
	if (selectedIndex==2) cmi.sort='modified';
	if (selectedIndex==3) cmi.sort='tags';
	if (selectedIndex>3) {
		// display selected tiddler count
		for (var t=0,count=0; t < list.options.length; t++) {
			if (!list.options[t].selected) continue;
			if (list.options[t].value!='')
				count+=1;
			else { // if heading is selected, deselect it, and then select and count all in section
				list.options[t].selected=false;
				for ( t++; t<list.options.length && list.options[t].value!=''; t++) {
					list.options[t].selected=true;
					count++;
				}
			}
		}
		clearMessage(); displayMessage(cmi.countMsg.format([count]));
	}
	// NOTE(review): when selectedIndex<=3, 'count' is undefined here (its
	// var is hoisted from the loop above), so '!count' is true and the
	// start button is disabled until a tiddler row is selected
	cmi.$('importStart').disabled=!count;
	if (selectedIndex>3) return; // no refresh needed

	// get the alphasorted list of tiddlers
	var tiddlers=cmi.inbound;
	tiddlers.sort(function (a,b) {if(a['title'] == b['title']) return(0); else return (a['title'] < b['title']) ? -1 : +1; });
	// clear current list contents
	while (list.length > 0) { list.options[0] = null; }
	// add heading and control items to list
	var i=0;
	var indent=String.fromCharCode(160)+String.fromCharCode(160); // two non-breaking spaces
	if (cmi.all.length==tiddlers.length)
		var summary=cmi.summaryMsg.format([tiddlers.length,(tiddlers.length!=1)?cmi.plural:cmi.single]);
	else
		var summary=cmi.summaryFilteredMsg.format([tiddlers.length,cmi.all.length,(cmi.all.length!=1)?cmi.plural:cmi.single]);
	list.options[i++]=new Option(summary,'',false,false);
	list.options[i++]=new Option(((cmi.sort=='title'   )?'>':indent)+' [by title]','',false,false);
	list.options[i++]=new Option(((cmi.sort=='modified')?'>':indent)+' [by date]','',false,false);
	list.options[i++]=new Option(((cmi.sort=='tags')?'>':indent)+' [by tags]','',false,false);
	// output the tiddler list
	switch(cmi.sort) {
		case 'title':
			for(var t = 0; t < tiddlers.length; t++)
				list.options[i++] = new Option(tiddlers[t].title,tiddlers[t].title,false,false);
			break;
		case 'modified':
			// sort descending for newest date first
			tiddlers.sort(function (a,b) {if(a['modified'] == b['modified']) return(0); else return (a['modified'] > b['modified']) ? -1 : +1; });
			var lastSection = '';
			for(var t = 0; t < tiddlers.length; t++) {
				var tiddler = tiddlers[t];
				var theSection = tiddler.modified.toLocaleDateString();
				// emit a date heading each time the (locale) date changes
				if (theSection != lastSection) {
					list.options[i++] = new Option(theSection,'',false,false);
					lastSection = theSection;
				}
				list.options[i++] = new Option(indent+indent+tiddler.title,tiddler.title,false,false);
			}
			break;
		case 'tags':
			// group titles under each tag; a tiddler appears once per tag
			var theTitles = {}; // all tiddler titles, hash indexed by tag value
			var theTags = new Array();
			for(var t=0; t<tiddlers.length; t++) {
				var title=tiddlers[t].title;
				var tags=tiddlers[t].tags;
				if (!tags || !tags.length) {
					if (theTitles['untagged']==undefined) { theTags.push('untagged'); theTitles['untagged']=new Array(); }
					theTitles['untagged'].push(title);
				}
				else for(var s=0; s<tags.length; s++) {
					if (theTitles[tags[s]]==undefined) { theTags.push(tags[s]); theTitles[tags[s]]=new Array(); }
					theTitles[tags[s]].push(title);
				}
			}
			theTags.sort();
			for(var tagindex=0; tagindex<theTags.length; tagindex++) {
				var theTag=theTags[tagindex];
				list.options[i++]=new Option(theTag,'',false,false);
				for(var t=0; t<theTitles[theTag].length; t++)
					list.options[i++]=new Option(indent+indent+theTitles[theTag][t],theTitles[theTag][t],false,false);
			}
			break;
		}
	list.selectedIndex=selectedIndex;		  // select current control item
	if (list.size<cmi.listsize) list.size=cmi.listsize;
	if (list.size>list.options.length) list.size=list.options.length;
}
//}}}
//{{{
// re-entrant processing for handling import with interactive collision prompting
// re-entrant processing for handling import with interactive collision prompting.
// Walks the listbox from 'startIndex', importing each selected tiddler.
// Returns the list index of the first collision that needs user input
// (caller shows the collision panel and later resumes from that index),
// or -1 when the entire list has been processed.
function importTiddlers(startIndex) {
	var cmi=config.macros.importTiddlers; // abbrev
	if (!cmi.inbound) return -1;
	var list=cmi.$('importList'); if (!list) return;
	var t;
	// if starting new import, reset import status flags
	if (startIndex==0)
		for (var t=0;t<cmi.inbound.length;t++)
			cmi.inbound[t].status='';
	for (var i=startIndex; i<list.options.length; i++) {
		// if list item is not selected or is a heading (i.e., has no value), skip it
		if ((!list.options[i].selected) || ((t=list.options[i].value)==''))
			continue;
		// find the inbound tiddler matching this list item's title
		for (var j=0;j<cmi.inbound.length;j++)
			if (cmi.inbound[j].title==t) break;
		var inbound = cmi.inbound[j];
		var theExisting = store.getTiddler(inbound.title);
		// avoid redundant import for tiddlers that are listed multiple times (when 'by tags')
		if (inbound.status=='added')
			continue;
		// don't import the 'ImportedTiddlers' history from the other document...
		if (inbound.title=='ImportedTiddlers')
			continue;
		// if tiddler exists and import not marked for replace or merge, stop importing
		// (return this index so the caller can ask the user what to do)
		if (theExisting && (inbound.status.substr(0,7)!='replace') && (inbound.status.substr(0,5)!='merge'))
			return i;
		// assemble tags (remote + existing + added)
		var newTags = '';
		if (cmi.importTags)
			newTags+=inbound.getTags()	// import remote tags
		if (cmi.keepTags && theExisting)
			newTags+=' '+theExisting.getTags(); // keep existing tags
		if (cmi.addTags && cmi.newTags.trim().length)
			newTags+=' '+cmi.newTags; // add new tags
		inbound.set(null,null,null,null,newTags.trim());
		// set the status to 'added' (if not already set by the 'ask the user' UI)
		inbound.status=(inbound.status=='')?'added':inbound.status;
		// set sync fields
		if (cmi.sync) {
			if (!inbound.fields) inbound.fields={}; // for TW2.1.x backward-compatibility
			inbound.fields['server.page.revision']=inbound.modified.convertToYYYYMMDDHHMM();
			inbound.fields['server.type']='file';
			inbound.fields['server.host']=(cmi.local&&!cmi.src.startsWith('file:')?'file:///':'')+cmi.src;
		}
		// do the import!  (notifications suspended so the display only
		// refreshes once, after the whole batch)
		store.suspendNotifications();
		store.saveTiddler(inbound.title, inbound.title, inbound.text, inbound.modifier, inbound.modified, inbound.tags, inbound.fields, true, inbound.created);
                store.fetchTiddler(inbound.title).created = inbound.created; // force creation date to imported value (needed for TW2.1.x and earlier)
		store.resumeNotifications();
		}
	return(-1);	// signals that we really finished the entire list
}
// Called after importTiddlers() returns: either wrap up the finished
// import, or surface the collision panel for the item it stopped on
// (cmi.index==-1 means the whole list was processed).
function importStopped() {
	var cmi=config.macros.importTiddlers; // abbrev
	var list=cmi.$('importList');
	if (!list) return;
	var titleField=cmi.$('importNewTitle');
	if (cmi.index!=-1) {
		// import collision: show the collision panel and pre-fill
		// the rename field with the conflicting title
		cmi.$('importStart').value=cmi.stopText;
		cmi.showPanel('importCollisionPanel',true);
		titleField.value=list.options[cmi.index].value;
		// 'apply to all' auto-repeats the previous action (except rename)
		if (cmi.$('importApplyToAll').checked && cmi.lastAction && cmi.lastAction.id!='importRename')
			onClickImportButton(cmi.lastAction);
	} else {
		cmi.$('importStart').value=cmi.startText;
		importReport();	// import finished... generate the report
	}
}
//}}}
//{{{
// Summarize the just-completed (or aborted) import: display counts,
// optionally append an entry to the 'ImportedTiddlers' report tiddler,
// clear per-tiddler status flags, and mark the store dirty so the
// display refreshes.  No-op when no inbound tiddlers are loaded.
function importReport() {
	var cmi=config.macros.importTiddlers; // abbrev
	if (!cmi.inbound) return;
	// if import was not completed, the collision panel will still be open... close it now.
	var panel=cmi.$('importCollisionPanel'); if (panel) panel.style.display='none';
	// get the alphasorted list of tiddlers
	var tiddlers = cmi.inbound;
	// gather the statistics: 'total' = all tiddlers with a status,
	// 'count' = those actually imported (i.e., not skipped)
	var count=0; var total=0;
	for (var t=0; t<tiddlers.length; t++) {
		if (!tiddlers[t].status || !tiddlers[t].status.trim().length) continue;
		if (tiddlers[t].status.substr(0,7)!='skipped') count++;
		total++;
	}
	// generate a report
	if (total) displayMessage(cmi.processedMsg.format([total]));
	if (count && config.options.chkImportReport) {
		// get/create the report tiddler
		var theReport = store.getTiddler('ImportedTiddlers');
		if (!theReport) { theReport=new Tiddler(); theReport.title='ImportedTiddlers'; theReport.text=''; }
		// format the report content
		var now = new Date();
		var newText = 'On '+now.toLocaleString()+', '+config.options.txtUserName
		newText +=' imported '+count+' tiddler'+(count==1?'':'s')+' from\n[['+cmi.src+'|'+cmi.src+']]:\n';
		if (cmi.addTags && cmi.newTags.trim().length)
			newText += 'imported tiddlers were tagged with: "'+cmi.newTags+'"\n';
		newText += '<<<\n';
		for (var t=0; t<tiddlers.length; t++) if (tiddlers[t].status)
			newText += '#[['+tiddlers[t].title+']] - '+tiddlers[t].status+'\n';
		newText += '<<<\n';
		// update the ImportedTiddlers content and show the tiddler
		// (newest report entry goes on top, separated by a rule)
		theReport.text	 = newText+((theReport.text!='')?'\n----\n':'')+theReport.text;
		theReport.modifier = config.options.txtUserName;
		theReport.modified = new Date();
                store.saveTiddler(theReport.title, theReport.title, theReport.text, theReport.modifier, theReport.modified, theReport.tags, theReport.fields);
		story.displayTiddler(null,theReport.title,1,null,null,false);
		story.refreshTiddler(theReport.title,1,true);
	}
	// reset status flags
	for (var t=0; t<cmi.inbound.length; t++) cmi.inbound[t].status='';
	// mark document as dirty and let display update as needed
	if (count) { store.setDirty(true); store.notifyAll(); }
	// always show final message when tiddlers were actually loaded
	if (count) displayMessage(cmi.importedMsg.format([count,tiddlers.length,cmi.src.replace(/%20/g,' ')]));
}
//}}}
//{{{
// File and XMLHttpRequest I/O
// Show a platform-specific 'open file' dialog and return the chosen
// path ('' if cancelled).  'here' is the triggering element; its
// tooltip supplies the dialog message.  Uses XPCOM on Mozilla,
// ActiveX (XP SP2) on IE, falling back to a plain prompt().
// Relies on 'var result' hoisting: branches re-declare it on purpose.
config.macros.importTiddlers.askForFilename=function(here) {
	var msg=here.title; // use tooltip as dialog box message
	var path=getLocalPath(document.location.href);
	var slashpos=path.lastIndexOf('/'); if (slashpos==-1) slashpos=path.lastIndexOf('\\'); 
	if (slashpos!=-1) path = path.substr(0,slashpos+1); // remove filename from path, leave the trailing slash
	var file='';
	var result='';
	if(window.Components) { // moz
		try {
			netscape.security.PrivilegeManager.enablePrivilege('UniversalXPConnect');

			var nsIFilePicker = window.Components.interfaces.nsIFilePicker;
			var picker = Components.classes['@mozilla.org/filepicker;1'].createInstance(nsIFilePicker);
			picker.init(window, msg, nsIFilePicker.modeOpen);
			// start browsing in the current document's directory
			var thispath = Components.classes['@mozilla.org/file/local;1'].createInstance(Components.interfaces.nsILocalFile);
			thispath.initWithPath(path);
			picker.displayDirectory=thispath;
			picker.defaultExtension='html';
			picker.defaultString=file;
			picker.appendFilters(nsIFilePicker.filterAll|nsIFilePicker.filterText|nsIFilePicker.filterHTML);
			if (picker.show()!=nsIFilePicker.returnCancel) var result=picker.file.path;
		}
		catch(e) { alert('error during local file access: '+e.toString()) }
	}
	else { // IE
		try { // XPSP2 IE only
			var s = new ActiveXObject('UserAccounts.CommonDialog');
			s.Filter='All files|*.*|Text files|*.txt|HTML files|*.htm;*.html|';
			s.FilterIndex=3; // default to HTML files;
			s.InitialDir=path;
			s.FileName=file;
			if (s.showOpen()) var result=s.FileName;
		}
		catch(e) {  // fallback
			var result=prompt(msg,path+file);
		}
	}
	return result;
}

// Load the import source: fetch URLs via XMLHttpRequest, read anything
// else from the local filesystem (with a relative-path fixup retry).
// 'callback(success,params,txt,src,xhr)' receives the loaded text.
// Returns null when no source was given; otherwise completion is
// signalled through the callback.
config.macros.importTiddlers.loadRemoteFile = function(src,callback) {
	if (src==undefined || !src.length) return null; // filename is required
	var original=src; // URL as specified
	var hashpos=src.indexOf('#'); if (hashpos!=-1) src=src.substr(0,hashpos); // URL with #... suffix removed (needed for IE)
	clearMessage();
	displayMessage(this.openMsg.format([src.replace(/%20/g,' ')]));
	// FIX: recognize https: (and mixed-case schemes) as remote URLs; the
	// previous substr(0,5) test only matched 'http:'/'file:' exactly, so
	// https sources were wrongly routed to the local filesystem reader
	if (!/^(https?|file):/i.test(src)) { // if not a URL, read from local filesystem
		var txt=loadFile(src);
		if (!txt) { // file didn't load, might be relative path.. try fixup
			var pathPrefix=document.location.href;  // get current document path and trim off filename
			var slashpos=pathPrefix.lastIndexOf('/'); if (slashpos==-1) slashpos=pathPrefix.lastIndexOf('\\'); 
			if (slashpos!=-1 && slashpos!=pathPrefix.length-1) pathPrefix=pathPrefix.substr(0,slashpos+1);
			src=pathPrefix+src;
			if (!/^https?:/i.test(pathPrefix)) src=getLocalPath(src);
			var txt=loadFile(src);
		}
		if (!txt) { // file still didn't load, report error
			displayMessage(config.macros.importTiddlers.openErrMsg.format([src.replace(/%20/g,' '),'(filesystem error)']));
		} else {
			displayMessage(config.macros.importTiddlers.readMsg.format([txt.length,src.replace(/%20/g,' ')]));
			// TW2.5.2 core already delivers unicode; other versions need conversion
			if (version.major+version.minor*.1+version.revision*.01!=2.52) txt=convertUTF8ToUnicode(txt);
			if (callback) callback(true,original,txt,src,null);
		}
	} else {
		doHttp('GET',src,null,null,config.options.txtRemoteUsername,config.options.txtRemotePassword,callback,original,null);
	}
}

// Parse a TiddlyWiki document's HTML into a temporary store and
// return its tiddlers sorted by title.
config.macros.importTiddlers.readTiddlersFromHTML=function(html){
	var tempStore=new TiddlyWiki();
	tempStore.importTiddlyWiki(html);
	return tempStore.getTiddlers('title');
}

// Parse CSV text into tiddlers (one per data row) and return them
// sorted by title.  The first line supplies column names; columns named
// title/created/modified/modifier/text/tags map to tiddler attributes,
// any other column becomes a custom field.  Quoted values may contain
// commas and newlines; these are protected with sentinel strings during
// line/field splitting, then decoded afterwards -- the encode/decode
// ordering here is load-bearing.
config.macros.importTiddlers.readTiddlersFromCSV=function(CSV){
	var remoteStore=new TiddlyWiki();
	// GET NAMES
	var lines=CSV.replace(/\r/g,'').split('\n');
	var names=lines.shift().replace(/"/g,'').split(',');
	CSV=lines.join('\n');
	// ENCODE commas and newlines within quoted values
	var comma='!~comma~!'; var commaRE=new RegExp(comma,'g');
	var newline='!~newline~!'; var newlineRE=new RegExp(newline,'g');
	CSV=CSV.replace(/"([^"]*?)"/g,
		function(x){ return x.replace(/\,/g,comma).replace(/\n/g,newline); });
	// PARSE lines
	var lines=CSV.split('\n');
	for (var i=0; i<lines.length; i++) { if (!lines[i].length) continue;
		var values=lines[i].split(',');
		// DECODE commas, newlines, and doubled-quotes, and remove enclosing quotes (if any)
		for (var v=0; v<values.length; v++)
			values[v]=values[v].replace(commaRE,',').replace(newlineRE,'\n')
				.replace(/^"|"$/g,'').replace(/""/g,'"');
		// EXTRACT tiddler values (defaults: now, current user)
		var title=''; var text=''; var tags=[]; var fields={};
		var created=null; var when=new Date(); var who=config.options.txtUserName;
		for (var v=0; v<values.length; v++) { var val=values[v];
			if (names[v]) switch(names[v].toLowerCase()) {
				case 'title':	title=val.replace(/\[\]\|/g,'_'); break;
				case 'created': created=new Date(val); break;
				case 'modified':when=new Date(val); break;
				case 'modifier':who=val; break;
				case 'text':	text=val; break;
				case 'tags':	tags=val.readBracketedList(); break;
				default:	fields[names[v].toLowerCase()]=val; break;
			}
		}
		// CREATE tiddler in temporary store (rows without a title are dropped)
		if (title.length)
			remoteStore.saveTiddler(title,title,text,who,when,tags,fields,true,created||when);
	}
	return remoteStore.getTiddlers('title');
}

// Wrap raw file content in a single tiddler titled with the filename
// portion of 'src'.  Returns a one-element array for uniformity with
// the other readTiddlersFrom* helpers.
config.macros.importTiddlers.createTiddlerFromFile=function(src,txt) {
	var tid=new Tiddler();
	var slash=src.lastIndexOf("/");
	if (slash==-1) slash=src.lastIndexOf("\\");
	tid.title=(slash==-1)?src:src.substr(slash+1);
	tid.text=txt;
	tid.created=tid.modified=new Date();
	tid.modifier=config.options.txtUserName;
	// auto-tag javascript files as plugins
	if (src.substr(src.length-3,3)=='.js') tid.tags=['systemConfig'];
	return [tid];
}

// Callback for loadRemoteFile(): parse the fetched text, apply the
// URL-hash filter, and refresh the import listbox.
config.macros.importTiddlers.filterTiddlerList=function(success,params,txt,src,xhr){
	var cmi=config.macros.importTiddlers; // abbreviation
	var src=src.replace(/%20/g,' ');
	if (!success) {
		displayMessage(cmi.openErrMsg.format([src,xhr.status]));
		return;
	}
	// try each known source format in turn: TiddlyWiki HTML, then CSV,
	// then fall back to wrapping the raw text in a single tiddler
	cmi.all=cmi.readTiddlersFromHTML(txt);
	if (!cmi.all||!cmi.all.length) cmi.all=cmi.readTiddlersFromCSV(txt);
	if (!cmi.all||!cmi.all.length) cmi.all=cmi.createTiddlerFromFile(src,txt);
	var count=cmi.all?cmi.all.length:0;
	// report the source location without any ?query suffix
	var querypos=src.lastIndexOf('?');
	if (querypos!=-1) src=src.substr(0,querypos);
	displayMessage(cmi.foundMsg.format([count,src]));
	cmi.inbound=cmi.filterByHash(params,cmi.all); // use full URL including hash (if any)
	cmi.$('importLastFilter').value=cmi.lastFilter;
	window.refreshImportList(0);
}

// Filter 'tiddlers' by the #hash portion of 'src'.  The hash is parsed
// as TiddlyWiki params: bare values / 'open:' select by title, 'tag:'
// selects tagged tiddlers (boolean expressions when MatchTagsPlugin is
// present), 'story:' selects a tiddler's outbound links, and 'search:'
// selects by text substring.  Returns the matching subset (or the full
// list when there is no usable hash) and records it in 'lastFilter'.
config.macros.importTiddlers.filterByHash=function(src,tiddlers){
	var hashpos=src.lastIndexOf('#'); if (hashpos==-1) return tiddlers;
	var hash=src.substr(hashpos+1); if (!hash.length) return tiddlers;
	var tids=[];
	var params=hash.parseParams('anon',null,true,false,false);
	// params[0] holds parse metadata; actual values start at index 1
	for (var p=1; p<params.length; p++) {
		switch (params[p].name) {
			case 'anon':
			case 'open':
				tids.pushUnique(params[p].value);
				break;
			case 'tag':
				if (store.getMatchingTiddlers) { // for boolean expressions - see MatchTagsPlugin
					var r=store.getMatchingTiddlers(params[p].value,null,tiddlers);
					for (var t=0; t<r.length; t++) tids.pushUnique(r[t].title);
				} else for (var t=0; t<tiddlers.length; t++)
					if (tiddlers[t].isTagged(params[p].value))
						tids.pushUnique(tiddlers[t].title);
				break;
			case 'story':
				// changed() refreshes the tiddler's links[] before use
				for (var t=0; t<tiddlers.length; t++)
					if (tiddlers[t].title==params[p].value) {
						tiddlers[t].changed();
						for (var s=0; s<tiddlers[t].links.length; s++)
							tids.pushUnique(tiddlers[t].links[s]);
						break;
					}
				break;
			case 'search':
				for (var t=0; t<tiddlers.length; t++)
					if (tiddlers[t].text.indexOf(params[p].value)!=-1)
						tids.pushUnique(tiddlers[t].title);
				break;
		}
	}
	// preserve original ordering by re-walking 'tiddlers' against the matched titles
	var matches=[];
	for (var t=0; t<tiddlers.length; t++)
		if (tids.contains(tiddlers[t].title))
			matches.push(tiddlers[t]);
	displayMessage(config.macros.importTiddlers.filterMsg.format([matches.length,hash]));
	config.macros.importTiddlers.lastFilter=hash;
	return matches;
}
//}}}
/***
!!!Control panel CSS
//{{{
!css
#importPanel {
	display: none; position:absolute; z-index:11; width:35em; right:105%; top:3em;
	background-color: #eee; color:#000; font-size: 8pt; line-height:110%;
	border:1px solid black; border-bottom-width: 3px; border-right-width: 3px;
	padding: 0.5em; margin:0em; -moz-border-radius:1em;-webkit-border-radius:1em;
}
#importPanel a, #importPanel td a { color:#009; display:inline; margin:0px; padding:1px; }
#importPanel table { width:100%; border:0px; padding:0px; margin:0px; font-size:8pt; line-height:110%; background:transparent; }
#importPanel tr { border:0px;padding:0px;margin:0px; background:transparent; }
#importPanel td { color:#000; border:0px;padding:0px;margin:0px; background:transparent; }
#importPanel select { width:100%;margin:0px;font-size:8pt;line-height:110%;}
#importPanel input  { width:98%;padding:0px;margin:0px;font-size:8pt;line-height:110%}
#importPanel .box { border:1px solid #000; background-color:#eee; padding:3px 5px; margin-bottom:5px; -moz-border-radius:5px;-webkit-border-radius:5px;}
#importPanel .topline { border-top:1px solid #999; padding-top:2px; margin-top:2px; }
#importPanel .rad { width:auto; }
#importPanel .chk { width:auto; margin:1px;border:0; }
#importPanel .btn { width:auto; }
#importPanel .btn1 { width:98%; }
#importPanel .btn2 { width:48%; }
#importPanel .btn3 { width:32%; }
#importPanel .btn4 { width:23%; }
#importPanel .btn5 { width:19%; }
#importPanel .importButton { padding: 0em; margin: 0px; font-size:8pt; }
#importPanel .importListButton { padding:0em 0.25em 0em 0.25em; color: #000000; display:inline }
#backstagePanel #importPanel { left:10%; right:auto; }
!end
//}}}
!!!Control panel HTML
//{{{
!html
<!-- source and report -->
<table><tr><td align=left>
	import from
	<input type="radio" class="rad" name="importFrom" id="importFromFile" value="file" CHECKED
		onclick="onClickImportButton(this,event)" title="show file controls"> local file
	<input type="radio" class="rad" name="importFrom" id="importFromWeb"  value="http"
		onclick="onClickImportButton(this,event)" title="show web controls"> web server
</td><td align=right>
	<input type=checkbox class="chk" id="chkImportReport"
		onClick="config.options['chkImportReport']=this.checked;"> create report
</td></tr></table>

<div class="box" id="importSourcePanel" style="margin:.5em">
<div id="importLocalPanel" style="display:block;margin-bottom:2px;"><!-- import from local file  -->
enter or browse for source path/filename<br>
<input type="file" id="fileImportSource" size=57 style="width:100%"
	onKeyUp="config.macros.importTiddlers.src=this.value"
	onChange="config.macros.importTiddlers.src=this.value;document.getElementById('importLoad').onclick()">
<div id="importLocalPanelFix" style="display:none"><!-- FF3 FIXUP -->
	<input type="text" id="fileImportSourceFix" style="width:90%"
		title="Enter a path/file to import"
		onKeyUp="config.macros.importTiddlers.src=this.value"
		onChange="config.macros.importTiddlers.src=this.value;document.getElementById('importLoad').onclick()">
	<input type="button" id="fileImportSourceFixButton" style="width:7%" value="..."
		title="Select a path/file to import"
		onClick="var r=config.macros.importTiddlers.askForFilename(this); if (!r||!r.length) return;
			document.getElementById('fileImportSourceFix').value=r;
			config.macros.importTiddlers.src=r;
			document.getElementById('importLoad').onclick()">
</div><!--end FF3 FIXUP-->
</div><!--end local-->
<div id="importHTTPPanel" style="display:none;margin-bottom:2px;"><!-- import from http server -->
<table><tr><td align=left>
	enter a URL or <a href="javascript:;" id="importSelectFeed"
		onclick="return onClickImportButton(this,event)" title="select a pre-defined 'systemServer' URL">
		select a server</a><br>
</td><td align=right>
	<input type="checkbox" class="chk" id="importUsePassword"
		onClick="config.macros.importTiddlers.usePassword=this.checked;
			config.macros.importTiddlers.showPanel('importIDPWPanel',this.checked,true);">password
	<input type="checkbox" class="chk" id="importUseProxy"
		onClick="config.macros.importTiddlers.useProxy=this.checked;
			config.macros.importTiddlers.showPanel('importSiteProxy',this.checked,true);">proxy
</td></tr></table>
<input type="text" id="importSiteProxy" style="display:none;margin-bottom:1px" onfocus="this.select()" value="SiteProxy"
	onKeyUp="config.macros.importTiddlers.proxy=this.value"
	onChange="config.macros.importTiddlers.proxy=this.value;">
<input type="text" id="importSourceURL" onfocus="this.select()" value="SiteUrl"
	onKeyUp="config.macros.importTiddlers.src=this.value"
	onChange="config.macros.importTiddlers.src=this.value;">
<div id="importIDPWPanel" style="text-align:center;margin-top:2px;display:none;">
username: <input type=text id="txtImportID" style="width:25%" 
	onChange="config.options.txtRemoteUsername=this.value;">
 password: <input type=password id="txtImportPW" style="width:25%" 
	onChange="config.options.txtRemotePassword=this.value;">
</div><!--end idpw-->
</div><!--end http-->
</div><!--end source-->

<div class="box" id="importSelectPanel" style="display:none;margin:.5em;">
<table><tr><td align=left>
select:
<a href="javascript:;" id="importSelectAll"
	onclick="return onClickImportButton(this)" title="SELECT all tiddlers">
	all</a>
&nbsp;<a href="javascript:;" id="importSelectNew"
	onclick="return onClickImportButton(this)" title="SELECT tiddlers not already in destination document">
	added</a>
&nbsp;<a href="javascript:;" id="importSelectChanges"
	onclick="return onClickImportButton(this)" title="SELECT tiddlers that have been updated in source document">
	changes</a>
&nbsp;<a href="javascript:;" id="importSelectDifferences"
	onclick="return onClickImportButton(this)" title="SELECT tiddlers that have been added or are different from existing tiddlers">
	differences</a>
</td><td align=right>
<a href="javascript:;" id="importListSmaller"
	onclick="return onClickImportButton(this)" title="SHRINK list size">
	&nbsp;&#150;&nbsp;</a>
<a href="javascript:;" id="importListLarger"
	onclick="return onClickImportButton(this)" title="GROW list size">
	&nbsp;+&nbsp;</a>
<a href="javascript:;" id="importListMaximize"
	onclick="return onClickImportButton(this)" title="MAXIMIZE/RESTORE list size">
	&nbsp;=&nbsp;</a>
</td></tr></table>
<select id="importList" size=8 multiple
	onchange="setTimeout('refreshImportList('+this.selectedIndex+')',1)">
	<!-- NOTE: delay refresh so list is updated AFTER onchange event is handled -->
</select>
<div style="text-align:center">
	<a href="javascript:;"
		title="click for help using filters..."
		onclick="alert('A filter consists of one or more space-separated combinations of: tiddlertitle, tag:[[tagvalue]], tag:[[tag expression]] (requires MatchTagsPlugin), story:[[TiddlerName]], and/or search:[[searchtext]]. Use a blank filter to restore the list of all tiddlers.'); return false;"
	>filter</a>
	<input type="text" id="importLastFilter" style="margin-bottom:1px; width:65%"
		title="Enter a combination of one or more filters. Use a blank filter for all tiddlers."
		onfocus="this.select()" value=""
		onKeyUp="config.macros.importTiddlers.lastFilter=this.value"
		onChange="config.macros.importTiddlers.lastFilter=this.value;">
	<input type="button" id="importApplyFilter" style="width:20%" value="apply"
		title="filter list of tiddlers to include only those that match certain criteria"
		onclick="return onClickImportButton(this)">
	</div>
</div><!--end select-->

<div class="box" id="importOptionsPanel" style="text-align:center;margin:.5em;display:none;">
	apply tags: <input type=checkbox class="chk" id="chkImportTags" checked
		onClick="config.macros.importTiddlers.importTags=this.checked;">from source&nbsp;
	<input type=checkbox class="chk" id="chkKeepTags" checked
		onClick="config.macros.importTiddlers.keepTags=this.checked;">keep existing&nbsp;
	<input type=checkbox class="chk" id="chkAddTags" 
		onClick="config.macros.importTiddlers.addTags=this.checked;
			config.macros.importTiddlers.showPanel('txtNewTags',this.checked,false);
			if (this.checked) document.getElementById('txtNewTags').focus();">add tags<br>
	<input type=text id="txtNewTags" style="margin-top:4px;display:none;" size=15 onfocus="this.select()" 
		title="enter tags to be added to imported tiddlers" 
		onKeyUp="config.macros.importTiddlers.newTags=this.value;
		document.getElementById('chkAddTags').checked=this.value.length>0;" autocomplete=off>
	<nobr><input type=checkbox class="chk" id="chkSync" 
		onClick="config.macros.importTiddlers.sync=this.checked;">
		link tiddlers to source document (for sync later)</nobr>
</div><!--end options-->

<div id="importButtonPanel" style="text-align:center">
	<input type=button id="importLoad"	class="importButton btn3" value="open"
		title="load listbox with tiddlers from source document"
		onclick="onClickImportButton(this)">
	<input type=button id="importOptions"	class="importButton btn3" value="options..."
		title="set options for tags, sync, etc."
		onclick="onClickImportButton(this)">
	<input type=button id="importStart"	class="importButton btn3" value="import"
		title="start/stop import of selected source tiddlers into current document"
		onclick="onClickImportButton(this)">
	<input type=button id="importClose"	class="importButton btn3" value="done"
		title="clear listbox or hide control panel"
		onclick="onClickImportButton(this)">
</div>

<div class="none" id="importCollisionPanel" style="display:none;margin:.5em 0 .5em .5em;">
	<table><tr><td style="width:65%" align="left">
		<table><tr><td align=left>
			tiddler already exists:
		</td><td align=right>
			<input type=checkbox class="chk" id="importApplyToAll" 
			onclick="document.getElementById('importRename').disabled=this.checked;"
			checked>apply to all
		</td></tr></table>
		<input type=text id="importNewTitle" size=15 autocomplete=off>
	</td><td style="width:34%" align="center">
		<input type=button id="importMerge"
			class="importButton" style="width:47%" value="merge"
			title="append the incoming tiddler to the existing tiddler"
			onclick="onClickImportButton(this)"><!--
		--><input type=button id="importSkip"
			class="importButton" style="width:47%" value="skip"
			title="do not import this tiddler"
			onclick="onClickImportButton(this)"><!--
		--><br><input type=button id="importRename"
			class="importButton" style="width:47%" value="rename"
			title="rename the incoming tiddler"
			onclick="onClickImportButton(this)"><!--
		--><input type=button id="importReplace"
			class="importButton" style="width:47%" value="replace"
			title="discard the existing tiddler"
			onclick="onClickImportButton(this)">
	</td></tr></table>
</div><!--end collision-->
!end
//}}}
***/
 
/***
|Name|ImportTiddlersPluginInfo|
|Source|http://www.TiddlyTools.com/#ImportTiddlersPlugin|
|Documentation|http://www.TiddlyTools.com/#ImportTiddlersPluginInfo|
|Version|4.6.0|
|Author|Eric Shulman|
|License|http://www.TiddlyTools.com/#LegalStatements|
|~CoreVersion|2.1|
|Type|documentation|
|Description|documentation for ImportTiddlersPlugin|
Combine tiddlers from any two TiddlyWiki documents.  An interactive control panel lets you pick a source document and import selected tiddlers, with prompting for skip, rename, merge or replace actions when importing tiddlers that match existing titles.  Generates a detailed report of import 'history' in ImportedTiddlers.
!!!!!Usage
<<<
{{{<<importTiddlers>>}}} or {{{<<importTiddlers core>>}}}
invokes the built-in importTiddlers macro (TW2.1.x+).  If installed in documents using TW2.0.x or earlier, fallback is to use 'link' display (see below)

{{{<<importTiddlers link label tooltip>>}}}
The ''link'' keyword creates an "import tiddlers" link that when clicked to show/hide import control panel.  ''label'' and ''tooltip'' are optional text parameters (enclosed in quotes or {{{[[...]]}}}, and allow you to override the default display text for the link and the mouseover help text, respectively.

{{{<<importTiddlers inline>>}}}
creates import control panel directly in tiddler content

<<importTiddlers inline>>

Enter a document URL or press "..." to select a TiddlyWiki file to import, and then press ''[open]''.  //Note: There may be a delay before the list of tiddlers appears.//  Use the ''[-]'', ''[+]'', or ''[=]'' links to adjust the listbox size so you can view more (or less) tiddler titles at one time.

Select one or more titles from the listbox.  Use CTRL-click or SHIFT-click to select/deselect individual titles.  Click on ''all'', ''new'', ''changes'', or ''differences'' to automatically select a subset of tiddlers from the list, based on a comparison of the two documents:
*''all'' selects ALL tiddlers from the import source document, even if they have not been changed.
*''new'' selects only tiddlers that are found in the import source document, but do not yet exist in the destination document
*''changes'' selects only tiddlers that exist in both documents but that are newer in the source document
*''differences'' selects all new and existing tiddlers that are different from the destination document (even if destination tiddler is newer)

Press ''[import]'' to begin copying tiddlers to the current document.  If an 'inbound' tiddler matches one that already exists in the document, the import process pauses and the tiddler title is displayed in an input field, along with four push buttons: ''skip'', ''rename'', ''merge'' and ''replace''.
* to bypass importing the tiddler, press ''skip''
* to give the inbound tiddler a different name, so that both the old and new tiddlers will exist when the import is done, enter a new title in the input field and press ''rename'' 
* to combine the content from both tiddlers into a single tiddler so you can then edit it later to eliminate unwanted content, press ''merge''
* to overwrite the existing tiddler with the imported one (discarding the previous content), press ''[replace]''

''Import Report History''

Whenever tiddlers are imported, a report is generated into a tiddler named [[ImportedTiddlers]], recording when the latest import was performed, the number of tiddlers successfully imported, from what location, and by whom, as well as a list of the tiddlers that were processed.  When more tiddlers are imported at a later time, a new report is //added// to the existing [[ImportedTiddlers]], above the previous report (i.e., at the top of the tiddler), so that a history of imports is maintained.  If this record is not desired, you can delete [[ImportedTiddlers]] at any time.

Note: You can prevent a report from being generated for any given import activity by clearing the "create a report" checkbox before pressing the ''import'' button
<<<
!!!!!Installation Notes
<<<
* As of 6/27/2007, support for TW2.1.x and earlier have been moved to [[ImportTiddlersPluginPatch]].  ''//Only install the patch plugin when using TW2.1.x or earlier.//''
<<<
!!!!!Revisions
<<<
2009.10.06 4.6.0 added createTiddlerFromFile (import text files)
2009.09.27 4.5.5 in readTiddlersFromCSV(), strip \r from input and fixed handling for quoted values
2009.09.12 4.5.4 fixed 'return false' to prevent IE page transition. Also, moved html/css definitions to separate sections
2009.08.23 4.5.3 in importTiddlers(), add 'file:///' to local server.host sync field only if not already present in URL
2009.08.20 4.5.2 only use SiteURL/SiteProxy values if control panel value has not yet been set
2009.07.03 4.5.1 fixups for TW252: doHttp() doesn't return XHR and convertUTF8ToUnicode() not needed for local I/O
2009.05.04 4.5.0 import from CSV-formatted files
2009.03.04 4.4.2 in createImportPanel(), init option checkboxes so display matches internal state variables
2009.02.26 4.4.1 use macro-specific definition of $() function abbreviation (avoids conflict with JQuery)
2008.09.30 4.4.0 added fallback definition of merge() for use with TW2.0.x and TW1.2.x
2008.08.12 4.3.3 rewrite backstage and shadow tiddler definitions for easier customization
2008.08.05 4.3.2 rewrote loadRemoteFile() to eliminate use of platform-specific fileExists() function
2008.06.29 4.3.1 More layout/animation work for simpler sequential interaction.  Code reduction/cleanup
2008.06.28 4.3.0 HTML and CSS cleanup and tweaks to layout.  Added animation to panels
2008.06.22 4.2.0 For FireFox, use HTML with separate text+button control instead of type='file' control
2008.06.05 4.1.0 in filterByHash(), added support for boolean tag expressions using getMatchingTiddlers() (defined by MatchTagsPlugin)
2008.05.12 4.0.2 automatically tweak the backstage "import" task to add the ImportTiddlers control panel
2008.04.30 4.0.1 trim #... suffix for loading files/URLs in IE
2008.04.30 4.0.0 added source filtering (using URL paramifiers).  Also, abbreviations for code-size reduction.
2008.04.13 3.9.0 added 'apply to all' checkbox for collision processing
2008.03.26 3.8.0 added support for selecting pre-defined systemServer URLs
2008.03.25 3.7.0 added support for setting 'server' fields on imported tiddlers (for later synchronizing of changes)
2008.01.03 3.6.0 in loadRemoteFile(), use lower-level doHttp() instead of loadRemoteFile() in order to support username/password access to remote server
2007.10.30 3.5.6 update [[ImportTiddlers]] shadow tiddler definition to include "inline" link
2007.06.27 3.5.5 added missing 'fields' params to saveTiddler() calls.  Fixes problem where importing tiddlers would lose the custom fields.  Also, moved functions for TW2.1.x to [[ImportTiddlersPluginPatch2.1.x]].
2007.06.25 3.5.4 added calls to store.suspendNotifications() and store.resumeNotifications().  Eliminates redisplay processing overhead DURING import activities
2007.04.29 3.5.3 in refreshImportList() when inbound tiddlers are loaded, change "close" button to "done", and disable certain controls to create a modal condition, so that actions that reload tiddlers cannot be performed unless "done" is first pressed to end the mode.
2007.04.28 3.5.2 in handler(), added param support for custom link label/prompt
2007.04.19 3.5.1 in readTiddlersFromHTML(), for TW2.2 and above, use importTiddlyWiki() (new core functionality) to get tiddlers from remote file content.  Also, copied updated TW21Loader.prototype.internalizeTiddler() definition from TW2.2b5 so plugin can read tiddlers from TW2.2+ even when running under TW2.1.x
2007.03.22 3.5.0 in refreshImportList(), add handling for 'select section' when a heading is selected.  Makes it really easy to import by tag or date!
2007.03.21 3.4.0 split loadTiddlers functionality into separate plugin (see [[LoadTiddlersPlugin]])
2007.03.20 3.3.1 tweak to previous change to allow relative file references via http: (bypasses getLocalPath() so remote URL will be used)
2007.03.20 3.3.0 added support for local, relative file references: in loadRemoteFile(), check for fileExists().  If not found, prepend relative path and retry.
2007.02.24 3.2.1 re-labeled control panel "open" button to "load"
2007.02.09 3.2.0 loadTiddlers: added support for "noReload" tag (prevents overwriting existing tiddler, even if inbound tiddler is newer)
2007.02.08 3.1.3 loadTiddlers: added missing code and documentation for "newTags" handling (a feature change from long, long ago that somehow got lost!)
2006.11.14 3.1.2 fix macro handler parameter declaration (double-pasted param list corrupts IE)
2006.11.13 3.1.1 use apply() method to invoke hijacked core handler
2006.11.13 3.1.0 hijack built-in importTiddlers.handler() to co-exist with plugin interface.  If no params or 'core' keyword, display core interface.  "link" param embeds "import tiddlers" link that shows floating panel when clicked.
2006.10.12 3.0.8 in readTiddlersFromHTML(), fallback to find end of store area by matching "/body" when POST-BODY-START is not present (backward compatibility for older documents)
2006.09.10 3.0.7 in readTiddlersFromHTML(), find end of store area by matching "POST-BODY-START" instead of "/body" 
2006.08.16 3.0.6 Use higher-level store.saveTiddler() instead of store.addTiddler() to avoid conflicts with adaptations that hijack low-level tiddler handling.  in CreateImportPanel(), removed "refresh listbox after every tiddler change".
2006.07.29 3.0.5 added noChangeMsg to loadTiddlers processing.  if not 'quiet' mode, reports skipped tiddlers.
2006.04.18 3.0.4 in loadTiddlers.handler, fixed parsing of "prompt:" param. Also, corrected parameters mismatch in loadTiddlers() callback function definition (order of params was wrong, resulting in filters NOT being applied)
2006.04.12 3.0.3 moved many display messages to macro properties for easier L10N translations via 'lingo' definitions.
2006.04.12 3.0.2 more work on 'core candidate' code.  Proposed API now defines "loadRemoteFile()" for XMLHttpRequest processing with built in fallback for handling local filesystem access, and readTiddlersFromHTML() to process the resulting source HTML content.
2006.04.04 3.0.1 in refreshImportList(), when using [by tags], tiddlers without tags are now included in a new "untagged" pseudo-tag list section
2006.04.04 3.0.0 Separate non-interactive {{{<<importTiddlers...>>}}} macro functionality for incorporation into TW2.1 core and renamed as {{{<<loadTiddlers>>}}} macro.  New parameters for loadTiddlers: ''label:text'' and ''prompt:text'' for link creation,  ''ask'' for filename/URL, ''tag:text'' for filtering, "confirm" for accept/reject of individual inbound tiddlers.  Removed support for "importReplace/importPublic" tags and "force" param (unused feature). 
2006.03.30 2.9.1 when extracting store area from remote URL, look for "</body>" instead of "</body>\n</html>" so it will match even if the "\n" is absent from the source.
2006.03.30 2.9.0 added optional 'force' macro param.  When present, autoImportTiddlers() bypasses the checks for importPublic and importReplace.  Based on a request from Tom Otvos.
2006.03.28 2.8.1 in loadImportFile(), added checks to see if 'netscape' and 'x.overrideMimeType()' are defined (not in IE). Also, when extracting store area, look for "</body>\n</html>" and omit extra content that may have been added to the end of the file.
2006.02.21 2.8.0 added support for "tiddler:TiddlerName" filtering parameter in auto-import processing
2006.02.21 2.7.1 Clean up layout problems with IE.  (Use tables for alignment instead of SPANs styled with float:left and float:right)
2006.02.21 2.7.0 Added "local file" and "web server" radio buttons.  Default remote URL uses value from [[SiteURL]].  Also, added 'proxy' option, using value from [[SiteProxy]] as prefix to permit cross-domain document access via server-side scripting.
2006.02.17 2.6.0 Removed "differences only" listbox display mode, replaced with selection filter 'presets': all/new/changes/differences.  fixed init of "add new tags" checkbox
2006.02.16 2.5.4 added checkbox options to control "import remote tags" and "keep existing tags" behavior, in addition to existing "add new tags" functionality.
2006.02.14 2.5.3 FF1501 corrected unintended global 't' (loop index) in importReport() and autoImportTiddlers()
2006.02.10 2.5.2 corrected unintended global variable in importReport().
2006.02.05 2.5.1 moved globals from window.* to config.macros.importTiddlers.* to avoid FireFox 1.5.0.1 crash bug when referencing globals
2006.01.18 2.5.0 added checkbox for "create a report".  Default is to create/update the ImportedTiddlers report.
2006.01.15 2.4.1 added "importPublic" tag and inverted default so that auto sharing is NOT done unless tagged with importPublic
2006.01.15 2.4.0 Added support for tagging tiddlers with importSkip, importReplace, and/or importPrivate to enable/disable overwriting or sharing with others when using auto-import macro syntax.  Defaults: don't overwrite existing tiddlers, and allow your tiddlers to be auto-imported by others.
2006.01.15 2.3.2 Added "ask" parameter to confirm each tiddler before importing (for use with auto-importing)
2006.01.15 2.3.1 Strip TW core scripts from import source content and load just the storeArea into the hidden IFRAME to prevent imported document's core code from being invoked.  Also, when importing local documents, use convertUTF8ToUnicode() to support international characters sets.
2006.01.12 2.3.0 Reorganized code to use callback function for loading import files to support event-driven I/O via an ASYNCHRONOUS XMLHttpRequest instead of waiting for remote hosts to respond to URL requests.  Added non-interactive 'batch' mode, using macro parameters to specify source path/file or URL, and select tiddlers to import.  Improved messages and added optional 'quiet' switch for batch mode to eliminate //most// feedback.
2006.01.11 2.2.0 Added "[by tags]" to list of tiddlers, based on code submitted by BradleyMeck
2006.01.08 2.1.0 IMPORT FROM ANYWHERE!!! re-write getImportedTiddlers() logic to either read a local file (using local I/O), OR... read a remote file, using a combination of XML and an iframe to permit cross-domain reading of DOM elements.  Adapted from example code and techniques courtesy of Jonny LeRoy.
2006.01.06 2.0.2 When refreshing list contents, fixed check for tiddlerExists() when "show differences only" is selected, so that imported tiddlers that don't exist in the current file will be recognized as differences and included in the list.
2006.01.04 2.0.1 When "show differences only" is NOT checked, import all tiddlers that have been selected even when they have a matching title and date.
2005.12.27 2.0.0 Update for TW2.0
Defer initial panel creation and only register a notification function when panel first is created
2005.12.22 1.3.1 tweak formatting in importReport() and add 'discard report' link to output
2005.12.03 1.3.0 Dynamically create/remove importPanel as needed to ensure only one instance of interface elements exists, even if there are multiple instances of macro embedding.  Also, dynamically create/recreate importFrame each time an external TW document is loaded for importation (reduces DOM overhead and ensures a 'fresh' frame for each document)
2005.11.29 1.2.1 fixed formatting of 'detail info' in importReport()
2005.11.11 1.2.0 added 'inline' param to embed controls in a tiddler
2005.11.09 1.1.0 only load HTML and CSS the first time the macro handler is called.  Allows for redundant placement of the macro without creating multiple instances of controls with the same ID's.
2005.10.25 1.0.5 fixed typo in importReport() that prevented reports from being generated
2005.10.09 1.0.4 combined documentation with plugin code instead of using separate tiddlers
2005.08.05 1.0.3 moved CSS and HTML definitions into plugin code instead of using separate tiddlers
2005.07.27 1.0.2 core update 1.2.29: custom overlayStyleSheet() replaced with new core setStylesheet()
2005.07.23 1.0.1 added parameter checks and corrected addNotification() usage
2005.07.20 1.0.0 Initial Release
<<<
/***
|Name|ImportTiddlersPluginPatch|
|Source|http://www.TiddlyTools.com/#ImportTiddlersPluginPatch|
|Version|4.4.0|
|Author|Eric Shulman|
|License|http://www.TiddlyTools.com/#LegalStatements|
|~CoreVersion|2.1|
|Type|plugin|
|Requires|ImportTiddlersPlugin|
|Description|backward-compatible function patches for use with ImportTiddlersPlugin and TW2.1.x or earlier|
!!!!!Usage
<<<
The current version ImportTiddlersPlugin is compatible with the TW2.2.x core functions.  This "patch" plugin provides additional functions needed to enable the current version of ImportTiddlersPlugin to operate correctly under TW2.1.x or earlier.

{{medium{You do not need to install this plugin if you are using TW2.2.0 or above}}}
(though it won't hurt anything if you do... it will just take up more space).
<<<
!!!!!Revisions
<<<
2008.09.30 [4.4.0] added safety check for TW21Loader object and forward-compatible loadFromDiv() prototype to permit use with TW2.0.x and TW1.2.x.
2008.08.05 [4.3.2] rewrote loadRemoteFile to eliminate use of platform-specific fileExists() function
2008.01.03 [3.6.0] added support for passing txtRemoteUsername and txtRemotePassword for accessing password-protected remote servers
2007.06.27 [3.5.5] compatibility functions split from ImportTiddlersPlugin
|please see [[ImportTiddlersPlugin]] for additional revision details|
2005.07.20 [1.0.0] Initial Release
<<<
!!!!!Code
***/
//{{{
// these functions are only defined when installed in TW2.1.x and earlier... 
if (version.major+version.minor/10 <= 2.1) {

// Version
version.extensions.ImportTiddlersPluginPatch= {major: 4, minor: 4, revision: 0, date: new Date(2008,9,30)};

// fixups for TW2.0.x and earlier
// define a minimal merge() when the core doesn't provide one (pre-TW2.1):
// copies properties of src onto dst; with preserveExisting, only fills in missing ones
if (window.merge==undefined) window.merge=function(dst,src,preserveExisting)
	{ for (var p in src) if (!preserveExisting||dst[p]===undefined) dst[p]=src[p]; return dst; } // FIX: 'var p' avoids leaking a global
if (config.macros.importTiddlers==undefined) config.macros.importTiddlers={ }; // ensure macro namespace exists

// Load the text of a TiddlyWiki document from a local path or an http:/file: URL
// and pass it to 'callback' (TW2.1.x-and-earlier compatibility version).
// src      - path/filename or URL of the source document (required)
// callback - function(success,params,txt,src,xhr) invoked when content is available
// quiet    - when true, suppress all status/error messages
// Returns null if src is missing; otherwise no meaningful return value
// (remote loads complete asynchronously via the XHR callback).
config.macros.importTiddlers.loadRemoteFile = function(src,callback,quiet) {
	if (src==undefined || !src.length) return null; // filename is required
	if (!quiet) clearMessage();
	if (!quiet) displayMessage(this.openMsg.format([src]));

	if (src.substr(0,5)!="http:" && src.substr(0,5)!="file:") { // if not a URL, read from local filesystem
		var txt=loadFile(src);
		if (!txt) { // file didn't load, might be relative path.. try fixup
			var pathPrefix=document.location.href;  // get current document path and trim off filename
			var slashpos=pathPrefix.lastIndexOf("/"); if (slashpos==-1) slashpos=pathPrefix.lastIndexOf("\\"); 
			if (slashpos!=-1 && slashpos!=pathPrefix.length-1) pathPrefix=pathPrefix.substr(0,slashpos+1);
			src=pathPrefix+src;
			if (pathPrefix.substr(0,5)!="http:") src=getLocalPath(src);
			txt=loadFile(src); // FIX: re-assign, not re-declare (original had a duplicate 'var txt')
		}
		if (!txt) { // file still didn't load, report error
			if (!quiet) displayMessage(config.macros.importTiddlers.openErrMsg.format([src.replace(/%20/g," "),"(filesystem error)"]));
		} else {
			if (!quiet) displayMessage(config.macros.importTiddlers.readMsg.format([txt.length,src.replace(/%20/g," ")]));
			if (callback) callback(true,src,convertUTF8ToUnicode(txt),src,null);
		}
	} else {
		var x; // get a request object
		try {x = new XMLHttpRequest()} // moz
		catch(e) {
			try {x = new ActiveXObject("Msxml2.XMLHTTP")} // IE 6
			catch (e) {
				try {x = new ActiveXObject("Microsoft.XMLHTTP")} // IE 5
				catch (e) { return }
			}
		}
		// setup callback function to handle server response(s)
		x.onreadystatechange = function() {
			if (x.readyState == 4) {
				if (x.status==0 || x.status == 200) {
					if (!quiet) displayMessage(config.macros.importTiddlers.readMsg.format([x.responseText.length,src]));
					if (callback) callback(true,src,x.responseText,src,x);
				}
				else {
					if (!quiet) displayMessage(config.macros.importTiddlers.openErrMsg.format([src,x.status]));
				}
			}
		}
		// get privileges to read another document's DOM via http:// or file:// (moz-only)
		if (typeof(netscape)!="undefined") {
			try { netscape.security.PrivilegeManager.enablePrivilege("UniversalBrowserRead"); }
			catch (e) { if (!quiet) displayMessage(e.description?e.description:e.toString()); }
		}
		// send the HTTP request
		try {
			// append a cache-busting 'nocache' parameter so the server content is always re-fetched
			var url=src+(src.indexOf('?')<0?'?':'&')+'nocache='+Math.random();
			x.open("GET",url,true,config.options.txtRemoteUsername,config.options.txtRemotePassword); // FIX: was opening 'src', leaving the computed nocache URL unused
			if (x.overrideMimeType) x.overrideMimeType('text/html');
			x.send(null);
		}
		catch (e) {
			if (!quiet) {
				displayMessage(config.macros.importTiddlers.openErrMsg.format([src,"(unknown)"]));
				displayMessage(e.description?e.description:e.toString());
			}
		}
	}
}

// Parse tiddlers out of the raw HTML text of a TiddlyWiki document (TW2.1 and
// earlier) by isolating the storeArea DIV, loading it into a hidden IFRAME so
// the browser parses it, and then reading the tiddler DIVs from the DOM.
// html - complete source HTML of the document being imported
// Returns an array of Tiddler objects, or null if no store area/tiddlers found.
config.macros.importTiddlers.readTiddlersFromHTML=function(html) {
	// for TW2.1 and earlier
	// extract store area from html 
	var start=html.indexOf('<div id="storeArea">');
	if (start==-1) return null; // FIX: no store area present - not a TiddlyWiki document
	var end=html.indexOf("<!--POST-BODY-START--"+">",start);
	if (end==-1) end=html.indexOf("</body"+">",start); // backward-compatibility for older documents
	if (end==-1) end=html.length; // FIX: malformed document - use everything after the store area
	var storeHTML="<html><body>"+html.substring(start,end)+"</body></html>";

	// load html into a hidden iframe document (recreated on each call)
	var f=document.getElementById("loaderFrame"); if (f) document.body.removeChild(f);
	f=document.createElement("iframe"); f.id="loaderFrame";
	f.style.width="0px"; f.style.height="0px"; f.style.border="0px";
	document.body.appendChild(f);
	var d=f.document;
	if (f.contentDocument) d=f.contentDocument; // For NS6
	else if (f.contentWindow) d=f.contentWindow.document; // For IE5.5 and IE6
	d.open(); d.writeln(storeHTML); d.close();

	// read tiddler DIVs from storeArea DOM element	
	var sa = d.getElementById("storeArea");
	if (!sa) return null;
	sa.normalize(); // merge adjacent text nodes
	var nodes = sa.childNodes;
	if (!nodes || !nodes.length) return null;
	var tiddlers = [];
	for(var t = 0; t < nodes.length; t++) {
		// determine the tiddler title from whichever storage convention applies
		var title = null;
		if(nodes[t].getAttribute)
			title = nodes[t].getAttribute("title"); // TW 2.2+
		if(!title && nodes[t].getAttribute)
			title = nodes[t].getAttribute("tiddler"); // TW 2.1.x
		if(!title && nodes[t].id && (nodes[t].id.substr(0,5) == "store"))
			title = nodes[t].id.substr(5); // TW 1.2.x
		if(title && title != "")
			tiddlers.push((new Tiddler()).loadFromDiv(nodes[t],title));
	}
	return tiddlers;
}

// // FORWARD-COMPATIBLE SUPPORT FOR TW2.1.x
// // enables reading tiddler definitions using TW2.2+ storeArea format, even when plugin is running under TW2.1.x
if (typeof TW21Loader!="undefined") {
// Override TW2.1.x's tiddler internalization so that it can also read
// tiddler DIVs written in the TW2.2+ storeArea format.
TW21Loader.prototype.internalizeTiddler = function(store,tiddler,title,node) {
	var e = node.firstChild;
	var text = null;
	if(node.getAttribute("tiddler"))
		// legacy DIVs (marked by a "tiddler" attribute) keep escaped
		// tiddler text directly in the DIV's text nodes
		text = getNodeText(e).unescapeLineBreaks();
	else {
		// TW2.2+ store format: tiddler text is wrapped in a PRE element
		while(e.nodeName!="PRE" && e.nodeName!="pre") e = e.nextSibling;
		text = e.innerHTML.replace(/\r/mg,"").htmlDecode();
	}
	// standard metadata attributes; a missing "created" date falls back to
	// the core version date, a missing "modified" falls back to "created"
	var modifier = node.getAttribute("modifier");
	var c = node.getAttribute("created");
	var m = node.getAttribute("modified");
	var created = c ? Date.convertFromYYYYMMDDHHMM(c) : version.date;
	var modified = m ? Date.convertFromYYYYMMDDHHMM(m) : created;
	var tags = node.getAttribute("tags");
	// collect any non-standard DIV attributes as extended fields
	// (the TW2.2+ custom-fields feature)
	var fields = {};
	var attrs = node.attributes;
	for(var i = attrs.length-1; i >= 0; i--) {
		var name = attrs[i].name;
		if (attrs[i].specified && !TiddlyWiki.isStandardField(name))
			fields[name] = attrs[i].value.unescapeLineBreaks();
		
	}
	tiddler.assign(title,text,modifier,modified,tags,created,fields);
	return tiddler;
};
}

// FORWARD-COMPATIBLE SUPPORT FOR TW2.0.x and TW1.2.x
// enables reading tiddler definitions using TW2.2+ storeArea format, even when plugin is running under TW2.0.x or TW1.2.x
// NOTE(review): despite the "enables" wording in the comment above, this
// guard fires when loadFromDiv ALREADY exists — i.e., it replaces the
// core's TW2.0.x/1.2.x implementation with one that also understands the
// TW2.2+ store format. Confirm the != comparison is intentional.
if (typeof Tiddler.prototype.loadFromDiv!="undefined") {
Tiddler.prototype.loadFromDiv = function(node,title) { // Load a tiddler from an HTML DIV
	var e = node.firstChild;
	var text = null;
	if(node.getAttribute("tiddler")) {
		// legacy DIVs (marked by a "tiddler" attribute) keep escaped text
		// directly in the DIV's text nodes
		// get merged text from adjacent text nodes
		var t=""; while(e&&e.nodeName=="#text") { t+=e.nodeValue; e=e.nextSibling; }
		text = Tiddler.unescapeLineBreaks(t);
	} else {
		// TW2.2+ store format: tiddler text is wrapped in a PRE element
		while(e.nodeName!="PRE" && e.nodeName!="pre") e = e.nextSibling;
		text = e.innerHTML.replace(/\r/mg,"").htmlDecode();
	}
	// standard metadata; a missing "created" date falls back to the core
	// version date, a missing "modified" falls back to "created"
	var modifier = node.getAttribute("modifier");
	var c = node.getAttribute("created");
	var m = node.getAttribute("modified");
	var created = c ? Date.convertFromYYYYMMDDHHMM(c) : version.date;
	var modified = m ? Date.convertFromYYYYMMDDHHMM(m) : created;
	var tags = node.getAttribute("tags");
	this.set(title,text,modifier,modified,tags,created);
	return this;
}
}

} // END OF pre-TW2.2 backward-compatibility functions
//}}}
On Wed 27 Jun 2012 02:57:25 PM EDT, WillKinney imported 1 tiddler from
[[http://bob.mcelrath.org/tiddlyjsmath-2.0.3.html|http://bob.mcelrath.org/tiddlyjsmath-2.0.3.html]]:
<<<
#[[tiddlyjsmath-2.0.3.html]] - added
<<<

----
On Thu 21 Jun 2012 10:34:46 AM EDT, YourName imported 50 tiddlers from
[[/home/whkinney/tmp/Wiki/Wiki.html|/home/whkinney/tmp/Wiki/Wiki.html]]:
<<<
#[[About Displaying Math]] - added
#[[About the Author]] - added
#[[Anthropic Principle]] - added
#[[Bad Intro Physics]] - added
#[[Climate Change]] - added
#[[Cosmic Microwave Background]] - added
#[[Cosmological Expansion and the Big Bang]] - added
#[[Cosmological Horizons]] - added
#[[Curved Spaces]] - added
#[[DefaultTiddlers]] - added
#[[Ekpyrosis]] - added
#[[Entropy]] - added
#[[Factorial]] - added
#[[Generic]] - added
#[[How to Play the Lottery]] - added
#[[I Am]] - added
#[[Inflation]] - added
#[[Irreducible Complexity]] - added
#[[Letters Journal]] - added
#[[MainMenu]] - added
#[[MarkupPostBody]] - added
#[[MarkupPostHead]] - added
#[[MochaSideBarOptions]] - added
#[[Morally Equivalent]] - added
#[[Natural]] - added
#[[NoticeBoard]] - added
#[[One Sigma Detection]] - added
#[[PageTemplate]] - added
#[[Seeing With New Eyes]] - added
#[[SiteSubtitle]] - added
#[[SiteTitle]] - added
#[[Space and Time]] - added
#[[Standard Model]] - added
#[[String Theory]] - added
#[[Student Fees]] - added
#[[StyleSheet]] - added
#[[Supersymmetry]] - added
#[[Tenure]] - added
#[[Test]] - added
#[[The Arrow of Time]] - added
#[[The number of different ways to roll ten of each number]] - added
#[[This is not a blog]] - added
#[[Two Sigma Detection]] - added
#[[Unnatural]] - added
#[[Welcome!]] - added
#[[configDefault]] - added
#[[contentFooter]] - added
#[[footerhack]] - added
#[[plugin:DisableWikiLinks]] - added
#[[plugin:jsMath]] - added
<<<
An online review article on inflationary cosmology.

[[(1.0) Introduction]]
>[[(1.1) The Metric]]
>[[(1.2) General Relativity and the Einstein Field Equation]]
>[[(1.3) The Friedmann Equation]]
>[[(1.4) Einstein's Greatest Blunder: the Cosmological Constant]]
>[[(1.5) Solving the Friedmann Equation]]
>[[(1.6) The Hot Big Bang and the Cosmic Microwave Background]]
>[[(1.7) The Age Crisis and the Physics of Nothing]]

''(2.0) Vacuum Energy and Cosmological Inflation''
>[[(2.1) Vacuum Energy in Quantum Field Theory]]
>[[(2.2) The Flatness and Horizon Problems]]
>[[(2.3) Inflation from Scalar Fields]]
>[[(2.4) Example: the \(\lambda \phi^4\) Potential]]

[[(3.0) Perturbations in Inflation]]
>[[(3.1) The Klein-Gordon Equation in Curved Spacetime]]
>[[(3.2) Quantization]]
>[[(3.3) Vacuum Selection]]
>[[(3.4) Exact Solutions and the Primordial Power Spectrum]]
>[[(3.5) The Curvature Perturbation]]
>[[(3.6) Example: the \(\lambda \phi^4\) Potential]]

[[(4.0) Observational Constraints]]

[[(5.0) Outlook and Conclusions]]

[[Acknowledgments]]
The main argument of anti-evolution "Intelligent Design" advocates such as [[Michael Behe|http://www.lehigh.edu/bio/faculty/behe.html]] is the idea of "irreducible complexity". The concept is that certain aspects of living organisms cannot have evolved incrementally, since disabling any part of them would make the entire structure non-functional. The analogy is a wristwatch: a watch missing even the tiniest of its internal parts would not function at all, so how could such a finely tuned structure have arisen incrementally through natural selection? The only logical alternative is that it was created wholesale, fully functional. This is certainly true of wristwatches, and the argument is that it is also true of some biological structures. For example, a classic canard of the Intelligent Design community is the bacterial flagellum: how can such a beautifully intricate structure have evolved by natural selection, when removal of any of its component parts would render it useless, just like a wristwatch? Behe even uses a graphic of the flagellum as the banner on his [[blog|http://www.uncommondescent.com/]]. 

You don't have to have a doctorate in molecular biology to understand that irreducible complexity is total bullshit. I am not going to argue the biology here: that has already been done very well by countless others more qualified than I. The basic idea is that organisms routinely take structures such as proteins which evolved for one use, and re-purpose them for an entirely new, unrelated use. This is not hard to understand, and it is devastating to the entire argument of irreducible complexity.  (In the case of the flagellum, the re-used structure appears to be the [[Type III Secretion System|http://www.pnas.org/content/100/6/3027.short]], a needle-like structure used by pathogens to inject proteins into other cells. A thorough debunking of the flagellum myth by Kenneth Miller at University of Colorado can be found [[here|http://www.millerandlevine.com/km/evol/design2/article.html]].) 

It has been made clear over and over again by people making careful, reasoned arguments that irreducible complexity is a vacuous thesis, and it is difficult to imagine any additional accumulation of facts penetrating the thick skulls of those who persist in shilling it. I am not going to bother with that. I am going to criticize irreducible complexity for another reason: it is not a scientific argument at all. The basic syllogism of irreducible complexity is:

//I cannot think of any way Thing X could have arisen through natural selection. Nobody else will ever think of a way Thing X could have arisen through natural selection. Therefore, God did it. //

Sounds pretty stupid doesn't it? That's because it //is// stupid. It is the opposite of a scientific argument. When a scientist doesn't understand something, she collects data, conducts experiments, and formulates theories in an effort to explain that which she does not understand. That's the whole point of science: science is a systematic method by which we explain things we do not understand. The irreducible complexity argument is the opposite of this: it asserts that anything we don't understand is proof that science is //impossible//. This is a meme that adapts itself with particular success to the ecosystem of zealous ignorati with [[blogs|This is not a blog]]. This is because the irreducible complexity argument is its own inoculation against disproof. It doesn't matter how many "irreducibly complex" systems like the flagellum are demonstrated to be perfectly well explained by natural selection. Some //other// supposedly irreducibly complex system will be trotted out, and will be held up as a counterexample to natural selection until a half dozen biologists waste their time to debunk the supposed counterexample. Then rinse and repeat. This tactic works because there are always going to be things that we do not yet understand, and it is cheap and easy to claim that such things are fundamentally impossible to explain. The hard work is in the explaining, and that is the work that scientists do. (Even cooler, than the flagellum, an [[organism which uses mechanical gears|http://www.sciencemag.org/content/341/6151/1254]] was discovered in 2013. )

The other option is to simply ignore the evidence: if you say something stupid often enough and loud enough, lots of people will take you seriously. This happens all the time in science, not just in biology: one need look no further than the [[Anthropic Principle]] for an example of this effect in physics. Getting a stupid idea attention through simple repetition is easier if you have an academic position, like Behe or [[John Sanford|http://www.nysaes.cornell.edu/hort/faculty/sanford/]]. The authority of your credentials makes people take you more seriously.  Guys like Behe and Sanford usually crumble when forced to rigorously justify their positions, as Sanford famously did in his [[testimony before the Kansas Board of Education|http://www.talkorigins.org/faqs/kansas/kangaroo4.html#p1705]]. This does little to diminish their stature among the true believers, who appear to be glad to have //anybody// with credentials to hide behind. It can even perversely enhance one's credibility if the overwhelming majority of your peers label you a crackpot (case in point: Ben Stein's movie [[Expelled|http://www.expelledthemovie.com/]].) The reality is more mundane: there are a lot of crackpots with PhDs out there. This is an inevitable byproduct of intellectual diversity, and is nothing unusual or surprising. I suspect irreducible complexity would be largely ignored if it were not for the fact that it is, from beginning to end, all about God. 

Irreducible complexity advocates bend over backwards to distance themselves from any religious context, but religion is the elephant in the room.  Once you accept the argument from irreducible complexity that science is impossible, proponents of Intelligent Design are standing by with the only possible alternative: religion. Unlike other crackpot science theories, Intelligent Design is innately appealing to many because it reinforces their existing religious biases, for example the [[Abrahamic|http://en.wikipedia.org/wiki/Abrahamic_religions]] belief that we are made in God's image. If evolution is true, there is simply no way we were made in God's image, except in the most distant metaphorical sense. Evolution (and by extension science in general) is rightly perceived by religious fundamentalists to be a threat to their entire world view. I agree, and in fact I don't see any way to get around the conclusion that science is a threat to religion. Science is //supposed// to be a threat to religion. The whole point of science is to replace superstition with empiricism. Beliefs are tested against facts. This is an inherent difference between scientific and religious world views, despite the absurd Alice-in-Wonderland argument that science itself (&quot;Darwinism&quot;) is just another arbitrary belief system (a &quot;materialistic religion&quot; [[in Behe's words|http://www.uncommondescent.com/evolution/olivia-judson-lets-not-call-what-were-doing-darwinism/]]). The Roman Catholic church up the street from my house recently had the following on a sign posted in front of the church: &quot;Spirit above body, faith above mind, God above all.&quot; Science is, by its nature, the opposite of that, and whether those of religious faith like it or not, when science and religion find themselves at loggerheads, it is religion that is going to have to give way, every time. 
We now know, beyond any rational doubt, that human beings (like all other animals) are biochemical machines whose structure was determined by evolution. Not God. We can quit arguing about this now, and move on to worrying about the many other things we //don't// yet understand.  

This innate tension is often ignored by scientists, who are made uncomfortable by any suggestion of conflict between science and religion. I know many scientists who argue, reasonably enough, that there is no inherent conflict between a scientific world view and religious faith. However, this genteel rapprochement favored by many intellectuals //completely misses the point//. For example,  I really seriously doubt that any of my Christian colleagues honestly believe that Jesus actually cured leprosy.  [[Leprosy is caused by bacteria|https://en.wikipedia.org/wiki/Leprosy]], and because we understand this, we know that leprosy cannot be cured by the laying on of hands. I likewise doubt that any of them really think that:
* Jesus //actually// created loaves and fishes out of thin air. 
* Jesus //actually// turned water into wine. 
* Jesus //actually// raised anyone from the dead, or was himself resurrected from the dead. 
The list could go on for a very long time, and of course it does not apply only to Christian beliefs: I only pick on Christianity here because, as a product of an overwhelmingly Christian culture, I am most familiar with its particular legends. I am also //not// arguing that religion is without a place in society: religious mythology is mankind's literature of our longing for meaning, for a place in the universe. We may not be created in God's image, but we have always created God in ours, whom we un-create and re-create to suit our needs. This is a beautiful thing. Religious traditions should be preserved and cherished for the rich cultural heritage that they are. Who could think otherwise after reading the Gospels of Jesus, or hearing the mournful, lovely call of the muezzin wafting in through an open window in the pre-dawn air in Cairo, or reading of the magnificent [[death practices of the Zoroastrians|https://www.nytimes.com/2012/11/30/world/asia/cultivating-vultures-to-restore-a-mumbai-ritual.html]]?

But most educated people of any faith -- I would guess especially scientists -- appropriately view religious metaphors as just that: //metaphors//. While I haven't ever pressed anybody I know on any of the specific points above (it would be pretty impolite), I do wonder for most people where they put the bright line between metaphor and literal truth in their faith. I confess to a certain admiration of Bible literalists (like my evil internet twin [[Will Kinney|http://brandplucked.webs.com/]]), because at least their ideas, loony as they might be, have a sort of logical clarity to them. They have at least thought about the issue. It's the squishy pseudo-agnosticism of my more rational friends that I wonder about. It is a sign of a healthy, open mind to admit that one does not know everything (or even anything very much at all), as Isaac Newton did in saying "I do not know what I may appear to the world, but to myself I seem to have been only like a boy playing on the sea-shore, and diverting myself in now and then finding a smoother pebble or a prettier shell than ordinary, whilst the great ocean of truth lay all undiscovered before me." To hold open the possibility that there may be a larger intelligence in the universe is almost an acknowledgment of the inevitable: to believe that we are the smartest thing in the cosmos utterly strains credulity. But does that amount to believing in //God//, by which I mean an omnipotent, interventionist being who gives a whit about our well being or moral choices? I don't see how one follows from the other, but even if one were to give that idea the benefit of the doubt, what would make one think that human religious institutions and traditions are likely to shed any useful light on the nature of such a being? 

Maybe it doesn't matter. Annie Dillard, in //Holy the Firm//, wrote "I know only enough of God to worship him, by any means ready to hand." Perhaps it is enough to honor the divine even in the knowledge that the human institutions constructed to do so are of whole cloth human inventions. But that is such a weak form of faith that one would hesitate to call it faith at all, because it does not really even require the belief that //God// literally exists to serve its purpose: one could just as easily replace God with Nature.  One ultimately has to make a choice between what one is willing to believe in the absence of evidence, and what one is not:
* Life after death?
* Heaven? 
* Hell?
* The existence of God at all?
Saying "I don't know one way or another" is ducking the question. Any scientist would be willing to abandon any of these ideas if presented with evidence that they are false, but which are you prepared to believe is literally true, not a metaphor, in the //absence// of any evidence that it actually //is// true? What makes you draw the line? To my mind, [[Pascal's wager|https://en.wikipedia.org/wiki/Pascal%27s_Wager]] is a cynical, cowardly bargain, and carries with it the terrible cost of servitude to living in fear of the unknown. I much prefer the philosophy, [[mis-attributed by the internet|https://en.wikiquote.org/wiki/Marcus_Aurelius#Misattributed]] to Marcus Aurelius: "Live a good life. If there are gods and they are just, then they will not care  how devout you have been, but will welcome you based on the virtues you have lived by. If there are gods, but unjust, then you should not want to worship them. If there are no gods, then you will be gone, but will have lived a noble life that will live on in the memories of your loved ones." Which is why I finally started calling myself an atheist. Because I am. I know where I draw my line, and I won't believe //any// of these things without evidence.  Science and faith can't really coexist without the contortions of lengthy and ultimately unsatisfying philosophical apologias. [[Not so with atheism|http://www.400monkeys.com/God/]].  The struggle to reconcile the irreconciliable disappears for the atheist, without reducing the awe and mystery of the natural world the slightest bit. This carries with it remarkable freedom: I fear death less, because I accept that non-existence after death is just as natural and simple to understand as non-existence before birth. I am free to raise my child free of the cloud of superstition that I struggled to overcome as an adolescent. Most importantly, I am free to honor //all// faiths, putting no one above the other. But none of them work as natural philosophy. 
We have science for that. 

//Letters Journal//

''-noun''
A publication in which short papers on important results are delayed indefinitely.
[img[Soup|./images/soupcan_small.jpg]]


<<closeAll>><<permaview>><<newTiddler>><<newJournal 'DD MMM YYYY' Journal>><<saveChanges>><<slider chkSliderOptionsPanel OptionsPanel 'options »' 'Change TiddlyWiki advanced options'>>
//Morally Equivalent//

''-adj.''
Things that I would like to be the same, but aren't.
//Natural//

''-adj.''
Something I like. 
[[About the Author]]
[[About Displaying Math]]

[img[Creative Commons 3.0 License|http://i.creativecommons.org/l/by-nc-sa/3.0/us/88x31.png]]  
[[ License: Creative Commons 3.0 | http://creativecommons.org/licenses/by-nc-sa/3.0/us/ ]]



[img[http://www.eff.org/br/br.gif][http://www.eff.org/blueribbon.html]]

//One Sigma Detection//

''-noun''
Request for more funding. (See [[CDMSII|http://www.sciencemag.org/cgi/content/abstract/science.1186112?ijkey=RVTKiEZ5rRmf2&keytype=ref&siteid=sci]].)
<!--{{{-->
<div id='header' class='header'>
<div class='headerShadow'>
<span class='searchBar' macro='search'></span>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>

</div>
<div id='mainMenu'>
<span refresh='content' tiddler='MainMenu'></span>
<span id='noticeBoard' refresh='content' tiddler='NoticeBoard'></span>

</div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='MochaSideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<div id='contentFooter' refresh='content' tiddler='contentFooter'></div>
<!--}}}-->
Recursively parses blocks from a LaTeX File into a linked set of objects. 

LaTeXBlock.py:
{{{
#!/usr/bin/python

import sys, re
import StringIO


#################################################################################################
#                                                                                               #
# Definitions and basic handling functions for the LaTeXBlock class                             #
# License: Attribution-NonCommercial-ShareAlike 3.0 United States (CC BY-NC-SA 3.0)             #
#          <http://creativecommons.org/licenses/by-nc-sa/3.0/us/>                               #
#                                                                                               #
#################################################################################################

#
# Class to represent a block of a LaTeX file enclosed in \begin{} and \end{} tags. This
# block can contain other LaTeXBlocks, stored in the self.blocks[] array. 
#
class LaTeXBlock(object):
	r"""
	Represents a block of a LaTeX file enclosed in \begin{} and \end{}
	tags. A block can contain nested LaTeXBlocks, stored in self.blocks[].

	Attributes:
		type   -- environment name (e.g. 'equation'), 'Text' for plain text,
		          or None for an empty container block
		source -- raw source text of this block
		blocks -- list of nested child LaTeXBlock objects
	"""
	def __init__(self, SourceCode=None, SourceType=None):
		"""Initialization for LaTeXBlock"""
		self.type = SourceType
		self.source = SourceCode
		self.blocks = []
	def __repr__(self):
		"String representation of LaTeXBlock."
		return 'LaTeXBlock object: "%s"' % self.type

	#
	# Iterator which returns lowest-level (leaf) blocks in depth-first order.
	#
	def GetBlocks(self):
		if self.blocks:
			for bl in self.blocks:
				for b in bl.GetBlocks():
					yield b
		else:
			yield self

	#
	# Function to assemble a flat LaTeX file from a LaTeXBlock object,
	# re-wrapping each non-text leaf block in its \begin{}/\end{} tags.
	#
	def Flatten(self):
		buf = ""

		for bl in self.GetBlocks():
			if bl.type == 'Text' :
				buf = buf + bl.source
			elif bl.type is not None:
				buf = buf + '\\begin{' + bl.type + '}\n'
				buf = buf + bl.source
				# fixed: '\end{' relied on \e being an unrecognized escape;
				# '\\end{' is explicit and produces the same bytes
				buf = buf + '\\end{' + bl.type + '}'

		return buf

	#
	# Recursively reads an input stream into a hierarchical array of
	# LaTeXBlock objects.
	#
	def Read_Stream(self,stream,top=True):

		def is_literal(typ):
			# environments whose contents are taken verbatim and never
			# searched for nested \begin/\end blocks
			return typ in ('figure', 'equation', 'eqnarray')

		#
		# Read all LaTeXBlock objects out of input stream
		#
		for bl in iterLaTeX(stream):
			self.blocks.append(bl)

		#
		# Recursively read LaTeXBlock objects out of these new objects.
		# A non-top stream yielding a single block has no subblocks -- done.
		#
		if (top or len(self.blocks) > 1):
			for bl in self.blocks:

				# don't search literal environments for subblocks
				if is_literal(bl.type):
					continue

				st = StringIO.StringIO(bl.source)
				bl.Read_Stream(st,False)

		return


#
# Reads a line from a TeX file and does some minimal preprocessing. 
#
def read_TeXLine(stream):
	r"""
	Read one line from a TeX input stream and do minimal preprocessing:
	shorthand display-math delimiters \[ ... \] are rewritten as explicit
	\begin{equation} ... \end{equation} tags, so downstream parsing only
	has to recognize \begin/\end environments.

	Returns the (possibly rewritten) line; '' at end of stream.
	"""
	line = stream.readline()

	# A single global re.sub replaces every occurrence at once -- equivalent
	# to the original recompile-and-substitute-one-at-a-time loops, without
	# rescanning the line after each replacement.
	line = re.sub(r"\\\[", r"\\begin{equation}", line)
	line = re.sub(r"\\\]", r"\\end{equation}", line)

	return line


#
# Generator for a LaTeX block iterator from an input stream, splitting objects delimited
# by \begin{} and \end{} tags into separate blocks. 
#
def iterLaTeX(stream):
	r"""
	Generator yielding successive LaTeXBlock objects parsed from an input
	stream, splitting on \begin{...}/\end{...} tags. Text between
	environments is yielded as blocks of type 'Text'; each environment's
	contents are yielded as a block whose type is the environment name.
	Text following a matched tag on the same line is carried over in
	'saved' and re-parsed as the start of the next block.
	"""

	begintag = r"\\begin\{(.*?)\}"

	saved = ''
	while 1:

		SourceType = 'Text' # Default source type is plain text
		text = ''
		line = ''
		if saved:
			# leftover text from the previous iteration is treated as the
			# first line of this block
			line = saved
			saved = ''
		else:
			line=read_TeXLine(stream)
			if not line:
				return

		p = re.compile(begintag)
		match=p.search(line) # Check for \begin tag as first line of block
		if match:
			# Save type and continue.
			# NOTE(review): stripping the first character of repr() from
			# the repr'd group removes the surrounding quotes; for simple
			# environment names this is equivalent to match.group(1).
			SourceType=repr(match.group(1)).strip(repr(match.group(1))[0])
			spl = re.split(begintag,line,1)
			if spl[-1]: # Check for text on line after \begin tag
				line = spl[-1]
			else:
				line = stream.readline()

			# Copy until the corresponding \end tag.
			# NOTE(review): the trailing "\}" is a non-raw string; Python
			# keeps the backslash (no \} escape exists), but modern Python 3
			# warns about it -- consider r"\}" if this file is ported.
			tag = r"\\end\{"+SourceType+"\}"

		else: # Search until we hit a \begin tag.
			tag = begintag

		#
		# Read until we find the next tag, accumulating lines into text.
		#
		p = re.compile(tag)
		match = None
		while line:
			match = p.search(line)
			if match:
				break
			text = text + line
			line=read_TeXLine(stream)

		spl = re.split(tag, line, 1)
		if tag==begintag:
			#
			# If we found a begin tag, save everything before the tag in the text
			# of the current LaTeXBlock, and save the tag and everything following in
			# saved, to be parsed as the start of the next block. 
			#
			if spl[-1]:
				text = text + spl[0]
				saved = "\\begin{" + spl[-2] + "}" + spl[-1]
				saved = saved.rstrip()
			else:
				saved = line
		else:
			#
			# If we found an end tag, save everything before the tag in the text of the
			# current LaTeXBlock, and save everything following in saved, dropping the tag. 
			#
			if len(spl) > 1:
				text = text + spl[0]
				saved = spl[-1]
			else:
				saved = ''

		# 
		# Now create and return a LaTeXBlock object (an empty text block
		# signals end of input).
		#
		if text:
			block = LaTeXBlock(text,SourceType)
			yield block
		else:
			return

}}}
I have tossed together a little python script for linux to implement the clever password scheme outlined in [[this XKCD strip|http://xkcd.com/936/]]:

[img(100%,auto)[http://imgs.xkcd.com/comics/password_strength.png]]

Here's the script:

{{{

#!/usr/bin/python
#
# Python script to generate a random password using the XKCD 
# scheme <http://xkcd.com/936/> For a 100,000 word dictionary, 
# generates 62 bits of entropy.
#
# NOTE(review): Python 2 script (uses the print statement).

import os
import struct

# Read the system word list into memory.
# NOTE(review): 'dict' shadows the builtin of the same name; harmless in
# this short script, but worth renaming if it grows.
dict = open('/usr/share/dict/american-english')
words = dict.readlines()
dict.close()

# /dev/random may block until the kernel entropy pool has enough entropy
rand = open("/dev/random","rb")

# Draw four words at random, joined by spaces (leading space included).
passwd = ''
for i in range(0,4):
   # unpack 4 raw bytes as an unsigned 32-bit integer
   randnum = struct.unpack('I',rand.read(4))[0]
   # NOTE(review): modulo introduces a slight bias toward low-index words;
   # negligible for a ~100k-word dictionary.
   passwd = passwd + ' ' + words[randnum % len(words)].rstrip()

rand.close()

print passwd

}}}
Routines to take a SPIRES-format citation and return bibliography data. Requires [[pybtex|http://pybtex.sourceforge.net/]].

SPIRES.py:
{{{
#!/usr/bin/python

import sys, re, urllib
from pybtex.database.input import bibtex


#################################################################################################
#                                                                                               #
# Definitions and basic handling functions for SPIRES-format BibTeX citations.                  #
# Author: Will Kinney (Univ. at Buffalo, SUNY)                                                  #
# License: Attribution-NonCommercial-ShareAlike 3.0 United States (CC BY-NC-SA 3.0)             #
#          <http://creativecommons.org/licenses/by-nc-sa/3.0/us/>                               #
#                                                                                               #       
#################################################################################################

#
# Function to check if a reference is in SPIRES format
#
def is_SPIRES_format(ref):
	r"""
	Return True if ref looks like a SPIRES/INSPIRE citation key,
	e.g. 'Guth:1980zm': key text, a colon, a 4-digit year, then 2-3
	non-digit characters ending the string.

	(The original pattern used [\S*]* and repeated single-character
	classes; since '*' is itself non-whitespace, [\S*]* is equivalent to
	\S*, and [\d][\d][\d][\d] / [\D]{2,3} are \d{4} / \D{2,3}.)
	"""
	return bool(re.search(r"\S*:\d{4}\D{2,3}(?!.)", ref))

#
# Function to return bibliography data for a citation. Default behavior is to look the reference
# up on SPIRES, <http://inspirehep.net>. 
#
def get_BibTeX(ref,Bib=None,prefer_SPIRES=True):
	"""
	Return pybtex bibliography data for citation key 'ref', or None if
	no entry is found.

	ref           -- citation key (whitespace is stripped)
	Bib           -- optional seekable stream of BibTeX source to parse
	prefer_SPIRES -- if True and ref is in SPIRES format, fetch the entry
	                 from inspirehep.net instead of using Bib
	"""

	ref = ref.strip() # Get rid of whitespace

	#
	# If the reference is in SPIRES format, check SPIRES for bibliography data
	#
	if (prefer_SPIRES and is_SPIRES_format(ref)):
		# Get the BibTeX-format reference from SPIRES (network fetch)
		biblink = "http://inspirehep.net/search?ln=en&p=" + ref + "&of=hx"
		Bib = urllib.urlopen(biblink)
	elif Bib is not None:
		# rewind the caller-supplied stream so it can be re-parsed
		Bib.seek(0)

	if Bib is not None:
		parser = bibtex.Parser()
		bib = parser.parse_stream(Bib)
		if  bib.entries:
			#
			# Return bibliography entry (if any) corresponding to reference key. 
			#
			return bib.entries.get(ref,None)
		else:
			return None

	else:
		return None

#
# Makes a BibTeX-format bibliography entry from pybtex bibliography entry.
#
def MakeBibEntry(ref,BibTeX_data):
	"""Format a pybtex bibliography entry as BibTeX source text.

	'ref' is the citation key; 'BibTeX_data' is a pybtex entry with
	.type, .persons (mapping role -> list of Person objects), and
	.fields (mapping field name -> string value).

	Returns the BibTeX entry as a string, or None if either argument
	is missing (a missing 'ref' also prints a diagnostic).
	"""
	if BibTeX_data is None:
		return None
	if ref is None:
		# Parenthesized print works identically in Python 2 and 3.
		print('SPIRES.MakeBibEntry(): No reference tag provided')
		return None

	#
	# Build the entry as a list of fragments and join once at the end,
	# avoiding quadratic string concatenation.
	#
	parts = ['@' + BibTeX_data.type + '{' + ref + ',\n']

	#
	# First handle author data: "Last, First Middle" joined by ' and '.
	#
	for key in BibTeX_data.persons.keys():
		names = []
		for person in BibTeX_data.persons[key]:
			name = ''
			if person.last():
				name = person.last()[0]
				if person.first():
					name = name + ', ' + person.first()[0]
					if person.middle():
						name = name + ' ' + person.middle()[0]
			names.append(name)
		parts.append(key + ' = "' + ' and '.join(names) + '",\n')

	#
	# Now transcribe other fields verbatim.
	#
	for field in BibTeX_data.fields:
		parts.append(field + ' = ' + '"' + BibTeX_data.fields[field] + '",\n')
	parts.append('}\n\n')
	return ''.join(parts)
}}}
Next time it is a clear night, go out and look at the sky. What do you see?

Stars. 

Now ask yourself: Why? It seems like a silly question to ask, but it is anything but. Answering the question speaks volumes about the history, and the future, of astronomy. 

When you look at the sky, here is what you see:

[img[The sky in visible light|./images/NewEyes/visual_mw.gif]]

This picture is an image of the entire night sky as seen in visible light, displayed as a Mollweide projection. A map of the Earth using the same projection looks like this:

[img[Earth in Mollweide projection|./images/NewEyes/Mollweide-projection.jpg]]

For almost all of human history, astronomy was done by going outside and looking at the sky. With the naked eye on a dark night, you can see about a thousand stars, perhaps the moon and a handful of planets glowing by reflected light from the sun. The first scientist to systematize naked-eye astronomy was [[Hipparchos|http://en.wikipedia.org/wiki/Hipparchus]], who lived from 190 to 120 B.C.E. Hipparchos made the first catalog of stars, classifying them by brightness into six bins which we now call //magnitudes//. Hipparchos called the brightest stars //first// magnitude, the next brightest //second// magnitude, and so on, with the dimmest stars you can see being //sixth// magnitude. Perhaps the greatest nake-eye astronomer after Hipparchos was [[Tycho Brahe|http://en.wikipedia.org/wiki/Tycho_Brahe]], whose observations at his observatory //Uraniborg// were accurate enough for [[Johannes Kepler|http://en.wikipedia.org/wiki/Johannes_Kepler]] to determine that planets orbit the sun in elliptical orbits, not circular orbits as has been assumed by [[Copernicus|http://en.wikipedia.org/wiki/Nicolaus_Copernicus]].

We now know that there are many, many stars in the sky which are too dim to see with the naked eye, but the ancients did not know this. Perhaps they may have guessed that what they could see was not all there was, but how could they prove such a thing? It was not until [[Galileo|http://en.wikipedia.org/wiki/Galileo_Galilei]] first thought to turn a telescope to the heavens that it became possible to prove that there are stars too faint to see with the naked eye. What limits your eye's ability to see very dim stars is the size of your pupil, which limits the amount of light your eye can collect. A telescope effectively gives you a bigger pupil, collecting and concentrating dim light, making the invisible visible. Modern optical telescopes have //huge// pupils, the largest of them such as the [[Keck telescope|http://en.wikipedia.org/wiki/W._M._Keck_Observatory]] with diameters of 10 meters or so. Larger telescopes are in the works. Note that it is not the //magnification// of a telescope that is important here, but the //light gathering power//, which is proportional to the surface area of the telescope's pupil, or the square of its diameter. A 10-meter telescope has 100 times the light gathering power of a 1-meter telescope, and // 400 million// times the light gathering power of your eye, assuming your pupil is about 5 mm across. With the invention of the telescope, we have literally built ourselves new, bigger eyes. The image of the sky above was created by combining telescope images, and shows features dimmer than would be visible with the naked eye. With the telescope, it is possible to tell that the cloudy band of light in the sky called the //Via Lacta//, or Milky Way, is composed of the combined light of billions of dim stars arranged in the thin disk of our galaxy, which is placed at the equator of the image above. 
The first crude maps of our galaxy were produced by [[William Herschel|http://en.wikipedia.org/wiki/William_Herschel]] and his sister [[Caroline|http://en.wikipedia.org/wiki/Caroline_Herschel]], using what at the time was the largest telescope in the world. Herschel assumed (incorrectly) that dim stars were further away than bright ones, and used this to create the first three-dimensional map of the universe.

The other thing that limits your eye is the sensitivity of your retina. Even with a telescope amplifying the amount of light their eyes could collect, astronomers from Galileo to Herschel still used their retinas to detect that light. And retinas are very limited instruments: our cone cells can only detect three colors of light, and form our perceived palette of colors from combinations of those three. Other animals, such as dogs, have more limited retinas, and see fewer colors. Some animals, like bees, can see more colors than humans -- bees can see in the near ultraviolet as well. Modern astronomers no longer look through telescopes with their eyes, but use [[charge-coupled detectors|http://en.wikipedia.org/wiki/Charge-coupled_device]] to detect photons and store the images in computers. 

The first step away from using the eye as an astronomical detector came with the invention of photography, and beginning in the late 19th Century, photographic plates replaced the eye at the focus points of astronomical telescopes. Photography makes possible something that is impossible with the human eye: long-exposures. A photographic telescope can collect light for minutes or hours, assembled into a //single// image, making it possible to detect dimmer and dimmer objects as the exposure time is lengthened. This enhances even further the light gathering power of a large telescope. 

The advent of photography also allowed for another important advance: photographs made it possible to systematize Hipparchos' ancient magnitude system by accurately measuring the brightness of stars. A standard of brightness based on observation by eye was inevitably subjective, but photographic plates are objective and repeatable measurement standards. The calibration of the magnitude system to photographic plates was undertaken by [[Henrietta Leavitt|http://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt]], and led to groundbreaking discoveries. Perhaps the most important of these was the realization that certain types of variable stars called [[Cepheid variables|http://en.wikipedia.org/wiki/Cepheid_variable]] have a //period-luminosity relationship//. This means that the rate of variation in the brightness of the star is related to the luminosity of the star, making Cepheids an example of what astronomers call a //standard candle//, or an object of known intrinsic brightness. 

Standard candles are incredibly powerful tools, because they make it possible to measure the distances to objects in the sky. Astrometry, or measuring the position on the sky of celestial objects is a comparatively easy job. Measuring how far away those objects are is incredibly hard, and is a central problem for astronomers: How far away is the Sun? Jupiter? Sirius? The Andromeda Galaxy? Without traveling into space, the only direct way to measure the distances to astronomical objects is through triangularion, or [[parallax|http://en.wikipedia.org/wiki/Parallax]], but  this is only an effective technique for stars very nearby the sun. Standard candles make it possible to extend distance measurements nearly to the edge of the observable universe. The technique is simple. If we know the intrinsic brightness of an object, for example a 60-watt light bulb, we can infer its distance from how bright it //appears// to be, since the apparent brightness of a light source scales as the inverse square of its distance $r$:
$$
{\rm Apparent\ brightness} = \frac{\rm Intrinsic\ brightness}{r^2}
$$
Therefore, if we know the intrinsic brightness of an object, we can infer its distance by measuring its apparent brightness. This requires a calibrated, quantitative measure of brightness, and this was what was provided by Henrietta Leavitt's calibrated magnitude scale based on photographic plates. Leavitt's Cepheid variable stars as a standard candle were the tool that allowed [[Edwin Hubble|http://en.wikipedia.org/wiki/Edwin_Hubble]] to first measure the distance to the spiral "nebulae" in the early 1920's and recognize that they were galaxies like the Milky Way, and to first measure the expansion of the universe, leading to the development of the Big Bang model for cosmology, one of the most profound scientific developments in human history. Hubble's discovery of cosmological expansion,  following, fewer than twenty years after Leavitt's discovery of Cepheid variables, was directly enabled by the replacement of the human eye by photographic plates in telescopes. 

Still, photographic plates shared a weakness in common with the human eye: they were only sensitive to a narrow band of wavelength. Early photographic emulsions were only sensitive to the blue portion of the visible spectrum, which made it possible to process black-and-white film in darkrooms lit by a red safelight. Later //panchromatic// films were sensitive to the entire visible spectrum, but were still limited to the same wavelengths seen by the naked eye. This was on purpose: for terrestrial photography, the goal is to reproduce the sensory experience of the eye. Astronomers inherited this limitation, and it took nearly a century of ingenuity and creativity to overcome it. 

Looking only at visible wavelengths limits how you see the world, simply because the visible spectrum is such a tiny fraction of the total spectrum of electromagnetism. The human eye is sensitive to wavelengths between 390 and 750 nanometers, but naturally occurring electromagnetic waves can be as short as 10 picometers ($\gamma$ rays) or as long as 100 kilometers (radio waves), a factor of more than a quadrillion in wavelength:

[img[Image Credit: XKCD <http://xkcd.com>|http://imgs.xkcd.com/comics/electromagnetic_spectrum_small.png]]
[...under construction]
In reading the recent media coverage of the [[plagiarism allegations|https://www.nytimes.com/2012/08/20/business/media/scandal-threatens-fareed-zakarias-image-as-media-star.html]] against journalist [[Fareed Zakaria|https://en.wikipedia.org/wiki/Fareed_Zakaria]], I came across what was to me an interesting thing. In addition to copying text from historian [[Jill Lepore|https://en.wikipedia.org/wiki/Jill_Lepore]] (which was clearly plagiarism), Zakaria was also accused of [[giving identical commencement speeches|http://articles.boston.com/2012-06-08/metro/32101945_1_commencement-talks-graduation-speakers-speeches]] at Harvard and Duke, which the [[National Review at least|http://www.nationalreview.com/media-blog/313676/how-much-more-plagiarism-fareed-zakaria-out-there-greg-pollowitz]] implies is an act of plagiarism. Is there even such a thing as "self-plagiarism"? A little Googling shows that lots of people think so, for example:

- New Yorker writer Jonah Lehrer was [[accused of self-plagiarism|http://www.slate.com/articles/life/culturebox/2012/06/jonah_lehrer_self_plagiarism_the_new_yorker_staffer_stopped_being_a_writer_and_became_an_idea_man_.html]] in 2012.

- An [[article in the blog Economics Intelligence|http://economicsintelligence.com/2011/09/12/bruno-frey-more-cases-of-self-plagiarism-unveiled/]] about the self-plagiarism scandal surrounding economist Bruno Frey. 

- An [[article in Nature|http://www.nature.com/news/2010/101208/full/468745a.html]] about self-plagiarism accusations against engineering professor Reginald Smith. 

- [[Splat|http://splat.cs.arizona.edu/]], a self-plagiarism detection tool from the University of Arizona Computer Science Department. "It is our belief that self-plagiarism is detrimental to scientific progress and bad for our academic community."

- A [[Journal of Media and Culture article|http://journal.media-culture.org.au/0510/07-green.php]] on "The Scourge of Self-Plagiarism". 

- [[Eminent Scientist Denies Self-Plagiarism|http://blogs.nature.com/news/2012/04/eminent-chemist-denies-self-plagiarism-in-space-dinosaurs-paper.html]], in Nature. (The article links to the [[American Chemical Society's Ethics Guidelines|http://pubs.acs.org/userimages/ContentEditor/1218054468605/ethics.pdf]], which explicitly prohibit self-plagiarism. )

- [[A Study of Self-Plagiarism|http://www.insidehighered.com/views/2010/12/03/zirkel]] from //Inside Higher Ed//.

It's not hard to find other examples of academic or professional writers getting in trouble for re-using their own words. Despite having published academic papers for 17 years, I admit I was pretty surprised by this. In my field (Cosmology and High Energy Physics), re-use of material is so common as to be completely unremarkable. For example, virtually every paper released in High Energy Physics is published for free on the [[arXiv.org server|http://www.arXiv.org]], typically before the paper is even submitted to a peer-reviewed journal. Often the version of the paper ultimately published by the journal is somewhat different than the version posted on arXiv, but I (and many other researchers) make a habit of updating the arXiv version with the changes made to the paper in the referee process. Journals tolerate the practice, presumably because the editors know that there would be a revolt among high-energy physicists if they attempted to assert copyright against arXiv. When arXiv got started at Los Alamos National Lab in the early 1990's ([[before|http://opcit.eprints.org/talks/ahis/arxiv-history.html]] the widespread adoption of the World Wide Web), its email-based distribution of research articles almost certainly violated the copyright restrictions of every academic journal then in existence. But it became indispensable to the community before the journals noticed the threat to their exclusivity, and the academic publishing industry had no real choice but to retroactively permit the posting of papers on arXiv.  The American Physical Society, for example, now [[explicitly allows republication on the web|http://publish.aps.org/copyrightFAQ.html#eprint]], as long as no fee is charged for access. Elsevier's policy is [[less clear|http://www.elsevier.com/wps/find/authorsview.authors/preprints]]. 
Elsevier only began to allow updates of electronic versions to match the published version in 2004, and [[does not allow electronic preprints at all|http://www.elsevier.com/wps/find/authorsview.authors/rights]] for articles in //Lancet// and //Cell//. //Science// [[does not allow submission|https://www.sciencemag.org/site/feature/contribinfo/faq/index.xhtml#prioronline_faq]] of papers which have been posted online. //Nature// [[makes you wait six months|http://www.nature.com/authors/policies/license.html]], but then allows Creative Commons licensing of papers, an unusually enlightened attitude. Whatever the response of individual journals, arXiv may well be the most significant (and under-appreciated) act of civil disobedience in the history of academic publishing. 

If one does not consider posting a preprint on arXiv "republication", one can find many examples of authors releasing multiple, nearly identical versions of the same paper. For example, [[Michael Turner|http://astro.uchicago.edu/people/michael-s-turner.shtml]] (somewhat infamously) released [[multiple versions of the same conference proceeding|http://inspirehep.net/search?ln=en&ln=en&p=find+a+turner%2C+m.+s.+and+t+solved&of=hb&action_search=Search&sf=&so=d&rm=&rg=25&sc=0]] as preprints simultaneously in 1998. Turner is a distinguished scientist, having held posts as Chair of of the Department of Astronomy and Astrophysics at the University of Chicago, as Chief Scientist at Argonne National Laboratory, and as Assistant Director for the Mathematical and Physical Sciences at the National Science Foundation. Is Turner guilty of academic misconduct? All of the papers I link to above were substantially identical, and were published as separate articles by different academic presses. One of the papers was [[published in an Elsevier journal|http://www.sciencedirect.com/science/article/pii/S0920563298005052]], which [[claims exclusive copyright on the work|http://www.elsevier.com/wps/find/termsconditions.cws_home/termsconditions]]. These were conference proceedings, which means that Turner also gave exactly the same talk at multiple conferences. Was that self-plagiarism too? If it was, then pretty much every scientist I know is guilty, myself included. It is ubiquitous to present the same contribution at multiple conferences, and equally ubiquitous to submit substantially identical papers to multiple proceedings publishers, as well as to include text and figures in conference proceedings that are also published in peer-reviewed papers. These proceedings are at least sometimes [[sold at very high prices|http://www.amazon.com/The-Dawn-LHC-Era-Proceedings/dp/981283835X/ref=sr_1_2?ie=UTF8&qid=1345473209&sr=8-2&keywords=TASI+Proceedings]]. 
This sort of blatant recycling is frowned upon in peer-reviewed journals, where there is an expectation that an author is submitting a work that was not previously published.  However, it is also common to publish a [[short version|http://arxiv.org/abs/hep-ph/9511460]] of a research result in a letters journal, followed by a [[longer version|http://arxiv.org/abs/hep-ph/9512241]] in a regular journal, with substantial overlap in content. Is //this// misconduct? How about if the same content is again [[included in a PhD thesis|http://www.physics.buffalo.edu/whkinney/cv/thesis/thesis.ps]]?

There seems to be [[a lot of confusion|https://en.wikipedia.org/wiki/List_of_academic_journals_by_preprint_policy]] on this issue. The National Science Foundation's [[regulations on research misconduct|http://www.nsf.gov/oig/resmisreg.pdf]] never mention self-plagiarism, and define plagiarism as "the appropriation of //another person’s// ideas, processes, results or words without giving appropriate credit." (Italics mine.) However, the NSF has [[investigated researchers|http://www.nsf.gov/pubs/2003/oigsept2003/pdf/investigations.pdf]] (p. 38) over incidents of self-plagiarism in grant proposals. (In the case linked to here, the finding was that "Because the university did not find the PI’s behavior to be a serious deviation from accepted practice within his community, the conduct did not meet the federal definition of research misconduct.") My own university [[defines|http://undergrad-catalog.buffalo.edu/policies/course/integrity.shtml]] plagiarism as "Copying or receiving material from any source and submitting that material as one’s own, without acknowledging and citing the particular debts to the source (quotations, paraphrases, basic ideas), or in any other manner representing the //work of another// as one’s own." (Italics mine.) The university policy separately prohibits "Submitting academically required material that has been previously submitted—in whole or in substantial part—in another course, without prior and expressed consent of the instructor", but does not classify this as plagiarism. Furthermore, in an educational context, the "no resubmission" rule has a legitimate pedagogical purpose: writing a paper for a class is an exercise, the purpose of which is entirely missed if the student just re-submits a paper he or she used last semester in a different course. It's not plagiarism, but it //is// a failure to complete the course requirements as specified. 

I would like to argue that there is no such thing as self-plagiarism. Instead, the concept is entirely a byproduct of the insane norms of copyright in the academic world. As such, it is a case study in what is wrong with academic publishing, and what needs to be changed. 

Consider [[this white paper|http://www.ithenticate.com/Portals/92785/media/ith-selfplagiarism-whitepaper.pdf]] on self-plagiarism put out by the software company [[Ithenticate|http://www.ithenticate.com]], which sells anti-plagiarism software. Ithenticate quotes the [[Merriam Webster dictionary|http://www.merriam-webster.com/dictionary/plagiarizing]] to classify plagiarism as  "literary theft", regardless of the source. There is an effort here to conflate //plagiarism// with //copyright violation//, and to classify both as "theft". Equating copyright infringement with theft is a noxious Orwellian meme [[propagated by media companies|https://www.youtube.com/watch?v=HmZm8vNHBSU]], but in the case of self-plagiarism, the audacity is particularly jaw-dropping. Let me get this straight: I write an article for a journal (for which I am paid nothing), it is refereed by other scientists like me (also for nothing), and the journal sells the article back to my university as part of a subscription which runs upward of $10,000 a year. Then if I want to use a paragraph from my own paper somewhere else, //I'm// stealing from the //journal//? What the //fuck//? It is common knowledge that the music industry is a [[cesspool of greed and servitude|http://www.vanityfair.com/hollywood/features/2011/10/prince-bio-201110]] (see also [[Courtney Love Does the Math|http://www.salon.com/2000/06/14/love_7/]]), but in this respect I would argue that academic publishing is even worse. At least Courtney Love gets paid something. 

It all comes down to who owns not just your words, but your ideas. In the traditional model of scientific publishing, when an article I write is accepted for publication by a journal, I sign a [[copyright transfer agreement|http://forms.aps.org/author/copytrnsfr.pdf]] which assigns ownership of copyright to the publisher, with some rights retained by the author, such as the right to put the preprint version on arXiv.org or on a personal web page. When I do this, I am agreeing to a contract with the publisher, and I am in principle doing so voluntarily. In practice, this is not really "voluntary", since academic publishers are a powerful cartel, and my promotion, tenure, and research funding are largely predicated on my ability to publish in sufficiently prestigious journals. Consider [[Physical Review Letters|http://prl.aps.org/]], which, aside from //Science// or //Nature//, is probably the most prestigious journal in which cosmologists regularly publish. PRL nominally has higher standards than its sister journal [[Physical Review D|http://prd.aps.org/]] since it is a "letters" journal, and typically involves review by at least two referees, instead of the typical single referee for PRD.  Back in the day when people read journals that were actually printed on physical paper, bound, and sent out to libraries, letters journals occupied an important niche. PRL and others like it were the place that important results of general interest were slotted for rapid publication: PRL went out weekly, instead of monthly or quarterly like standard journals, and the articles were short, a maximum of four pages, to accommodate the exigencies of publishing on an accelerated schedule. Getting a letter published in PRL meant something, because it signified that your work was important enough to be worth the trouble to rush out. Now getting published in PRL instead of PRD means ... pretty much nothing. 
Far from being a "rapid communication" journal, PRL is usually //slower// to publish than a standard journal, because the requirement of multiple referees greatly slows the review process, especially when the referees disagree with one another, as frequently happens. Not that it matters much, because in the age of arXiv, by the time a good paper is published in a peer-reviewed journal, it has already been read and cited multiple times. Letters journals no longer have any purpose whatsoever, and the only reason they continue to exist is the perception of prestige. In order to publish in PRL (or any other major journal), I am required to sign away essentially all rights to my own intellectual output, and I am paid in //prestige//, a quantity which last I checked is only fungible in conference rooms full of old white guys who spend their time giving each other awards. And the journal owns my words in perpetuity. 

Now, if I were to take a paper I had published in PRL and re-publish the same paper in, say [[Physics Letters B|http://www.journals.elsevier.com/physics-letters-b/]], I would be violating the copyright agreement that I "voluntarily" agreed to with PRL, as well as my agreement with PLB that I am submitting previously unpublished work. Both journals would be very unhappy with me, and rightly so. This would be true even if the PLB paper were a completely re-written description of the same research. But I haven't plagiarized anything. I //have// broken a commercial contract, which is something, but it is not plagiarism, and it is most certainly not "theft". Plagiarism, in its broadest sense, is the act of misleading the reader about the origin of an idea. By republishing, I am not misleading anyone about the origin of the idea, unless you consider the "origin" to be PRL and not //me//. And that's the crux of the "self-plagiarism" argument: conflating the source of a given piece of text or an idea with the technical use of the word //source// to mean the reference to a published article. //Even if// I have entered a contract with a publisher to give them exclusive rights to disseminate a research result, that doesn't mean the journal is the //source// of the research. The journal is just an intermediary. 

The second argument often used against self-plagiarism is that it constitutes a misrepresentation of research output: it makes you look as if you have written more papers than you really did. I don't think this holds much water either, since all that means is that counting the //number// of papers that an academic has produced is a terrible measure of the quality of his or her scholarship. (Consider Hendrik Sch&ouml;n, who wrote an amazing 90 articles in three years, or a new article every twelve days. He didn't have to self-plagiarize: he just [[made it all up|http://www.salon.com/2002/09/16/physics/]].) University administrators can be lazy, and they tend to like a single clean metric to evaluate scholarship. My college is fond of using the [[h-index|https://en.wikipedia.org/wiki/H-index]] as a metric for faculty output. The h-index [[has its own problems|http://www.sciencesurvivalblog.com/wp-content/uploads/laloe_mosseri.pdf]], but self-plagiarism isn't one of them: bumping up your publication count by recycling work has no effect on your h-index, and might even serve to lower it by diluting citations among multiple papers. The bottom line is that using //any// single metric to evaluate academic performance is bound to produce poor results. And claiming academic misconduct because a given behavior distorts an oversimpified metric goes beyond laziness, it's nasty and punitive. 

What can be done to make things better? 

- Allow authors to retain the rights to their work. Authors can extend to publishers an exclusive license for commercial publication while still retaining copyright, including all rights for non-commercial use or re-use. The [[Creative Commons|http://creativecommons.org/]] license (which I use for this Wiki) accomplishes this in a simple and transparent way. 

- Make the exclusive licenses extended to publishers time-limited, perhaps to six months or a year. This allows journals to serve their purpose (peer-review and dissemination of new research results) without locking up an author's ideas in perpetuity. 

- Require //all// publicly-funded research to be open access. The National Institutes of Health already do this, but their efforts are [[under severe fire|http://news.sciencemag.org/scienceinsider/2012/01/bill-blocking-nih-public-access-.html]]. Other agencies have been slow to follow suit. Opposition to open access requirements has nothing to do with academic quality or integrity, and everything to do with protecting the profits of the high-handed and unaccountable publishing industry. Taxpayers should be livid that open access isn't already the norm. 

- Authors should carefully cite their own work. Even the most strident self-plagiarism opponents define self-plagiarism as re-using your own work //without citation//. Transparency is always good, and there is rarely a legitimate impediment to simply noting that a given piece of text has appeared elsewhere. 

- Join the [[boycott of Elsevier|http://thecostofknowledge.com/]]. Academic presses like the [[American Physical Society|http://www.aps.org/]] and its European equivalent the [[Institute of Physics|http://www.iop.org/]] have been maddeningly slow to adopt open access policies, but at least they are nonprofits. Elsevier is a purely commercial entity, and has been spending a lot of money [[lobbying in opposition|http://www.slate.com/blogs/future_tense/2012/02/28/research_works_act_elsevier_and_politicians_back_down_from_open_access_threat_.html]] to open access for academic research. I fail to see how scientists can continue to support an organization which uses profits from their unpaid labor to be used to lobby against the interests of its own contributors and referees. 

- Plagiarize yourself. Make re-use and re-mixing of text the norm, rather than the exception. Authors stand to gain from this, since any given idea will get more exposure the more venues in which that idea is available. The only ones who stand to lose are cartels who make a profit off the artificial scarcity created by restricting access. 
[[Will Kinney's|About the Author]] web notebook
Primordial Soup
Einstein's Special Theory of Relativity unified space and time, providing  a picture of a four-dimensional "spacetime" which treats time as just another coordinate. It is hard to visualize or diagram four-dimensional spacetime, but it is easy to visualize a lower-dimensional analog, consisting of one spatial dimension and one time dimension, called a "1+1" spacetime. We can draw a 1+1 spacetime as a graph of time \(t\) versus spatial position \(x\):
[img[1+1 spacetime|./images/Spacetime/xtdiagram.png]]
A point in space and time (your birth, the Trinity Test, the world record 100m sprint)  is called an //event// \(P\):
[img[Event|./images/Spacetime/event.png]]
The complete history of any object (a proton, or planet, or you) consists of a collection of events, called a //world line//:
[img[World Line|./images/Spacetime/worldline.png]]
In spacetime, your world line is your entire life, a static object which stretches out from beginning to end as a connected sequence of events. Other people's world lines intersect and diverge from yours, forming a spaghetti of cause and effect. (For a clever illustration of world lines, look at [[this comic from xkcd|http://xkcd.com/657/]].)

You measure time as the distance you have traveled along your world line. Another observer measures time as the distance she has traveled along her world line -- these times may or may not be the same, even for world lines which start and end at the same two events (\(P_1\) and \(P_2\) in the diagram below), just as the distance along a path between two points in space depends on the path. 
[img[Proper Time|./images/Spacetime/propertime.png]]
Such a distance along a world line is called the //proper time//, which we call \(s\) to distinguish it from the time \(t\) on the vertical axis on the diagram. Spacetime has a funny property: a straight line in spacetime is sometimes the //longest// distance between two points. In the diagram above, the proper time \(s_a\) measured by the observer \(a\) moving along the straight (blue) path in spacetime is //longer// than the proper time \(s_b\) measured by the observer \(b\) moving along the curved (red) path. Notice that the spatial position \(x\) of the observer \(a\) does not change along his path in spacetime: observer \(a\) is stationary. However, the spatial position \(x\) of the observer \(b\) does change along her path in spactime: observer \(b\) is moving. Since the proper time \(s_b\) measured by observer \(b\) is less than the proper time \(s_a\) measured by observar \(a\), that means that the moving observer's clock is running slower than the stationary observer's clock. 

How is it that straight lines in spacetime can be longer than curved lines? To understand this, we must look in more detail at how distances in spacetime are measured. Recall the Pythagorean theorem for triangles in Euclidean geometry:
[img[Pythagorean Theorem|./images/Spacetime/triangle.png]]
The length \(\ell\) of the hypotenuse is given in terms of the lengths \(x\) and \(y\) of the legs by
\begin{equation}
\ell^2 =x^2 + y^2.
\end{equation}

Spacetime works differently. The rules of geometry in spacetime do not follow the rules of Euclidean geometry. Suppose we draw a triangle in spacetime where one leg goes along the spatial direction \(x\) and the other goes along the time direction \(t\):
[img[Pythagorean Theorem|./images/Spacetime/triangle_st.png]]
For a triangle in spacetime, the length \(s\) of the hypotenuse is given by
\begin{equation}
s^2 = t^2 - x^2.
\end{equation}
Note the minus sign. Spaces for which the Pythagorean rule has a minus sign in this way are called //hyperbolic// geometries. We see that our simple drawings of \(t\) vs. \(x\) do not fully capture the geometry of spacetime, since the drawing is Euclidean, and spacetime is not. In spacetime, a straight line is not always the shortest distance between two points: sometimes it's the //longest//. We need to keep in mind that our spacetime diagram is just a cartoon of the real thing. 

Let us look again at our two observers taking different paths between events \(P_1\) and \(P_2\):
[img[Proper Time|./images/Spacetime/propertime.png]]
Observer \(a\) stays at the same point in space, and moves only through time. In other words, observer \(a\) is //stationary//. Observer \(b\), on the other hand, changes position \(x\) as she moves through time \(t\): observer \(b\) is in motion. The distance \(s_a\) traveled by observer \(a\) is
\begin{equation}
s_a^2 = (\Delta t)^2 = (t_2 - t_1)^2.
\end{equation}
This means that the //proper time//, or the elapsed time as seen by observer \(a\) is just the time as plotted on the diagram, \(s = t_2 - t_1\). We can see now that the spacetime coordinate \(t\) represents the time as measured by a //stationary// observer. All stationary observers agree on the difference in time between events \(P_1\) and \(P_2\). The time \(s_b\)  measured by the observer \(b\) is different, however,
\begin{equation}
s_b^2 = (\Delta t)^2 - (\Delta x)^2 < s_a^2
\end{equation}
The moving observer measures //less// time between events \(P_1\) and \(P_2\): her clock ticks slower than the clock of an observer at rest. This is the famed //time dilation// effect in Special Relativity. 

Now consider an observer moving along a path which makes a \(45^\circ\) angle in the \(x,t\) space, such that \(\Delta x = \Delta t\). Such a world line corresponds to an observer moving at the speed of light: for example, if we measure distance in light years and time in years, then light goes one light year per year.
[img[A null world line|./images/Spacetime/null.png]]
The proper time measured by such an observer is
\begin{equation}
\Delta s^2 = (\Delta t)^2 - (\Delta x)^2 = 0.
\end{equation}
The path has //zero// length: the observer's clock has stopped completely! What about an observer moving faster than the speed of light, \(\Delta x > \Delta t\)? The //square// of the proper time is then negative:
\begin{equation}
\Delta s^2 = (\Delta t)^2 - (\Delta x)^2 < 0,
\end{equation}
which means that the proper time along such a path is an imaginary number, a nonsensical answer. For our picture of spacetime to make sense, observers are only allowed to move through spacetime on paths with real proper time, \(\Delta s^2 > 0\). It is impossible to go faster than light!
//Standard Model//

''-noun''
A theory of particle physics which perfectly explains every experiment, but which we know is wrong. 
//String Theory//

''-noun''
A particle physics theory which predicts anything. Requires [[Supersymmetry]].
//Student Fees//

''-noun''
Tuition.
/*{{{*/
/*Mocha TiddlyWiki Theme*/
/*Version 1.0*/
/*Design and CSS originally by Anthonyy, ported to TiddlyWiki by Saq Imtiaz.*/
/*Modified by Will Kinney*/
/*}}}*/
/*{{{*/
/* Page shell: content column margins and the theme's base typography. */
#contentWrapper {
	margin: 0 3.4em;
	/* Lucida Grande for the Macs, Tahoma for the PCs */
	font-family: Lucida Grande, Tahoma, Arial, Helvetica, sans-serif;
	font-size: 13px;
	line-height: 1.6em;
	color: #666;
}

/* Page header bar. */
.header {
	background: #fff;
	padding-top: 10px;
	clear: both;
	border-bottom: 4px solid #948979;
}

.headerShadow {
	padding: 2.6em 0em 0.5em 0em;
}

/* Site title. Fixed: the font stack was written
   "'Lucida Grande' sans-serif" (missing comma), which is invalid CSS,
   so conforming browsers dropped the whole font-family declaration. */
.siteTitle {
	font-family: 'Lucida Grande', sans-serif;
	font-weight: bold;
	font-size: 32px;
	color: #003399;
	margin-bottom: 30px;
	background-color: #FFF;
}

.siteTitle a {
	color: #003399;
	border-bottom: 1px dotted #003399;
}

.siteSubtitle {
	font-size: 1.0em;
	display: block;
	margin: .5em 3em;
	color: #999999;
}

/* Left-hand navigation column. */
#mainMenu {
	position: relative;
	float: left;
	margin-bottom: 1em;
	display: inline;
	text-align: left;
	padding: 2em 0.5em 0.5em 0em;
	width: 13em;
	font-size: 1em;
}

/* Right-hand sidebar column. */
#sidebar {
	position: relative;
	float: right;
	margin-bottom: 1em;
	padding-top: 2em;
	display: inline;
}

/* Central tiddler display area, between the two columns. */
#displayArea {
	margin: 0em 17em 0em 15em;
}

.tagClear {
	clear: none;
}

/* Dark page footer. */
#contentFooter {
	background: #575352;
	color: #BFB6B3;
	clear: both;
	padding: 0.5em 1em;
}

#contentFooter a {
	color: #BFB6B3;
	border-bottom: 1px dotted #BFB6B3;
}

#contentFooter a:hover {
	color: #FFFFFF;
	background-color: #575352;
}

/* Default link colors, including links in the sidebar slider panel. */
a,
#sidebarOptions .sliderPanel a {
	color: #003399;
	text-decoration: none;
}

a:hover,
#sidebarOptions .sliderPanel a:hover {
	color: #003399;
	background-color: #F5F5F5;
}

/* Buttons inside tiddlers and the editor footer. */
.viewer .button,
.editorFooter .button {
	color: #666;
	border: 1px solid #003399;
}

.viewer .button:hover,
.editorFooter .button:hover {
	color: #fff;
	background: #003399;
	border-color: #003399;
}

.viewer .button:active,
.viewer .highlight,
.editorFooter .button:active,
.editorFooter .highlight {
	color: #fff;
	background: #575352;
	border-color: #575352;
}

/* Main-menu entries rendered as block-level rows. */
#mainMenu a {
	display: block;
	padding: 5px;
	border-bottom: 1px solid #CCC;
}

/* Fixed: this rule previously read "#mainMenu a:link, #navlist a:visited".
   "#navlist" is a dead selector left over from the original (non-TiddlyWiki)
   theme this stylesheet was ported from, so visited menu links were unstyled. */
#mainMenu a:link,
#mainMenu a:visited {
	color: #003399;
	text-decoration: none;
}

#mainMenu a:hover {
	background: #000000 url(arrow.gif) 96% 50% no-repeat;
	background-color: #F5F5F5;
	color: #003399;
}

#mainMenu a:hover,
#mainMenu a:active,
#mainMenu .highlight,
#mainMenu .marked {
	background: #000000 url(arrow.gif) 96% 50% no-repeat;
	background-color: #F5F5F5;
	color: #003399;
}

#mainMenu span {
	position: relative;
}

/* The menu tiddler uses explicit block links; suppress its line breaks. */
#mainMenu br {
	display: none;
}

#sidebarOptions a {
	color: #999;
	text-decoration: none;
}

#sidebarOptions a:hover {
	color: #4F4B45;
	background-color: #F5F5F5;
	border: 1px solid #fff;
}

#sidebarOptions {
	line-height: 1.4em;
}

/* Individual tiddler framing. */
.tiddler {
	padding-bottom: 40px;
	border-bottom: 1px solid #DDDDDD;
}

.title {
	color: #003399;
}

.subtitle,
.subtitle a {
	color: #999999;
	font-size: 1.0em;
	margin: 0.2em;
}

.shadow .title {
	color: #948979;
}

/* Tiddler toolbar links (edit, close, ...). */
.selected .toolbar a {
	color: #999999;
}

.selected .toolbar a:hover {
	color: #4F4B45;
	background: transparent;
	border: 1px solid #fff;
}

.toolbar .button:hover,
.toolbar .highlight,
.toolbar .marked,
.toolbar a.button:active {
	color: #4F4B45;
	background: transparent;
	border: 1px solid #fff;
}

.listLink,
#sidebarTabs .tabContents {
	line-height: 1.5em;
}

.listTitle {
	color: #888;
}

/* Sidebar tab panes. */
#sidebarTabs .tabContents {
	background: #fff;
}

#sidebarTabs .tabContents .tiddlyLink,
#sidebarTabs .tabContents .button {
	color: #999;
}

#sidebarTabs .tabContents .tiddlyLink:hover,
#sidebarTabs .tabContents .button:hover {
	color: #4F4B45;
	background: #fff;
}

#sidebarTabs .tabContents .button:hover,
#sidebarTabs .tabContents .highlight,
#sidebarTabs .tabContents .marked,
#sidebarTabs .tabContents a.button:active {
	color: #4F4B45;
	background: #fff;
}

/* Tab colors; the fuller rules just below override these base values. */
.tabSelected {
	color: #fff;
	background: #948979;
}

.tabUnselected {
	background: #ccc;
}

.tabSelected,
.tabSelected:hover {
	color: #fff;
	background: #948979;
	border: solid 1px #948979;
	padding-bottom: 1px;
}

.tabUnselected {
	color: #999;
	background: #eee;
	border: solid 1px #ccc;
	padding-bottom: 1px;
}

#sidebarTabs .tabUnselected {
	border-bottom: none;
	padding-bottom: 3px;
}

#sidebarTabs .tabSelected {
	padding-bottom: 3px;
}

#sidebarTabs .tabUnselected:hover {
	border-bottom: none;
	padding-bottom: 3px;
	color: #4F4B45;
}

/* "Options" slider panel in the sidebar. */
#sidebarOptions .sliderPanel {
	background: #fff;
	border: none;
	font-size: .9em;
}

#sidebarOptions .sliderPanel a {
	font-weight: normal;
}

#sidebarOptions .sliderPanel input {
	border: 1px solid #999;
}

/* Content elements rendered inside the tiddler viewer. */
.viewer blockquote {
	border-left: 3px solid #948979;
}

/* [[ColorPalette::...]] is a TiddlyWiki palette substitution, resolved at render time. */
.viewer table {
	border: 2px solid [[ColorPalette::TertiaryDark]];
}

.viewer th,
thead td {
	background: #948979;
	border: 1px solid #948979;
	color: #fff;
}

.viewer pre {
	border: 1px solid #948979;
	background: #f5f5f5;
}

.viewer code {
	color: #2F2A29;
}

.viewer hr {
	border-top: dashed 1px #948979;
}

/* Tiddler editor fields. */
.editor input {
	border: 1px solid #948979;
}

.editor textarea {
	border: 1px solid #948979;
}

/* Pop-up menus. */
.popup {
	background: #948979;
	border: 1px solid #948979;
}

.popup li.disabled {
	color: #000;
}

.popup li a,
.popup li a:visited {
	color: #eee;
	border: none;
}

.popup li a:hover {
	background: #575352;
	color: #fff;
	border: none;
}

/* Tag lists attached to tiddlers ("tagging"/"tagged" boxes). */
.tagging,
.tagged {
	border: 1px solid #eee;
	background-color: #F7F7F7;
}

.selected .tagging,
.selected .tagged {
	background-color: #eee;
	border: 1px solid #BFBAB3;
}

.tagging .listTitle,
.tagged .listTitle {
	color: #bbb;
}

.selected .tagging .listTitle,
.selected .tagged .listTitle {
	color: #666;
}

.tagging .button,
.tagged .button {
	color: #aaa;
}

.selected .tagging .button,
.selected .tagged .button {
	color: #4F4B45;
}

.highlight,
.marked {
	background: transparent;
	color: #111;
	border: none;
	text-decoration: underline;
}

.tagging .button:hover,
.tagged .button:hover,
.tagging .button:active,
.tagged .button:active {
	border: none;
	background: transparent;
	text-decoration: underline;
	color: #000;
}

/* Headings. */
h1, h2, h3, h4, h5 {
	color: #666;
	background: transparent;
	padding-bottom: 2px;
	font-family: Arial, Helvetica, sans-serif;
}

h1 { font-size: 18px; }
h2 { font-size: 16px; }
h3 { font-size: 14px; }

/* Transient message area (save notices, errors, ...). */
#messageArea {
	border: 4px solid #948979;
	background: #f5f5f5;
	color: #999;
	font-size: 90%;
}

#messageArea a:hover {
	background: #f5f5f5;
}

#messageArea .button {
	color: #666;
	border: 1px solid #003399;
}

#messageArea .button:hover {
	color: #fff;
	background: #948979;
	border-color: #948979;
}

/* "* html" rules are the classic IE6-only hack. */
* html .viewer pre {
	margin-left: 0em;
}

* html .editor textarea,
* html .editor input {
	width: 98%;
}

/* Search box in the header. */
.searchBar {
	float: right;
	font-size: 1.0em;
	margin: 0 1em;
}

.searchBar .button {
	color: #999;
	display: block;
}

.searchBar .button:hover {
	border: 1px solid #fff;
	color: #4F4B45;
}

.searchBar input {
	background-color: #FFF;
	color: #999999;
	border: 1px solid #CCC;
	margin-right: 3px;
}

#sidebarOptions .button:active,
#sidebarOptions .highlight {
	background: #F5F5F5;
}

/* NOTE(review): written as "*html" (no space), unlike the "* html" IE6
   hack used above; kept byte-for-byte — confirm intent before changing. */
*html #contentFooter {
	padding: 0.25em 1em 0.5em 1em;
}

/* Notice board embedded in the main menu. */
#noticeBoard {
	font-size: 0.9em;
	color: #999;
	position: relative;
	display: block;
	background: #fff;
	clear: both;
	margin-right: 0.5em;
	margin-top: 60px;
	padding: 5px;
	border-bottom: 1px dotted #CCC;
	border-top: 1px dotted #CCC;
}

#mainMenu #noticeBoard a,
#mainMenu #noticeBoard .tiddlyLink {
	display: inline;
	border: none;
	padding: 5px 2px;
	color: #003399;
}

#noticeBoard a:hover {
	border: none;
}

/* Unlike the menu proper, the notice board keeps its line breaks. */
#noticeBoard br {
	display: inline;
}

#mainMenu #noticeBoard .button {
	color: #666;
	border: 1px solid #DF9153;
	padding: 2px;
}

#mainMenu #noticeBoard .button:hover {
	color: #fff;
	background: #DF9153;
	border-color: #DF9153;
}

/* Lower-case "searchbar" is a distinct class from ".searchBar" above. */
.searchbar {
	position: relative;
	width: 11em;
}

.searchbar .button {
	margin: 0;
	width: 11em;
}

#header {
	display: inline-block;
}
/*}}}*/

/* Hide jsMath's missing-font warning banner. */
#jsMath_Warning {
	display: none;
}

/* Enlarge jsMath-typeset mathematics relative to the 13px body text. */
.typeset {
	font-size: 140%;
}
//Supersymmetry//

''-noun''
A particle physics theory for which half of the particles have been discovered.
txt_GATracker_id: UA-32844249-1
txt_GATracker_track: 1%2C0%2C1%2C1%2C0%2C0%2C0
//Tenure//

''-noun''
A state of being in which one's work ethic vanishes at the same time as one's ego swells to cartoonish proportions. 

A purely geometrical description of time as suggested by relativity (see [[Space and Time]]) is seriously incomplete. This is because the //direction// of time is arbitrary. We could draw events on a spacetime diagram with time (or, equivalently, the order of all events) reversed, and it would appear perfectly reasonable. This is reflected in the laws of physics as well: all of the fundamental laws of physics as we understand them work exactly the same way with time going backwards as forwards. (The precise symmetry is //CPT invariance//, but this is merely a technicality. )

For example, consider Newton's law of motion, \(F = ma\). It has often been lamented by gloomy Cartesians that if we were to know the position and velocity of every particle in the universe at any given time, the future would be perfectly determined.  In the language of differential equations, this is the idea of a //boundary condition//. When you give the state of a system at any one time, its state at all other times is determined by the law governing the system's evolution. But Newton's Law contains no notion of past or future: it works the same going backward in time as forward. (It seems much less fatalistic to assert that if we were to know the position and velocity of every particle in the universe, the //past// would be perfectly determined!)

Quantum mechanics helps a little, in that in a quantum world, it is impossible to know the position and velocity of every particle. The randomness built into quantum physics allows us a way out of Cartesian determinism. But Schrodinger's Equation is just as agnostic about the //direction// of time as Newton's Law is. It tells us nothing about why it is that we can know the past, but not the future. This indifference to the direction of time extends to every law of physics we know about. 

This is completely at odds with the world as we see it. The real world has causality: cause and effect extend in an infinite chain from past to future. In the language of spacetime, this is expressed as imposing a //causal order// on events. An //event// is a point in space and time, and the universe is a big collection of events. We have an idea that one event (the hammer of a gun falls) //causes// another event (the cartridge fires) which in turn causes another event (a soldier falls). It would be preposterous to describe the above sequence in reverse: a dead soldier springs to life and throws a supersonically moving piece of metal from his chest.  Meanwhile, gases from the atmosphere chemically react to form a solid inside a narrow metal tube just in time to be sealed in by the flying bullet, throwing the hammer of the gun back and latching it in the cocked position. 

But //why// is it preposterous? The laws which govern the evolution of systems in the universe have no notion of causal ordering: this is a notion which must be imposed on top of what we think of as the "fundamental" laws of physics. Why are there no creatures in the world like Merlin, living time backward, from the past to the future? A partial answer is entropy:  the total entropy of the universe always increases with time, a rule called the Second Law of Thermodynamics. The laws governing the interactions of microscopic particles have no notion of a direction to time, but the laws governing the statistics of large numbers of such particles //do// have such a notion, in spades. 

Why does the universe evolve in the direction of increasing entropy? It is a simple question of numbers. [[Entropy is a form of counting|Entropy]]: low-entropy states are fewer in number than high entropy states. Therefore, if a system starts out in a low-entropy state, it is overwhelmingly more likely to move to a state of higher entropy than it is to move to a state of lower entropy. The numbers involved are so huge that the inexorable increase of entropy, although strictly speaking only a probable outcome, is for all practical purposes  a certainty. Entropy also explains why we cannot "remember" the future as we do the past: storing information in our brains requires an increase in entropy. To remember a phone number or a new friend's face or what we had for breakfast, we must burn sugars for energy to fire neurons and configure synapses in our brains, releasing heat and causing the entropy of the universe to increase a tiny bit. A high-entropy universe can "remember" a low-entropy universe, but a low-entropy universe cannot "remember" a high-entropy one. And we can know the past, but not the future. 

This is only a partial explanation. Remember that the fundamental laws governing the evolution of the universe are time-symmetric. Therefore, just as a low-entropy state is overwhelmingly likely to evolve into a high-entropy state, it is also overwhelmingly likely that a low-entropy state evolved //from// a high-entropy state. But it is vanishingly unlikely to move from high entropy to low entropy, so simply saying that we perceive time to move in the direction of the increase of entropy leaves us with the question: how did we get into such a low-entropy state to begin with? What set the initial conditions of the universe? There are only a few logical possibilities:

(1) The universe has an infinite age. Anything that sits around for long enough will //eventually// experience a random fluctuation to a low entropy state. Unfortunately, this seems at odds with what we know about the Big Bang. The universe we live in appears to have a finite age. 

(2) The universe has infinite, or at least very large, size. If there is enough spatial volume to play with, a vanishing few regions of the universe will be lucky enough to be in a low-entropy state. This, in a nutshell, is the idea of a "multiverse" which is championed by cosmologists like Alan Guth and Andre Linde, who hypothesize a huge network of spacetime bubbles created by cosmological inflation.

(3) Action of an external agent. This was Aristotle's explanation, the idea of a //Prime Mover// for the cosmos, the original cause from which the entire chain of cause and effect emanates. This idea was later embraced by Augustine as a logical proof for the existence of God. This is by nature an unscientific argument, however, since a Prime Mover is by definition a supernatural actor, and science seeks to explain the world through natural phenomena.

(4) There is a fundamental physical law which is time //asymmetric//, and which we do not yet understand. It is hard to imagine how such a hidden physical law could exist and yet still result in an emergent universe which appears to be time-symmetric. (One attempt to formulate such a law is the [[Hartle/Hawking No Boundary Proposal|http://www.aei.mpg.de/einsteinOnline/en/spotlights/quantum_cosmo_path_integrals/]], which suffers from a number of technical difficulties.)

Options 1 & 2 are clearly related.  It is hard to see a spatially infinite universe as logically distinct from a past-infinite universe, since the distinction between space and time is infinitely malleable in General Relativity. One can certainly construct particular solutions to Einstein's Equations for General Relativity that are spatially infinite, but have finite age: the flat Friedmann-Robertson-Walker universe of the Big Bang is one such. But such cosmologies are almost certainly incomplete, because they are plagued by singularities at the initial time. Likewise, it is possible to construct solutions which are temporally infinite but spatially finite, such as Einstein's static universe. But Einstein's static universe is unstable, which means the initial conditions for such a universe require low entropy to begin with, and therefore we haven't actually explained anything. It is just a guess, but it is not unreasonable to expect that whatever new laws we formulate to cope with spacetime singularities will inevitably link the size of the universe with its age: an infinitely large universe most probably must be infinitely old, and vice-versa. Even if this does not turn out to be true, explaining the low entropy universe with a surfeit of either space or time will almost certainly require us to resort to the [[Anthropic Principle]], which is pretty much equivalent to abandoning a scientific explanation altogether. We can do better. 

This leaves us with Option 4, which is the one that I favor. There are already a few hints in the universe we see that something is not quite right about a strictly time-symmetric universe, most prominently the fact that the universe is made entirely of matter and not antimatter, but it is not clear if the asymmetry between matter and antimatter can be related directly to a low-entropy initial state. 
How many different ways are there to roll 60 dice such that you get ten of each possible number 1 through 6? Start with the dice arranged in numerical order:

[img[Sixty Dice|./images/Dice/sixtydice.jpg]]

//If we don't care about the order//, the number of ways to roll ten of each number is just the number of different ways this set of dice can be rearranged.  To calculate this number, we need to be able to count the number of different ways a set of things can be ordered, which is known as the [[factorial|Factorial]]. The number of different ways to arrange \(n\) different items, for example books on a bookshelf, is:

\[n! = n \times (n - 1) \times (n - 2) \times \cdots \times 3 \times 2 \times 1.\]

Suppose two books on my bookshelf are identical (for example I might have two copies of Moby Dick). In this case, swapping those two copies doesn't result in a new ordering, and I have to account for that. For example, if I have four books, but two are identical, there are only twelve possible arrangements:

A B C C
B A C C
A C B C
B C A C
A C C B
B C C A
C A B C
C B A C
C A C B
C B C A
C C A B
C C B A

So the total number of possible arrangements is:

\[N = \frac{24}{2} = \frac{4!}{2!} = 12.\]

Notice what is happening here: swapping two identical books doesn't do anything, so for the case of four books with two identical, the rule is that I take the total number of ways to arrange four books (\(4!\) = 24) and divide by the number of ways to arrange two books (\(2! = 2\)), since rearrangements of the two identical books have no effect.  If I have four books, and //three// are identical, rearrangements of the three identical books don't change anything. Therefore,  I only have four possible arrangements:

A B B B 
B A B B
B B A B 
B B B A

or, using our rule, we divide the number of ways to arrange four books by the number of ways to arrange three books:

\[N = \frac{24}{6} = \frac{4!}{3!} = 4.\]

In general, if I have \(n\) books total, but \(m\) of those books are identical, I have to factor out the \(m!\) indistinguishable arrangements of \(m\) books:

\[N = \frac{n!}{m!}.\]

This brings us to the 60 dice. How many different ways are there to roll 10 ones, 10 twos, 10 threes, and so forth? This is just given by the number of different orders giving these totals, //i.e.// the number of different ways to arrange a group of sixty objects consisting of six sets of ten //identical// objects. The number of ways to arrange sixty objects is 

\[N = 60!\]. 

However, for each set of ten identical dice (10 ones or 10 twos), we have to divide out the number of ways of rearranging each set of 10, or:

\[N = \frac{60!}{10! \times 10! \times 10! \times 10! \times 10! \times 10!}  = \frac{60!}{(10!)^{6}}\] 
\[\qquad = 3,644,153,415,887,633,116,359,073,848,179,365,185,734,400\].

Quite a big number!
Five reasons why this is not a blog:

(1) It's a [[Wiki|http://en.wikipedia.org/wiki/Wiki]].  It is not organized by date, and will not follow current events. I successively refine entries as I find better ways to do things. Any timeliness is purely coincidental.

(2) There is no comment section, [[hilarious as those sometimes can be|http://dorigo.wordpress.com/2007/08/29/lisa-randall-black-holes-out-of-reach-of-lhc/]].

(3) I will not be posting anything about [[my glamorous globe trotting|http://blogs.discovermagazine.com/cosmicvariance/2006/11/12/travel-travel-and-more-travel/]], or about [[making coffee|http://blogs.discovermagazine.com/cosmicvariance/2010/01/25/coffee-rituals/]], despite the crucial importance of such matters to scientific discourse. 

(4) I am not  [[a crackpot|http://behe.uncommondescent.com/]].

(5) I am not [[irate|http://scienceblogs.com/pharyngula/]] about anything. 

This is not a blog. The only blog I read regularly is [[Bike Snob NYC|http://bikesnobnyc.blogspot.com/]]. 
//Two Sigma Detection//

''-noun''
See [[One Sigma Detection]]
//Unnatural//

''-adj.''
Something I don't like. 
Welcome to Will Kinney's web notebook. This is a collection of personal science notes and essays that I am making available on the web in Wiki format. [[This is not a blog]], although it //is// intended to contain a mix of technical, pedagogical and more opinion-oriented science writing, much of it accessible to a general audience.

''Technical Articles:''
[[Inflation]], an online review of inflationary cosmology.

''Essays:''
[[A Bet on Non-Gaussianity]]
[[Irreducible Complexity, Science and Atheism]]
[[Self-Plagiarism]]
[[I Am]]
[[How to Play the Lottery]]
[[Entropy]]
[[Cosmological Expansion and the Big Bang]]
[[Cosmological Horizons]]
[[Cosmic Microwave Background]]
[[Bad Intro Physics]]
[[The Arrow of Time]]
[[Anthropic Principle]]

''Code:''
[[Auto-generating BibTeX Bibliography Files]]
[[Retrieving Bibliography Data from SPIRES]]
[[Parsing LaTeX Files]]
[[Converting LaTeX to Tiddlywiki]]
[[Passwords]]

''Miscellaneous:''
[[Astrophotos]]
[[Construction of a Lego Menger Sponge]]
<<tag [[Devil's Dictionary]]>>

The notebook is powered by [[TiddlyWiki|http://www.tiddlywiki.com/]], a very cool Javascript-based Wiki application. 
A running list of papers which have piqued my interest.

[[δN formalism|http://arxiv.org/abs/1208.1073]]

[[Measuring cosmic bulk flows with Type Ia Supernovae from the Nearby Supernova Factory|http://arxiv.org/abs/1310.4184]]

[[Nonlocal Gravity and Structure in the Universe|http://arxiv.org/abs/1310.4329]]

[[Primordial non-Gaussianities in single field inflationary models with non-trivial initial states|http://arxiv.org/abs/1310.4482]]

[[Attractor Solutions in Scalar-Field Cosmology|http://arxiv.org/abs/1309.2611]]

[[Coleman-Weinberg Inflation in light of Planck|http://arxiv.org/abs/1309.1695]]

[[Universality classes of inflation|http://arxiv.org/abs/1309.1285]]

[[Minimal Supergravity Models of Inflation|http://arxiv.org/abs/1307.7696]]

[[Cosmicflows-2: The Data|http://arxiv.org/abs/1307.7213]]

[[What Planck does not tell us about inflation|http://arxiv.org/abs/1307.7095]]

[[Pre-slow roll initial conditions: large scale power suppression and infrared aspects during inflation|http://arxiv.org/abs/1307.4066]]

[[CMB Power Asymmetry from Primordial Sound Speed Parameter|http://arxiv.org/abs/1307.4090]]

[[Planck 2013 Results. XXIV. Constraints on primordial non-Gaussianity|http://arxiv.org/abs/1303.5084]]

[[Planck 2013 results. XXII. Constraints on inflation|http://arxiv.org/abs/1303.5082]]

[[On the power spectrum generated during inflation|http://arxiv.org/abs/1302.2995]]

[[Cosmological Parameters from Pre-Planck CMB Measurements|http://arxiv.org/abs/1302.1841]]

[[The Atacama Cosmology Telescope: Cosmological parameters from three seasons of data|http://arxiv.org/abs/1301.0824]]

[[Constraints on Cosmology from the Cosmic Microwave Background Power Spectrum of the 2500-square degree SPT-SZ Survey|http://arxiv.org/abs/1212.6267]]

[[Nine-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Cosmological Parameter Results|http://arxiv.org/abs/1212.5226]]

[[The intrinsic bispectrum of the Cosmic Microwave Background|http://arxiv.org/abs/1302.0832]]

[[Bayesian Model Averaging in Astrophysics: A Review|http://arxiv.org/abs/1302.1721]]

[[What if Planck's Universe isn't flat?|http://arxiv.org/abs/1302.1617]]

[[Seeking Inflation Fossils in the Cosmic Microwave Background|http://arxiv.org/abs/1302.1868]]

[[Chameleonic inflation|http://arxiv.org/abs/1301.6756]]

[[Relic gravitational waves in the frame of slow-roll inflation with a power-law potential and the detection|http://arxiv.org/abs/1301.6351]]

[[Early Universe Tomography with CMB and Gravitational Waves|http://arxiv.org/abs/1301.1778]]

[[An Analysis On Ward Identity For Multi-Field Inflation|http://arxiv.org/abs/1212.6960]]

[[CMB Bispectrum from Non-linear Effects during Recombination|http://arxiv.org/abs/1212.6968]]

[[Patchy Screening of the Cosmic Microwave Background by Inhomogeneous Reionization|http://arxiv.org/abs/1210.5507]]

[[Violation of non-Gaussianity consistency relation in a single field inflationary model|http://arxiv.org/abs/1210.3692]]

[[A Quantum Gravity Extension of the Inflationary Scenario|http://arxiv.org/abs/1209.1609]]

[[The Gravitational Horizon for a Universe with Phantom Energy|http://arxiv.org/abs/1206.6192]]

[[Loop corrections and a new test of inflation|http://arxiv.org/abs/1207.1772]]

[[First measurement of the bulk flow of nearby galaxies using the cosmic microwave background|http://arxiv.org/abs/1207.1721]]

[[Anomalous High Energy Dependence in Inflationary Density Perturbations|http://arxiv.org/abs/1207.0100]]

[[Fully nonlinear and exact perturbations of the Friedmann world model|http://arxiv.org/abs/1207.0264]]

[[A Statistical Approach to Multifield Inflation: Many-field Perturbations Beyond Slow Roll|http://arxiv.org/abs/1207.0317]]

[[The consistency condition for the three-point function in dissipative single-clock inflation|http://arxiv.org/abs/1206.7083]]

[[The Gravitational Horizon for a Universe with Phantom Energy|http://arxiv.org/abs/1206.6192]]

[[First CMB Constraints on Direction-Dependent Cosmological Birefringence from WMAP-7|http://arxiv.org/abs/1206.5546]]

[[Reheating, Multifield Inflation and the Fate of the Primordial Observables|http://arxiv.org/abs/1206.5196]]

[[Detecting candidate cosmic bubble collisions with optimal filters|http://arxiv.org/abs/1206.5035]]

[[Multiple Cosmic Collisions and the Microwave Background Power Spectrum|http://arxiv.org/abs/1206.5038]]

[[The Matter Bounce Alternative to Inflationary Cosmology|http://arxiv.org/abs/1206.4196]]

[[A Hydrodynamical Approach to CMB mu-distortions|http://arxiv.org/abs/1206.4479]]

[[Mixed inflaton and curvaton scenario with sneutrinos|http://arxiv.org/abs/1206.4944]]

[[Constraining Isocurvature Perturbations with Cosmic Microwave Background Polarization|http://prl.aps.org/abstract/PRL/v87/i19/e191301]]

[[Observational constraints on K-inflation models|http://arxiv.org/abs/arXiv:1204.6214]]

[[The Cosmological Constant Problem, Dark Energy, and the Landscape of String Theory|http://arxiv.org/abs/1203.0307]]

[[Constraints on scalar and tensor perturbations in phenomenological and two-field inflation models: Bayesian evidences for primordial isocurvature and tensor modes|http://arxiv.org/abs/1202.2852]]

[[Optimal filters for detecting cosmic bubble collisions|http://arxiv.org/abs/1202.2861]]



// TiddlyWiki configuration overrides (config is the global TiddlyWiki config object).
// Author name recorded on tiddlers when they are created or modified.
config.options.txtUserName = "WillKinney";
// Folder, relative to the wiki file, where automatic backups are written.
config.options.txtBackupFolder = "backup";
// 1 disables automatic CamelCase wiki-link creation; links must be written as [[...]].
config.options.chkDisableWikiLinks = 1;
[[TiddlyWiki|http://www.tiddlywiki.com]] © Osmosoft
\begin{equation}
G_{\mu \nu} = 8 \pi G T_{\mu \nu},
\end{equation}
\begin{equation}
\Omega(t) = 1 + \frac{k}{\left(a H\right)^2}.
\end{equation}
\begin{equation}
\varphi^{\prime\prime} + 2 \left(\frac{a'}{a}\right) \varphi' - \nabla^2 \varphi = 0,
\end{equation}
\begin{equation}
\frac{d \Omega}{d \ln{a}} = \left(1 + 3 w\right) \Omega \left(\Omega - 1\right).
\end{equation}
\begin{equation}
D_\nu T^{\mu \nu} = 0,
\end{equation}
\begin{equation}
\frac{\delta T}{T} = \frac{1}{3} \left[\Phi_{\rm em} - \Phi_{\rm obs}\right],
\end{equation}
\begin{equation}
u_k\left(\tau\right) = \frac{1}{\sqrt{2 k}}\left( A_k e^{-i k \tau} + B_k e^{i k \tau}\right).
\end{equation}
\begin{equation}
{\mathcal L}_\phi = \frac{1}{2} g^{\mu\nu} \partial_\mu \phi \partial_\nu \phi - V\left(\phi\right).
\end{equation}
\begin{equation}
\left[\hat b_{\bf k}, \hat b^\dagger_{\bf k'}\right] \equiv \delta^3\left({\bf k} - {\bf k}'\right).
\end{equation}
\begin{equation}
\varphi\left(\tau,{\bf x}\right) = \int{\frac{d^3 k}{\left(2 \pi\right)^{3/2}} \left[\varphi_{\bf k}\left(\tau\right) b_{\bf k} e^{i {\bf k} \cdot {\bf x}} + \varphi_{\bf k}^* \left(\tau\right) b_{\bf k}^* e^{-i {\bf k} \cdot {\bf x}}\right]}.
\end{equation}
\begin{equation}
\Theta \simeq 3 H \left[1 - A - \frac{1 }{ a H} \frac{\partial {\cal R} }{ \partial \tau}\right].
\end{equation}
\begin{equation}
d_{\rm H}\left(t\right) = \int_{0}^{t}{\frac{dt'}{a\left(t'\right)}} = \int_{0}^{\tau}{d\tau'} = \tau.
\end{equation}
\begin{equation}
ds^2 = a^2\left(\tau\right) \left[d\tau^2 - \left\vert d{\bf x}\right\vert^2\right].
\end{equation}
\begin{equation}
r \equiv \frac{P_T}{P_S} = 16 \epsilon = - 8 n_T.
\end{equation}
\begin{equation}
T^{\mu\nu}{}_{\!;\nu} = \dot\rho + 3 \left(\frac{\dot a}{a}\right) \left(\rho + p\right) = 0.
\end{equation}
\begin{equation}
g_{\mu\nu} = \left(
\begin{array}{cccc}
1& & & \\
 &-a^2(t)& & \\
 & &-a^2(t)& \\
 & & &-a^2(t)
\end{array}
\right),
\end{equation}
\begin{equation}
\sqrt{\left\langle{\cal R}^2\right\rangle} = \frac{H^2 }{ 2 \pi \dot\phi} = \frac{H}{m_{\rm Pl} \sqrt{\pi \epsilon}},
\end{equation}
\begin{equation}
u_k = \frac{1}{\sqrt{2 k}} \left(\frac{k \tau - i}{k\tau}\right) e^{-i k \tau}.
\end{equation}
\begin{equation}
\tau = - \frac{1}{H} e^{-H t} = - \frac{1}{a H}.
\end{equation}
\begin{equation}
N = - \int{H d t} = - \int{\frac{H }{ \dot\phi} d\phi}.
\end{equation}
\begin{equation}
\Omega \equiv \left(\frac{\rho}{\rho_c}\right) = \frac{8 \pi}{3 m_{\rm Pl}^2} \frac{\rho}{H^2}. 
\end{equation}
\begin{equation}
\epsilon \equiv \frac{3}{2} \left(\frac{p}{\rho} + 1\right) = \frac{4 \pi}{m_{\rm Pl}^2} \left(\frac{\dot\phi}{H}\right)^2.
\end{equation}
\begin{equation}
\eta \equiv -\frac{\ddot \phi}{H \dot\phi} = \epsilon + \frac{1}{2 \epsilon}\frac{d \epsilon}{d N}.
\end{equation}
\begin{equation}
u_\mu \equiv \frac{\phi_{,\mu}}{\sqrt{g^{\alpha \beta} \phi_{,\alpha} \phi_{,\beta}}}.
\end{equation}
\begin{equation}
\left(\nabla f\right)^\mu \equiv h^{\mu\nu} f_{,\nu}.
\end{equation}
\begin{equation}
\dot\phi \equiv u^{\mu} \phi_{,\mu} = \sqrt{g^{\alpha \beta} \phi_{,\alpha} \phi_{,\beta}}.
\end{equation}
\begin{eqnarray}
\rho &&\equiv \frac{1 }{ 2} \dot\phi^2 + V\left(\phi\right),\cr
p &&\equiv \frac{1 }{ 2} \dot\phi^2 - V\left(\phi\right).
\end{eqnarray}
\begin{equation}
\dot f \equiv u^{\mu} f_{,\mu}.
\end{equation}
\begin{equation}
\left(\frac{\lambda}{d_{\rm H}}\right)^2 \left\vert \Omega - 1\right\vert = {\rm const.}
\end{equation}
\begin{equation}
T^{\mu}{}_{\!\nu} = \left(\begin{array}{cccc}
\rho\left(t\right)& & & \\
 &-p\left(t\right)& & \\
 & &-p\left(t\right)& \\
 & & &-p\left(t\right)
\end{array}\right),
\end{equation}
\begin{equation}
{\mathcal L} = \frac{1}{2} g^{\mu\nu} \partial_\mu \varphi \partial_\nu \varphi,
\end{equation}
\begin{equation}
\frac{1}{\sqrt{-g}} \partial_\nu \left(g^{\mu\nu} \sqrt{-g} \partial_\mu \varphi\right) = 0.
\end{equation}
\begin{equation}
n = 3 - 2 \nu = 3 -  \frac{3 - \epsilon}{1 - \epsilon} \simeq -2 \epsilon.
\end{equation}
\begin{equation}
p = h \nu \propto a^{-1}(t).
\end{equation} 
\begin{equation}
u_k \propto \sqrt{- k \tau} \left[J_\nu\left(-k \tau\right) + i Y_\nu\left(-k \tau\right)\right].
\end{equation}
\begin{eqnarray}
&&\left(\frac{\dot a}{a}\right)^2 + \frac{k}{a^2} = \frac{8 \pi}{3m_{\rm Pl}^2} \rho,\cr
&&\left(\frac{\ddot a}{a}\right) = - \frac{4 \pi}{3 m_{\rm Pl}^2} \left(\rho + 3 p\right).
\end{eqnarray}
\begin{eqnarray}
ds^2 &=& dt^2 - a^2\left(t\right) d {\bf x}^2\cr
&=& dt^2 - a^2\left(t\right) \left[ \frac{dr^2}{1 - k r^2} + r^2 d\Omega^2\right],
\end{eqnarray}
\begin{equation}
\ddot\phi + \Theta \dot\phi + V'\left(\phi\right) = 0.
\end{equation}
\begin{equation}
u_k \propto \sqrt{- k \tau} \left[J_\nu\left(-k \tau\right) \pm i Y_\nu\left(-k \tau\right)\right],
\end{equation}
\begin{equation}
T_{\mu \nu} = \phi_{,\mu} \phi_{,\nu} - g_{\mu \nu} \left[ \frac{1}{2} g^{\alpha\beta} \phi_{,\alpha} \phi_{,\beta} - V\left(\phi\right) \right].
\end{equation}
\begin{equation}
u_k^{\prime\prime} + \left[k^2 - \frac{a^{\prime\prime}}{a}\right] u_k = 0.
\end{equation}
\begin{equation}
u_k \propto a\ \Rightarrow \varphi_k = {\rm const.}
\end{equation}
\begin{equation}
P^{1/2}\left(k\right) \simeq \left(\frac{H}{2 \pi}\right)_{k = a H},
\end{equation}
\begin{equation}
\ddot\phi + 3 H \dot\phi + V'\left(\phi\right) = 0.
\end{equation}
\begin{equation}
\tau = - \left(\frac{1}{a H}\right) \left(\frac{1}{1 - \epsilon}\right),
\end{equation}
\begin{eqnarray}
\rho &=& \frac{1}{2} \dot\phi^2 + V\left(\phi\right),\cr
p &=& \frac{1}{2} \dot\phi^2 - V\left(\phi\right).
\end{eqnarray}
\begin{equation}
N \geq 68 + \ln{\left(\frac{\Lambda}{m_{\rm Pl}}\right)}.
\end{equation}
\begin{equation}
d N \equiv - H dt,
\end{equation}
\begin{equation}
\phi_N = m_{\rm Pl}\sqrt{\frac{N + 1}{\pi}},
\end{equation}
\begin{equation}
\left[\varphi\left(\tau,{\bf x}\right), \Pi\left(\tau,{\bf x}'\right)\right] = i \delta^3\left({\bf x} - {\bf x}'\right)
\end{equation}
\begin{equation}
\left\vert A_k\right\vert^2 - \left\vert B_k\right\vert^2 = 1.
\end{equation}
\begin{equation}
\varphi\left(\tau,{\bf x}\right) = \int{\frac{d^3 k}{\left(2 \pi\right)^{3/2}} \left[\varphi_{\bf k}\left(\tau\right) b_{\bf k} e^{i {\bf k} \cdot {\bf x}} + {\rm H.C.}\right]}
\end{equation}
\begin{equation}
H^2 = \left(\frac{\dot a}{a}\right)^2 = \frac{8 \pi}{3 m_{\rm Pl}^2} \left[\frac{1}{2} \dot\phi^2 + V\left(\phi\right)\right],
\end{equation}
\begin{equation}
S = \int{d^4 x \sqrt{- g} {\mathcal L}_\phi},
\end{equation}
\begin{equation}
\ddot \phi \ll 3 H \dot\phi,
\end{equation}
\begin{equation}
T_{\mu\nu} = \rho u_\mu u_\nu + p h_{\mu \nu},
\end{equation}
\begin{equation}
H^2 \simeq  \frac{8 \pi}{3 m_{\rm Pl}^2} V\left(\phi\right).
\end{equation}
\begin{eqnarray}
N &=& - \int{H dt} = - \int{\frac{H}{\dot \phi} d\phi}
= \frac{2 \sqrt{\pi}}{m_{\rm Pl}} \int{\frac{d \phi}{\sqrt{\epsilon}}}\cr
&\simeq& \frac{8 \pi}{m_{\rm Pl}^2} \int_{\phi_e}^{\phi} \frac{V\left(\phi\right)}{V'\left(\phi\right)} d\phi,
\end{eqnarray}
\begin{equation}
3 H \dot\phi + V'\left(\phi\right) \simeq 0.
\end{equation}
\begin{equation}
\epsilon = \frac{4 \pi}{m_{\rm Pl}^2} \left(\frac{\dot\phi}{H}\right)^2 \simeq \frac{m_{\rm Pl}^2}{16 \pi} \left(\frac{V'\left(\phi\right)}{V\left(\phi\right)}\right)^2.
\end{equation}
\begin{eqnarray}
\eta &=& - \frac{\ddot \phi}{H \dot\phi}\cr
&\simeq& \frac{m_{\rm Pl}^2}{8 \pi} \left[\frac{V^{\prime\prime}\left(\phi\right)}{V\left(\phi\right)} - \frac{1}{2} \left(\frac{V'\left(\phi\right)}{V\left(\phi\right)}\right)^2\right],
\end{eqnarray}
\begin{equation}
\left[P\left(k\right)\right]^{1/2} \longrightarrow 2^{\nu - 3/2} \frac{\Gamma\left(\nu\right)}{\Gamma\left(3/2\right)} \left(1 - \epsilon\right) \left(\frac{H}{2 \pi}\right) \left(\frac{k}{a H \left(1 - \epsilon\right)}\right)^{3/2 - \nu},
\end{equation}
\begin{eqnarray}
&&\delta g_{0i} = \delta g_{i 0} = 0\cr
&&\delta g_{ij} = \frac{32 \pi}{m_{\rm Pl}} \left( \varphi_+ \hat e^{+}_{ij}  + \varphi_\times \hat e^{\times}_{ij}\right),
\end{eqnarray}
\begin{equation}
u_k = \frac{1}{2}\sqrt{\frac{\pi}{k}} \sqrt{- k \tau} \left[J_\nu\left(-k \tau\right) + i Y_\nu\left(-k \tau\right)\right].
\end{equation}
\begin{eqnarray}
\Theta &&= u^\mu{}_{\!;\mu} = u^0{}_{\!,0} + \Gamma^\alpha{}_{\!\alpha 0} u^0\cr
&&= 3 H \left[1 - A - \frac{1 }{ a H} \left(\frac{\partial {\cal R} }{ \partial \tau} + \frac{1 }{ 3} \partial_i \partial_i \frac{\partial H_T }{ \partial \tau}\right)\right],
\end{eqnarray}
\begin{eqnarray}
H \left\vert 0\right\rangle &=& \int^{\infty}_{-\infty}{d^3 k \left[\hbar \omega_k \left({\hat a_{\bf k}}^{\dagger} {\hat a_{\bf k}} + {1 \over 2}\right)\right]} \left\vert 0 \right\rangle\cr
&=& \left[\int^{\infty}_{-\infty}{d^3 k \left(\hbar \omega_k / 2\right)}\right] \left\vert 0 \right\rangle\cr
&=& \infty.
\end{eqnarray}
\begin{equation}
\phi =  \int{d^3 k \left[a_{\bf k} u_{\bf k}(t) e^{i {\bf k}\cdot{\bf x}} + a^*_{\bf k} u^*_{\bf k}(t) e^{- i {\bf k}\cdot{\bf x}}\right]},
\end{equation}
\begin{equation}
u_{\bf k} \propto e^{-i \omega_k t},
\end{equation}
\begin{equation}
\frac{\partial^2 \phi }{ \partial t^2} - \nabla^2 \phi = 0.
\end{equation}
\begin{equation}
u_{\bf k} = A(k) e^{-i \omega t + i {\bf k} \cdot {\bf x}} + B(k) e^{i \omega t - i {\bf k} \cdot {\bf x}}.
\end{equation}
\begin{equation}
\frac{1 - X_{\rm e}}{X_{\rm e}^2} = \frac{4 \sqrt{2} \zeta(3)}{\sqrt{\pi}} \eta 
\left(\frac{T}{m_{\rm e}}\right)^{3/2} \exp\left(\frac{13.6\ {\rm eV}}{
T}\right).
\end{equation}
[img(100%,auto)[./images/Inflation/lightconecmb.png]]

//A conformal diagram of the Cosmic Microwave Background. Two points on opposite sides of the sky are causally separate, since their past light cones do not intersect.// 
[img(100%,auto)[./images/Inflation/COBE.png]]

//The COBE measurement of the CMB anisotropy  ```Bennett, //et al.,// (1996)  [[Four year COBE DMR cosmic microwave background observations: Maps and basic results|http://inspirehep.net/search?ln=en&p=Bennett:1996ce&of=hd]]``` . The top oval is a map of the sky showing the dipole anisotropy \(\Delta T / T \sim 10^{-3}\). The bottom oval is a similar map with the dipole contribution and emission from our own galaxy subtracted, showing the anisotropy for \(\ell > 1\), \(\Delta T / T \sim 10^{-5}\). (Figure courtesy of the COBE Science Working Group.)// 
[img(100%,auto)[./images/Inflation/WMAPCl.png]]

//The \(C_\ell\) spectrum for the CMB as measured by WMAP, showing the peaks characteristic of acoustic oscillations. The gray shaded region represents the uncertainty due to cosmic variance. (Figure courtesy of the WMAP Science Working Group.)// 
[img(100%,auto)[./images/Inflation/lightcone.png]]

//A conformal diagram of a Friedmann-Robertson-Walker space. The FRW space is causally identical to Minkowski Space, except that it is not past-infinite, so that past light cones are "cut off" at the Big Bang, which is a spatially infinite surface of redshift \(z = \infty\) at time \(\tau = 0\). // 
[img(100%,auto)[./images/Inflation/FRWFoliation.png]]

//Foliations of an FRW spacetime. Comoving hypersurfaces (dashed lines) have constant density, but another choice of gauge (solid lines) will have unphysical density fluctuations which are an artifact of the choice of gauge.// 
[img(100%,auto)[./images/Inflation/lightconeinfl.png]]

//A conformal diagram of light cones in an inflationary universe. Inflation ends in reheating at conformal time \(\tau = 0\), which is the onset of the radiation-dominated expansion of the hot Big Bang. However, inflation provides a "sea" of negative conformal time, which allows the past light cones of events at the last scattering surface to overlap.// 
[img(100%,auto)[./images/Inflation/HorizonCrossingCMB.png]]

//Superhorizon modes in the CMB: wavelengths of order the horizon size today were superhorizon at the last scattering surface (LSS).//
[img(100%,auto)[./images/Inflation/eso0419d.jpg]]

Constraints on \(\Omega_\Lambda\) and \(\Omega_{\rm m}\) from different astrophysical observations. (Image credit: [[ESO|http://www.eso.org/public/images/eso0419d/]]). 
[img(100%,auto)[./images/Inflation/SNIa.png]]
//Data from the Supernova Cosmology project. Dimmer objects are  higher vertically on the plot. The horizontal axis is redshift. The curves represent different choices of \(\Omega_{\rm m}\) and \(\Omega_\Lambda\). A cosmology with \(\Omega_{\rm m} = 1\) and  \(\Omega_\Lambda = 0\) is ruled out to 99% confidence, while a universe with  \(\Omega_{\rm M} = 0.3\) and \(\Omega_{\Lambda} = 0.7\) is a good fit to the data. Image: Perlmutter, //et al.// (1998) ```Perlmutter, //et al.,// (1999)  [[Measurements of Omega and Lambda from 42 high redshift supernovae|http://inspirehep.net/search?ln=en&p=Perlmutter:1998np&of=hd]]```  //
[img(100%,auto)[./images/Inflation/ThreeScales.png]]

//Longer wavelengths exit the horizon earlier in inflation, and re-enter the horizon later. Perturbations which exited the horizon with \(N > 60\) are still larger than our horizon today, shown as a light green circle on the figure. Perturbations which exited the horizon with \(N < 60\) are smaller than our horizon today.// 
[img(100%,auto)[./images/Inflation/WMAP.png]]

//The WMAP measurement of the CMB anisotropy  ```Hinshaw, //et al.,// (2009)  [[Five-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Data Processing, Sky Maps, and Basic Results|http://inspirehep.net/search?ln=en&p=Hinshaw:2008kr&of=hd]]``` . (Figure courtesy of the WMAP Science Working Group.) WMAP measured the anisotropy with much higher sensitivity and resolution than COBE.//
[img(100%,auto)[./images/Inflation/WMAP5Labeled.png]]

//Constraints on the \(r\), \(n\) plane from Cosmic Microwave Background measurements. Shaded regions are the regions allowed by the WMAP5 measurement to 68\% and 95\% confidence. Models plotted are "large-field" potentials \(V\left(\phi\right) \propto \phi^2\) and \(V\left(\phi\right) \propto \phi^4\).//
[img(100%,auto)[./images/Inflation/WMAP5logLabeled.png]]

//Constraints on the \(r\), \(n\) plane from Cosmic Microwave Background measurements, with the tensor/scalar ratio plotted on a log scale. In addition to the [[large-field models |fig:WMAPrn]], three small-field models are plotted against the data: "Natural Inflation" from a pseudo-Nambu-Goldstone boson  ```Freese, //et al.,// (1990)  [[Natural inflation with pseudo - Nambu-Goldstone bosons|http://inspirehep.net/search?ln=en&p=Freese:1990rb&of=hd]]``` with potential \(V\left(\phi\right) = \Lambda^4 \left[1 - \cos{\left(\phi / \mu\right)}\right]\), a logarithmic potential \(V\left(\phi\right) \propto \ln{\left(\phi\right)}\) typical of supersymmetric models  ```Stewart (1995)  [[Inflation, supergravity and superstrings|http://inspirehep.net/search?ln=en&p=Stewart:1994ts&of=hd]]```  ```Dvali, //et al.,// (1994)  [[Large scale structure and supersymmetric inflation without fine tuning|http://inspirehep.net/search?ln=en&p=Dvali:1994ms&of=hd]]```  ```Barrow & Parsons (1995)  [[Inflationary models with logarithmic potentials|http://inspirehep.net/search?ln=en&p=Barrow:1995xb&of=hd]]``` , and a Coleman-Weinberg potential \(V\left(\phi\right) \propto \phi^4 \ln{\left(\phi\right)}\).//
[img(100%,auto)[./images/Inflation/AgeMD.png]]

//Age of the universe as a function of \(\Omega_{\rm m}\) for a matter-only universe. The blue shaded region shows the age \(t_0\) consistent with the HST key project value  \(H_0 = 72 \pm 8\ {\rm km/s/Mpc}\). The red area is the region consistent with globular  cluster ages \(t_0 > 12\ {\rm Gyr}\).//
[img(100%,auto)[./images/Inflation/AgeLCDM.png]]

//Age of the universe as a function of \(\Omega_{\rm m}\) for a flat universe with mixed matter and cosmological constant// (\(\Omega_{\rm m} + \Omega_{\Lambda} = 1\)). //The green shaded region shows the age \(t_0\) consistent with the HST key project value  \(H_0 = 72 \pm 8\ {\rm km/s/Mpc}\). The red area is the region consistent with globular  cluster ages \(t_0 > 12\ {\rm Gyr}\). The error bar shows the WMAP 7-year constraint \(t_0 = 13.75\pm0.13\ {\rm Gyr}\), \(\Omega_\Lambda = 0.734\pm0.029\). ```Larson, //et al.,// (2011)  [[Seven-Year Wilkinson Microwave Anisotropy Probe (WMAP) Observations: Power Spectra and WMAP-Derived Parameters|http://inspirehep.net/search?ln=en&p=Larson:2010gs&of=hd]]``` //
[img(100%,auto)[./images/Inflation/matterbox.png]]

//A comoving box full of matter. The energy density in matter scales inversely with the volume of the box.//
[img(100%,auto)[./images/Inflation/radbox.png]]

//A comoving box full of radiation. The number density of photons scales inversely with the volume of the box, but the photons also increase in wavelength.//
[img(100%,auto)[./images/Inflation/vacbox.png]]

//A comoving box full of vacuum. The energy density of vacuum does not scale at all!//
[img(100%,auto)[./images/Inflation/foliation.png]]

//A comoving foliation of spacetime. Spatial hypersurfaces are everywhere orthogonal to the fluid four-velocity \(u^{\mu}\).//
[img(100%,auto)[./images/Inflation/grid_infl.gif]]

//The horizon in an inflationary cosmology. The horizon (green circle) decreases in size relative to comoving coordinates (red grid), and two observers who are initially causally connected become causally disconnected at late time.//
[img(100%,auto)[./images/Inflation/grid_md.gif]]

//The horizon in a matter-dominated cosmology. The horizon (green circle) increases in size relative to comoving coordinates (red grid). The blue dots represent two comoving observers who are initially causally disconnected, and later "fall into" each others' horizons.//
[img(100%,auto)[./images/Inflation/horizoninfl.png]]

//A conformal diagram of the horizon in an inflationary universe. The comoving horizon shrinks during inflation, and grows during the radiation- and matter-dominated expansion, while the comoving wavelengths of perturbations remain constant. This drives comoving perturbations to "superhorizon" scales.// 
[img(100%,auto)[./images/Inflation/hybrid.png]]

//A  schematic of a hybrid potential.// 
[img(100%,auto)[./images/Inflation/deltaN.png]]

//Flat and comoving hypersurfaces.// 
[img(100%,auto)[./images/Inflation/potential.png]]

//A schematic of the potential for inflation. Inflation takes place on the region of the potential which is sufficiently "flat", and reheating takes place near the true vacuum for the field.// 
[img(100%,auto)[./images/Inflation/largefield.png]]

//A schematic of a large-field potential.// 
[img(100%,auto)[./images/Inflation/lastscattering.png]]

//Cartoon of the last scattering surface. From earth, we see blackbody radiation emitted uniformly from all directions, forming a "sphere" at  redshift \(z = 1100\).//
[img(100%,auto)[./images/Inflation/modefunction.png]]

//The normalized mode function in de Sitter space, showing oscillatory behavior on subhorizon scales \(k / a H > 1\), and mode freezing on superhorizon scales, \(k / a H < 1\). // 
[img(100%,auto)[./images/Inflation/psmodulation.png]]

//Modulation of the power spectrum of primordial fluctuations for a rotation \(B \sim H / m_{\rm Pl}\). //
[img(100%,auto)[./images/Inflation/recombination.png]]

//Schematic diagram of recombination. At early time, the temperature of the universe is above the ionization energy of hydrogen and helium, so that the universe is full of an ionized plasma, and the mean free path for photons is short compared to the Hubble length. At late time, the temperature drops and the nuclei capture the electrons and form neutral atoms. Once this happens, the universe becomes transparent to photons, which free stream from the surface of last scattering. //
[img(100%,auto)[./images/Inflation/rhovsz.png]]

//Schematic diagram of how the three types of stress-energy scale with \(a\): at early time, radiation dominates, followed by matter, and finally the universe is dominated by vacuum energy.//
[img(100%,auto)[./images/Inflation/smallfield.png]]

//A schematic of a small-field potential.// 
[img(100%,auto)[./images/Inflation/TimelikeCongruence.png]]

//A timelike congruence in spacetime. Each event \(P\) is intersected by exactly one world line in the congruence.// 
//{{{
//This ensures that the footer sticks to the bottom of the screen when there are no tiddlers open. If that is not desirable, it can be deleted.
// Pins the page footer to the bottom of the window when the content is
// shorter than the viewport, by padding mainMenu's bottom margin.
// Called on window resize and after every tiddler refresh.
function setFooter() {
	if (!document.getElementById) return;
	var menu = document.getElementById("mainMenu");
	var header = document.getElementById("header");
	var footer = document.getElementById("contentFooter");
	// Require all three layout elements: the original code only checked
	// contentFooter and would throw if mainMenu or header were missing.
	if (!menu || !header || !footer) return;
	var windowHeight = findWindowHeight();
	if (windowHeight > 0) {
		var contentHeight = menu.offsetHeight + header.offsetHeight + footer.offsetHeight;
		if (windowHeight - contentHeight >= 0) {
			// Content is shorter than the window: push the footer down.
			menu.style.position = 'relative';
			menu.style.marginBottom = (windowHeight - contentHeight) + 'px';
		} else {
			// Content overflows the window: restore normal flow.
			menu.style.position = '';
			menu.style.marginBottom = '';
		}
	}
}
// Recompute footer placement whenever the browser window is resized.
// (setFooter takes no arguments and ignores `this`, so it can serve as
// the handler directly.)
window.onresize = setFooter;

// Wrap Story.refreshTiddler so the footer is repositioned after any
// tiddler refresh, since the rendered content height may have changed.
Story.prototype.refreshTiddler_footerhack = Story.prototype.refreshTiddler;
Story.prototype.refreshTiddler = function(title, template, force) {
	var result = Story.prototype.refreshTiddler_footerhack.apply(this, arguments);
	setFooter();
	return result;
};

//}}}
// Register plugin version metadata.
version.extensions.DisableWikiLinksPlugin= {major: 1, minor: 6, revision: 0, date: new Date(2008,7,22)};

// Seed each plugin option with its default unless the user has already
// saved a value.  The ternary form always assigns, but writes back the
// existing value when one is present, so saved settings are preserved.
config.options.chkDisableWikiLinks = (config.options.chkDisableWikiLinks==undefined) ? false : config.options.chkDisableWikiLinks;
config.options.chkDisableNonExistingWikiLinks = (config.options.chkDisableNonExistingWikiLinks==undefined) ? false : config.options.chkDisableNonExistingWikiLinks;
config.options.chkAllowLinksFromShadowTiddlers = (config.options.chkAllowLinksFromShadowTiddlers==undefined) ? true : config.options.chkAllowLinksFromShadowTiddlers;
config.options.txtDisableWikiLinksList = (config.options.txtDisableWikiLinksList==undefined) ? "DisableWikiLinksList" : config.options.txtDisableWikiLinksList;
config.options.txtDisableWikiLinksTag = (config.options.txtDisableWikiLinksTag==undefined) ? "excludeWikiWords" : config.options.txtDisableWikiLinksTag;

// Find the core formatter for wikiLink and replace its handler with
// 'pass-thru' rendering when linking is disabled or excluded.
initDisableWikiLinksFormatter();
function initDisableWikiLinksFormatter() {
	// Locate the "wikiLink" formatter; bail out if it is absent (the
	// original loop could run off the end of the array and dereference
	// undefined on the next line).
	var i;
	for (i=0; i<config.formatters.length && config.formatters[i].name!="wikiLink"; i++);
	if (i>=config.formatters.length) return;
	config.formatters[i].coreHandler=config.formatters[i].handler;
	config.formatters[i].handler=function(w) {
		// suppress any leading "~" (if present)
		var skip=(w.matchText.substr(0,1)==config.textPrimitives.unWikiLink)?1:0;
		var title=w.matchText.substr(skip);
		var exists=store.tiddlerExists(title);
		var inShadow=w.tiddler && store.isShadowTiddler(w.tiddler.title);
		// tiddlers tagged with the exclusion tag render WikiWords as plain text
		if (w.tiddler && w.tiddler.isTagged(config.options.txtDisableWikiLinksTag))
			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }
		// check for specific excluded wiki words (listed in a separate tiddler)
		var t=store.getTiddlerText(config.options.txtDisableWikiLinksList);
		if (t && t.length && t.indexOf(w.matchText)!=-1)
			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }
		// if not disabling links from shadows (default setting)
		if (config.options.chkAllowLinksFromShadowTiddlers && inShadow)
			return this.coreHandler(w);
		// check for non-existing non-shadow tiddler
		if (config.options.chkDisableNonExistingWikiLinks && !exists)
			{ w.outputText(w.output,w.matchStart+skip,w.nextMatch); return; }
		// if not enabled, just do standard WikiWord link formatting
		if (!config.options.chkDisableWikiLinks)
			return this.coreHandler(w);
		// just return text without linking
		w.outputText(w.output,w.matchStart+skip,w.nextMatch);
	};
}

// Suppress TiddlyWiki's automatic WikiWord link scanning when the plugin
// is enabled; otherwise defer to the core implementation.
Tiddler.prototype.coreAutoLinkWikiWords = Tiddler.prototype.autoLinkWikiWords;
Tiddler.prototype.autoLinkWikiWords = function()
{
	// With automatic links disabled, report that no links were made.
	if (config.options.chkDisableWikiLinks)
		return false;
	return this.coreAutoLinkWikiWords.apply(this,arguments);
}

// After the core link scan runs, strip any excluded wiki words (read from
// the exclusion-list tiddler) out of this tiddler's computed links array.
Tiddler.prototype.disableWikiLinks_changed = Tiddler.prototype.changed;
Tiddler.prototype.changed = function()
{
	this.disableWikiLinks_changed.apply(this,arguments);
	var excluded = store.getTiddlerText(config.options.txtDisableWikiLinksList,"").readBracketedList();
	for (var i=0; i<excluded.length; i++) {
		var pos = this.links.indexOf(excluded[i]);
		if (pos!=-1)
			this.links.splice(pos,1);
	}
};
/***
|''Name:''|MathJaxPlugin|
|''Description:''|Enable LaTeX formulas for TiddlyWiki|
|''Version:''|1.0.1|
|''Date:''|Feb 11, 2012|
|''Source:''|http://www.guyrutenberg.com/2011/06/25/latex-for-tiddlywiki-a-mathjax-plugin|
|''Author:''|Guy Rutenberg|
|''License:''|[[BSD open source license]]|
|''~CoreVersion:''|2.5.0|
 
!! Changelog
!!! 1.0.1 Feb 11, 2012
* Fixed interoperability with TiddlerBarPlugin
!! How to Use
Currently the plugin supports the following delemiters:
* """\(""".."""\)""" - Inline equations
* """$$""".."""$$""" - Displayed equations
* """\[""".."""\]""" - Displayed equations
!! Demo
This is an inline equation \(P(E)   = {n \choose k} p^k (1-p)^{ n-k}\) and this is a displayed equation:
\[J_\alpha(x) = \sum_{m=0}^\infty \frac{(-1)^m}{m! \, \Gamma(m + \alpha + 1)}{\left({\frac{x}{2}}\right)}^{2 m + \alpha}\]
This is another displayed equation $$e=mc^2$$
!! Code
***/
//{{{
config.extensions.MathJax = {
  // cdn.mathjax.org was retired in April 2017, so the original http URL no
  // longer serves the library; load MathJax 2.x from the cdnjs mirror over
  // https instead (same TeX-AMS-MML combined configuration).
  mathJaxScript : "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/MathJax.js?config=TeX-AMS-MML_HTMLorMML",
  // Wrapper for story.displayTiddler: render the tiddler, then queue a
  // MathJax typeset pass over the newly-inserted content.
  displayTiddler: function(TiddlerName) {
    config.extensions.MathJax.displayTiddler_old.apply(this, arguments);
    MathJax.Hub.Queue(["Typeset", MathJax.Hub]);
  }
};
 
jQuery.getScript(config.extensions.MathJax.mathJaxScript, function(){
    MathJax.Hub.Config({
      extensions: ["tex2jax.js"],
      "HTML-CSS": { scale: 100 }
    });
 
    MathJax.Hub.Startup.onload();
    // Install the typesetting wrapper only after MathJax has loaded, so the
    // wrapped displayTiddler never runs before MathJax.Hub exists.
    config.extensions.MathJax.displayTiddler_old = story.displayTiddler;
    story.displayTiddler = config.extensions.MathJax.displayTiddler;
});
 
// Wikifier formatter that protects LaTeX spans from wiki markup processing.
// It matches the opening delimiter of \[..\] (display), $$..$$ (display)
// or \(..\) (inline), then copies the whole span -- delimiters included --
// into a plain <span> so MathJax can typeset it later.
config.formatters.push({
	name: "mathJaxFormula",
	match: "\\\\\\[|\\$\\$|\\\\\\(",
	//lookaheadRegExp: /(?:\\\[|\$\$)((?:.|\n)*?)(?:\\\]|$$)/mg,
	handler: function(w)
	{
		// Pick the closing-delimiter regexp matching the opener found.
		// NOTE(review): stored on `this` rather than a local; the 1.0.1
		// changelog above cites TiddlerBarPlugin interoperability, which
		// presumably reads formatter.lookaheadRegExp -- confirm before
		// converting to a local variable.
		switch(w.matchText) {
		case "\\[": // displayed equations
			this.lookaheadRegExp = /\\\[((?:.|\n)*?)(\\\])/mg;
			break;
		case "$$": // displayed equations
			this.lookaheadRegExp = /\$\$((?:.|\n)*?)(\$\$)/mg;
			break;
		case "\\(": // inline equations
			this.lookaheadRegExp = /\\\(((?:.|\n)*?)(\\\))/mg;
			break;
		default:
			break;
		}
		// Anchor the lookahead at the current match position and require the
		// closing delimiter to follow from exactly there.
		this.lookaheadRegExp.lastIndex = w.matchStart;
		var lookaheadMatch = this.lookaheadRegExp.exec(w.source);
		if(lookaheadMatch && lookaheadMatch.index == w.matchStart) {
			// Emit the raw source (delimiters + formula) for MathJax to find.
			createTiddlyElement(w.output,"span",null,null,lookaheadMatch[0]);
			w.nextMatch = this.lookaheadRegExp.lastIndex;
		}
	}
});
//}}}
// Autolink new-style arXiv identifiers (e.g. arXiv:1204.6214) to their
// abstract pages.  The separator dot is escaped (the original pattern's
// bare "." matched any character), and the sequence number accepts 4 or 5
// digits to cover identifiers issued from 2015 onward.
config.formatters.push({
  name: "arXivLinks",
  match: "\\b(?:arXiv:)[0-9]{4}\\.[0-9]{4,5}\\b",
  element: "a",
  handler: function(w) {
    var e = createExternalLink(w.output, "http://arxiv.org/abs/"+w.matchText);
    e.target = "_blank"; // open in new window
    w.outputText(e,w.matchStart,w.nextMatch);
  }
});
// Autolink old-style arXiv identifiers (e.g. hep-th/9901001, math.AG/0601001)
// to their abstract pages.  Renamed from "arXivLinks" so the two arXiv
// formatters no longer share a name (a by-name lookup over
// config.formatters would only ever find the first one).
config.formatters.push({
  name: "arXivLinksOldStyle",
  match: "\\b(?:astro-ph|cond-mat|hep-ph|hep-th|hep-lat|gr-qc|nucl-ex|nucl-th|quant-ph|(?:cs|math|nlin|physics|q-bio)(?:\\.[A-Z]{2})?)/[0-9]{7}\\b",
  element: "a",
  handler: function(w) {
    var e = createExternalLink(w.output, "http://arxiv.org/abs/"+w.matchText);
    e.target = "_blank"; // open in new window
    w.outputText(e,w.matchStart,w.nextMatch);
  }
});
\begin{equation}
\ddot u_{\bf k} + k^2 u_{\bf k} = 0.
\end{equation}