\subtitle{Evaluating Performance}
\date{Monday (AM), 14 May 2018}
\begin{document}
	
	\begin{frame}
	\titlepage
	\end{frame}

	% What do we want?
	% What do we measure?
	% How do we measure it?
	
	\begin{frame}{What is Player Experience?}
		\begin{block}{Player experience}
			Collection of events that occur to the player during the game
			\note{Should be clear - it is only the events that occur because of the game that are important}	
		\end{block}
	\end{frame}

	\begin{frame}{What is Player Experience?}
		\begin{block}{Scenario}
Jeffrey is playing a game in his bedroom. The game is an online RTS, and he is playing with a friend against two other people online.
		\end{block}
		\begin{block}{Question}
		Which of these are a part of the player experience and which are not?\note{All happen while the person is playing a game}
		\end{block}
		\begin{itemize}
			\item<2->{Losing a unit} \uncover<7->{Yes}
			\item<3->{Laundry finishing} \uncover<8->{No}
			\item<4->{Collecting resource} \uncover<9->{Yes}
			\item<5->{New message in chat window} \uncover<10->{Yes}
			\item<6->{Unit moving} \uncover<11->{Yes}
		\end{itemize}
	\note{\\ Anything that occurs during the game and as part of the game is part of the player experience. Which of these can be detected by an AI?}
	\end{frame}
	
	\section{Metrics}
	
	\begin{frame}
Collect data on how players and bots behave during play
		
		\begin{block}{Activity}
			What kinds of features can we collect?
		\end{block}
	\end{frame}

	\begin{frame}{Data from humans}
		\begin{itemize}[<+->]
			\item{High-level human experience}
				\begin{itemize}
					\item Final game scores?
					\item How long did they play for?
				\end{itemize}
			\item{Biosignals}
				\begin{itemize}
					\item Where did they look?
					\item Galvanic skin response
					\item Brain-computer interfaces (BCI)
				\end{itemize}
			\item{Surveys and interviews}
				\begin{itemize}
					\item Likert Scales
					\item Why did you feel that way?
				\end{itemize}
		\end{itemize}
	\end{frame}

	\begin{frame}{Data from bots}
		\begin{itemize}[<+->]
			\item Internal State
				\begin{itemize}
					\item Will depend on bot architecture
					\item Measure state visits in FSM
					\item Did the game make full use of the AI?
				\end{itemize}
			\item How many times does a bot face a difficult choice?
				\begin{itemize}
					\item What is a difficult choice? \note{Difficult Choice: MCTS - near identical branches, GA - No Convergence}
				\end{itemize}
		\end{itemize}
	\end{frame}
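	\begin{frame}[fragile]{Sketch: Counting FSM State Visits}
		A minimal sketch of one way to log state visits for an FSM bot. The \texttt{FSMBot} class and the state names are made up for illustration, not taken from any particular framework.
		{\footnotesize
\begin{verbatim}
from collections import Counter

class FSMBot:
    """Toy finite-state-machine bot that logs every state it enters."""
    def __init__(self):
        self.state = "idle"
        self.visits = Counter()

    def transition(self, new_state):
        self.state = new_state
        self.visits[new_state] += 1

bot = FSMBot()
for s in ["patrol", "attack", "flee", "patrol", "attack", "attack"]:
    bot.transition(s)
print(bot.visits)          # Counter({'attack': 3, 'patrol': 2, 'flee': 1})
print(bot.visits["heal"])  # 0 -> a state the game never exercised
\end{verbatim}
		}
	\end{frame}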

	\begin{frame}{Data from either}
		\note{Some things can be measured regardless of whether a human or an AI is playing \begin{itemize}[<+->]}
		\begin{itemize}[<+->]
			\item Final Score distribution\note{\item How high, variation?}
			\item Game Duration \note{\item Length, range of lengths}
			\item Score ``Drama'' \note{\item Runaway victory? Keeps changing hands? Loop?}
			\item Statistical distribution of states \note{\item Some states not used at all? Some overused?}
			\item Degree of challenge \note{\item How to measure this?}
		\end{itemize}
	\note{\end{itemize}}
	\end{frame}
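	\begin{frame}[fragile]{Sketch: Score ``Drama'' from a Score Trace}
		One way to quantify drama is to count lead changes over a logged score trace. The trace format below (one \texttt{(p1, p2)} sample per turn) is an assumed logging format, purely for illustration.
		{\footnotesize
\begin{verbatim}
def lead_changes(score_trace):
    """Count how often the lead swaps hands in a two-player score trace."""
    changes, leader = 0, None
    for p1, p2 in score_trace:
        if p1 == p2:
            continue                       # ties leave the leader unchanged
        current = "p1" if p1 > p2 else "p2"
        if leader is not None and current != leader:
            changes += 1
        leader = current
    return changes

trace = [(0, 0), (2, 1), (2, 4), (5, 4), (5, 9)]
print(lead_changes(trace))  # 3 -> the lead changed hands three times
\end{verbatim}
		}
		A runaway victory gives a count near zero; a close, back-and-forth game gives a high count.
	\end{frame}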

	\begin{frame}{Data from populations}
	Variability of scores across a population of players; skill depth
	\end{frame}
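	\begin{frame}[fragile]{Sketch: Population-Level Measures}
		A minimal sketch of two population-level measures: the spread of final scores, and a crude skill-depth proxy based on how often a stronger bot beats a weaker one. The scores and the \texttt{play\_match} runner are hypothetical.
		{\footnotesize
\begin{verbatim}
import statistics

# Final scores from a (made-up) population of play-throughs.
scores = [120, 340, 95, 410, 250, 180, 305, 220]
print(statistics.mean(scores), statistics.stdev(scores))

# Crude skill-depth proxy (play_match is a hypothetical game runner):
# wins = sum(play_match(strong_bot, weak_bot) for _ in range(100))
# A win rate near 50% suggests skill barely matters; near 100% suggests
# the game rewards skill -- assuming the two bots really differ in skill.
\end{verbatim}
		}
	\end{frame}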


	\section{Action Sequences}

	\begin{frame}{Data from either}
		Record the actions taken: the sequence of button presses made during play
	\end{frame}
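	\begin{frame}[fragile]{Sketch: From Button Presses to Probabilities}
		A small sketch of turning a recorded button-press sequence into per-action probabilities; these probabilities feed straight into the entropy calculation on the following slides. The sequence itself is made-up data.
		{\footnotesize
\begin{verbatim}
from collections import Counter

# A recorded button-press sequence (made-up data for illustration).
presses = ["A", "A", "B", "LEFT", "A", "RIGHT", "A", "B", "A", "LEFT"]

counts = Counter(presses)
total = len(presses)
probabilities = {button: n / total for button, n in counts.items()}
print(probabilities)  # {'A': 0.5, 'B': 0.2, 'LEFT': 0.2, 'RIGHT': 0.1}
\end{verbatim}
		}
	\end{frame}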

	\begin{frame}{Entropy}
		\note{\begin{itemize}}
		\begin{itemize}[<+->]
			\item Sometimes used to interpret aspects of player experience
			\begin{itemize}
				\item $H(X) =  \sum_{i=1}^{n} P(x_{i})I(x_{i}) = -\sum_{i=1}^{n}P(x_{i})\log_{b}P(x_{i})$ \note{\item We won't worry too much about the middle definition}
				\item Take a fair coin - how much entropy?
				\item $H(fairCoin) = -\sum_{i=1}^{2}(\frac{1}{2})\log_{2}(\frac{1}{2}) = -\sum_{i=1}^{2}(\frac{1}{2}) \times (-1) = 1 $ \note{\item Because it is a fair coin, each toss is completely unpredictable and carries a full bit of information}
				\item How about an unfair coin? What is the entropy for a coin of probability 0.9?
				\note{\item Whiteboard time if students stuck: \begin{itemize}}
				\note{\item Answer is: $ H(dodgyCoin) = -\sum_{i=1}^{2}P(x_{i})\log_{2}P(x_{i}) $}
				\note{\item Continued: $ = -\Big( (0.9 \log_{2}0.9) + (0.1 \log_{2}0.1) \Big) \approx 0.47 $}
				\note{\end{itemize}}
			\end{itemize}
		\end{itemize}
		\begin{center}
			\uncover<6->{\includegraphics[scale=0.4]{entropy}\footnote<6->{Borrowed from \href{https://en.wikipedia.org/wiki/Entropy_(information_theory)}{Wikipedia}}}
		\end{center}
		\note{\end{itemize}}
	\end{frame}
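	\begin{frame}[fragile]{Sketch: Computing Entropy}
		A minimal sketch of the entropy formula from the previous slide, checked against the fair and dodgy coins. The helper name \texttt{entropy} is just a convenient label.
		{\footnotesize
\begin{verbatim}
import math

def entropy(probabilities, base=2):
    """Shannon entropy H(X) = -sum(p * log_b(p)); zero-probability
    outcomes contribute nothing and are skipped."""
    return -sum(p * math.log(p, base) for p in probabilities if p > 0)

print(entropy([0.5, 0.5]))  # 1.0 bit  -> fair coin
print(entropy([0.9, 0.1]))  # ~0.469   -> dodgy coin, as in the notes
\end{verbatim}
		}
	\end{frame}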

	\begin{frame}{A Game Example}
		\note{\begin{itemize}}
		\begin{columns}
			\note{\item Some sample 2D location visit counts}
			\only<1>{\begin{column}{0.45\textwidth}
				
				\begin{tabularx}{\linewidth}{l | l | l | l}
					 $y \backslash x$ & 0 & 1 & 2  \\
					 \hline
					 0 & 10 & 20 & 15 \\
					 1 & 12 & 35 & 13 \\
					 2 & 15 & 20 & 10 \\
				\end{tabularx}
			\end{column}}
			\note{\item Converted into visit counts as fraction of total and then into probability of having visited that location}
			\only<1->{\begin{column}{0.45\textwidth}
				\begin{tabularx}{\linewidth}{l | l | l}
					loc $(x,y)$ & visits / total & p(loc)  \\
					\hline
					0,0 & $\frac{10}{150}$ & 0.067\\
					0,1 & $\frac{12}{150}$ & 0.08\\
					0,2 & $\frac{15}{150}$ & 0.1\\
					1,0 & $\frac{20}{150}$ & 0.133\\
					1,1 & $\frac{35}{150}$ & 0.233\\
					1,2 & $\frac{20}{150}$ & 0.133 \\
					2,0 & $\frac{15}{150}$ & 0.1\\
					2,1 & $\frac{13}{150}$ & 0.0867\\
					2,2 & $\frac{10}{150}$ & 0.067\\
				\end{tabularx}
			\end{column}}
		\note{\item Then we just perform the math as a giant summation. Computers are good at this}
		\note{\item Except computers are not keen on 0's}
		\only<2>{\begin{column}{0.45\textwidth}
				\begin{itemize}
					\item $H(X) = -\Big[$
					\item $2\big(0.067\log_{2}(0.067)\big) + $
					\item $2\big(0.133\log_{2}(0.133)\big) + $
					\item $2\big(0.1\log_{2}(0.1)\big) + $
					\item $ \big(0.08\log_{2}(0.08)\big) + $
					\item $ \big(0.234\log_{2}(0.234)\big) + $
					\item $ \big(0.0867\log_{2}(0.0867)\big)\Big]$
					\item $\approx 3.05$ bits
				\end{itemize}
			\end{column}}	
		\end{columns}
	\note{\end{itemize}}
	\end{frame}
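	\begin{frame}[fragile]{Sketch: Entropy of the Visit Counts}
		The table's calculation, repeated in Python over the raw visit counts (so without the rounding used on the slide). The \texttt{if p > 0} guard avoids taking $\log(0)$ for unvisited cells.
		{\footnotesize
\begin{verbatim}
import math

# The 3x3 visit-count grid from the previous slide.
visits = [[10, 20, 15],
          [12, 35, 13],
          [15, 20, 10]]

total = sum(sum(row) for row in visits)              # 150
probs = [v / total for row in visits for v in row]

# 0 * log(0) is treated as 0 by skipping unvisited cells.
h = -sum(p * math.log2(p) for p in probs if p > 0)
print(h)  # ~3.05 bits; a uniform spread over 9 cells gives log2(9) ~ 3.17
\end{verbatim}
		}
	\end{frame}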

	%% METRICS
	% Simon's raw vs computed metrics.
	
	%% SKILL
	% Evaluating skill depth
	
\end{document}