BibTeX of Marcus Hutter

Publications: 2007, 2006, 2005, 2004, 2003, 2002, 2001, 2000, older.

%-------------Publications-of-Marcus-Hutter-2007--------------%

@Article{Hutter:07uspx,
  author =       "M. Hutter",
  title =        "On Universal Prediction and {B}ayesian Confirmation",
  journal =      "Theoretical Computer Science",
  volume =       "384",
  number =       "1",
  pages =        "33--48",
  _month =        sep,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#uspx",
  url =          "http://arxiv.org/abs/0709.1516",
  pdf =          "http://www.hutter1.net/ai/uspx.pdf",
  ps =           "http://www.hutter1.net/ai/uspx.ps",
  latex =        "http://www.hutter1.net/ai/uspx.tex",
  slides =       "http://www.hutter1.net/ai/susp.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#uai",
  press =        "http://www.hutter1.net/official/press.htm#???",
  doi =          "10.1016/j.tcs.2007.05.016",
  issn =         "0304-3975",
  keywords =     "Sequence prediction, Bayes, Solomonoff prior,
                  Kolmogorov complexity, Occam's razor, prediction bounds,
                  model classes, philosophical issues, symmetry principle,
                  confirmation theory, reparametrization invariance,
                  old-evidence/updating problem, (non)computable environments.",
  abstract =     "The Bayesian framework is a well-studied and successful framework
                  for inductive reasoning, which includes hypothesis testing and
                  confirmation, parameter estimation, sequence prediction,
                  classification, and regression. But standard statistical guidelines
                  for choosing the model class and prior are not always available or
                  fail, in particular in complex situations.
                  Solomonoff completed the Bayesian framework by providing a
                  rigorous, unique, formal, and universal choice for the model class
                  and the prior. We discuss in breadth how and in which sense
                  universal (non-i.i.d.) sequence prediction solves various
                  (philosophical) problems of traditional Bayesian sequence
                  prediction. We show that Solomonoff's model possesses many
                  desirable properties: Strong total and weak instantaneous bounds,
                  and in contrast to most classical continuous prior densities has
                  no zero p(oste)rior problem, i.e. can confirm universal
                  hypotheses, is reparametrization and regrouping invariant, and
                  avoids the old-evidence and updating problem. It even performs
                  well (actually better) in non-computable environments.",
}
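% Note: the universal choice discussed in the abstract above is Solomonoff's
% prior M. As a reminder (standard formulation from the algorithmic
% information literature, not quoted from the paper itself):
%
%   M(x) := \sum_{p \,:\, U(p)=x*} 2^{-\ell(p)} ,
%
% where U is a universal monotone Turing machine, the sum ranges over all
% minimal programs p whose output starts with x, and \ell(p) is the length
% of p in bits; predictions use the posterior M(b|x) = M(xb)/M(x).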

@Article{Hutter:07mlconvxx,
  author =       "M. Hutter and An. A. Muchnik",
  title =        "On Semimeasures Predicting {Martin-L{\"o}f} Random Sequences",
  journal =      "Theoretical Computer Science",
  volume =       "382",
  number =       "3",
  pages =        "247--261",
  _month =        sep,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#mlconvxx",
  xhttp =         "http://www.hutter1.net/ai/mlconvxx.htm",
  url =          "http://arxiv.org/abs/0708.2319",
  pdf =          "http://www.hutter1.net/ai/mlconvxx.pdf",
  ps =           "http://www.hutter1.net/ai/mlconvxx.ps",
  latex =        "http://www.hutter1.net/ai/mlconvxx.tex",
  slides =       "http://www.hutter1.net/ai/smlconvx.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#ait",
  doi =          "10.1016/j.tcs.2007.03.040",
  issn =         "0304-3975",
  keywords =     "Sequence prediction; Algorithmic Information Theory;
                  universal enumerable semimeasure; mixture distributions;
                  posterior convergence; Martin-L{\"o}f randomness;
                  quasimeasures.",
  abstract =     "Solomonoff's central result on induction is that the posterior of
                  a universal semimeasure M converges rapidly and with probability
                  1 to the true sequence generating posterior mu, if the latter is
                  computable. Hence, M is eligible as a universal sequence predictor
                  in case of unknown mu. Despite some nearby results and proofs in
                  the literature, the stronger result of convergence for all
                  (Martin-Loef) random sequences remained open. Such a convergence
                  result would be particularly interesting and natural, since
                  randomness can be defined in terms of M itself. We show that there
                  are universal semimeasures M which do not converge for all random
                  sequences, i.e. we give a partial negative answer to the open
                  problem. We also provide a positive answer for some non-universal
                  semimeasures. We define the incomputable measure D as a mixture
                  over all computable measures and the enumerable semimeasure W as a
                  mixture over all enumerable nearly-measures. We show that W
                  converges to D and D to mu on all random sequences. The Hellinger
                  distance measuring closeness of two distributions plays
                  a central role.",
  support =      "SNF grant 2100-67712 and RFBR grants N04-01-00427 and N02-01-22001",
}
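% Note: the Hellinger distance mentioned at the end of the abstract is, in
% the form standard in this line of work (stated here for convenience), the
% per-step quantity
%
%   h_t(\mu,\nu \mid x_{<t}) := \sum_{a} \big( \sqrt{\mu(a|x_{<t})}
%                                - \sqrt{\nu(a|x_{<t})} \big)^2 ,
%
% summed over the alphabet; closeness of two predictors along a sequence is
% measured by summing h_t over t.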
@Article{Hutter:07algprob,
  author =       "M. Hutter and S. Legg and P. M. B. Vit{\'a}nyi",
  title =        "Algorithmic Probability",
  journal =      "Scholarpedia",
  pages =        "19046",
  _month =        aug,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#algprob",
  http =         "http://www.scholarpedia.org/article/Algorithmic_Probability",
  pdf =          "http://www.hutter1.net/ai/algprob.pdf",
  ps =           "http://www.hutter1.net/ai/algprob.ps",
  project =      "http://www.hutter1.net/official/projects.htm#ait",
  keywords =     "algorithmic information theory,
                  algorithmic complexity,
                  discrete/continuous algorithmic probability,
                  Bayes, Occam, Epicurus,
                  applications, references",
  abstract =     "Algorithmic ``Solomonoff'' Probability (AP) assigns to objects an a
                  priori probability that is in some sense universal. This prior
                  distribution has theoretical applications in a number of areas,
                  including inductive inference theory and the time complexity
                  analysis of algorithms. Its main drawback is that it is not
                  computable and thus can only be approximated in practice.",
}
@InProceedings{Hutter:07pquest,
  author =       "D. Ryabko and M. Hutter",
  title =        "On Sequence Prediction for Arbitrary Measures",
  booktitle =    "Proc. IEEE International Symposium on Information Theory ({ISIT'07})",
  pages =        "2346--2350",
  _editor =       "A. Goldsmith and M. Medard and A. Shokrollahi and R. Zamir",
  publisher =    "IEEE",
  address =      "Nice, France",
  _month =        jun,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#pquest",
  url =          "http://arxiv.org/abs/cs.LG/0606077",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-13-06.pdf",
  pdf =          "http://www.hutter1.net/ai/pquest.pdf",
  ps =           "http://www.hutter1.net/ai/pquest.ps",
  latex =        "http://www.hutter1.net/ai/pquest.tex",
  slides =       "http://www.hutter1.net/ai/spquest.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#bayes",
  doi =          "??",
  isbn =         "1-4244-1429-6",
  keywords =     "sequence prediction, local absolute continuity,
                  non-stationary measures, average/expected criteria,
                  absolute/KL divergence, mixtures of measures.",
  abstract =     "Suppose we are given two probability measures on the set of
                  one-way infinite finite-alphabet sequences. Consider the
                  question of when one of the measures predicts the other, that is,
                  when the conditional probabilities converge (in a certain sense) if
                  one of the measures is chosen to generate the sequence. This
                  question may be considered a refinement of the problem of sequence
                  prediction in its most general formulation: for a given  class of
                  probability measures, does there exist a measure which predicts
                  all of the measures in the class? To address this problem, we find
                  some conditions on local absolute continuity which are sufficient
                  for prediction and  generalize several different notions
                  that are known to be sufficient for prediction. We also formulate
                  some open questions to outline a direction for finding the
                  conditions on classes of measures for which prediction is
                  possible.",
  support =      "SNF grant 200020-107616",
}
@InProceedings{Hutter:07idefs,
  author =       "S. Legg and M. Hutter",
  title =        "A Collection of Definitions of Intelligence",
  booktitle =    "Advances in Artificial General Intelligence: Concepts, Architectures and Algorithms",
  series =       "Frontiers in Artificial Intelligence and Applications",
  volume =       "157",
  pages =        "17--24",
  editor =       "B. Goertzel and P. Wang",
  publisher =    "IOS Press",
  address   =    "Amsterdam, NL",
  _month =        jun,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#idefs",
  url =          "http://arxiv.org/abs/0706.3639",
  http =         "http://www.idsia.ch/~shane/intelligence.html",
  pdf =          "http://www.hutter1.net/ai/idefs.pdf",
  ps =           "http://www.hutter1.net/ai/idefs.ps",
  latex =        "http://www.hutter1.net/ai/idefs.tex",
  project =      "http://www.hutter1.net/official/projects.htm#uai",
  isbn =         "978-1-58603-758-1",
  issn =         "0922-6389",
  keywords =     "intelligence definitions, collective, psychologist,
                  artificial, universal",
  abstract =     "This chapter is a survey of a large number of informal definitions
                  of ``intelligence'' that the authors have collected over the years.
                  Naturally, compiling a complete list would be impossible as many
                  definitions of intelligence are buried deep inside articles and
                  books. Nevertheless, the 70-odd definitions presented here
                  constitute, to the authors' knowledge, the largest and most
                  well-referenced collection there is.",
  support =      "SNF grant 200020-107616",
}
@InProceedings{Hutter:07lorp,
  author =       "M. Hutter",
  title =        "The Loss Rank Principle for Model Selection",
  booktitle =    "Proc. 20th Annual Conf. on Learning Theory ({COLT'07})",
  address =      "San Diego",
  series =       "LNAI",
  volume =       "4539",
  _editor =       "N. Bshouty and C. Gentile",
  publisher =    "Springer, Berlin",
  pages =        "589--603",
  _month =        jun,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#lorp",
  url =          "http://arxiv.org/abs/math.ST/0702804",
  pdf =          "http://www.hutter1.net/ai/lorp.pdf",
  ps =           "http://www.hutter1.net/ai/lorp.ps",
  latex =        "http://www.hutter1.net/ai/lorp.tex",
  slides =       "http://www.hutter1.net/ai/slorp.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#mdl",
  doi =          "10.1007/978-3-540-72927-3_42",
  issn =         "0302-9743",
  keywords =     "Model selection, loss rank principle,
                  non-parametric regression, classification,
                  general loss function, k nearest neighbors.",
  abstract =     "A key issue in statistics and machine learning is to automatically
                  select the ``right'' model complexity, e.g.\ the number of neighbors
                  to be averaged over in k nearest neighbor (kNN) regression or the
                  polynomial degree in regression with polynomials. We suggest a novel
                  principle (LoRP) for model selection in regression and
                  classification. It is based on the loss rank, which counts how many
                  other (fictitious) data would be fitted better. LoRP selects the
                  model that has minimal loss rank. Unlike most penalized maximum
                  likelihood variants (AIC,BIC,MDL), LoRP only depends on the
                  regression functions and the loss function. It works without a
                  stochastic noise model, and is directly applicable to any
                  non-parametric regressor, like kNN.",
  znote =       "Acceptance rate: 41/92 = 45\%",
}
@Article{Hutter:07ait,
  author =       "M. Hutter",
  title =        "Algorithmic Information Theory: a brief non-technical guide to the field",
  journal =      "Scholarpedia",
  pages =        "9620",
  _month =        mar,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#ait",
  http =         "http://www.scholarpedia.org/article/Algorithmic_Information_Theory",
  url =          "http://arxiv.org/abs/cs.IT/0703024",
  pdf =          "http://www.hutter1.net/ai/ait.pdf",
  ps =           "http://www.hutter1.net/ai/ait.ps",
  latex =        "http://www.hutter1.net/ai/ait.zip",
  project =      "http://www.hutter1.net/official/projects.htm#ait",
  keywords =     "Algorithmic information theory,
                  algorithmic ``Kolmogorov'' complexity,
                  algorithmic ``Solomonoff'' probability,
                  universal ``Levin'' search,
                  algorithmic ``Martin-Loef'' randomness,
                  applications, history, references, notation, nomenclature, map.",
  abstract =     "This article is a brief guide to the field of algorithmic
                  information theory (AIT), its underlying philosophy, and the most
                  important concepts. AIT arises by mixing information theory and
                  computation theory to obtain an objective and absolute notion of
                  information in an individual object, and in so doing gives rise to
                  an objective and robust notion of randomness of individual objects.
                  This is in contrast to classical information theory that is based on
                  random variables and communication, and has no bearing on
                  information and randomness of individual objects. After a brief
                  overview, the major subfields, applications, history, and a map of
                  the field are presented.",
}
@Article{Hutter:07postbndx,
  author =       "A. Chernov and M. Hutter and J. Schmidhuber",
  _author =       "Alexey Chernov and Marcus Hutter and Juergen Schmidhuber",
  title =        "Algorithmic Complexity Bounds on Future Prediction Errors",
  journal =      "Information and Computation",
  volume =       "205",
  number =       "2",
  pages =        "242--261",
  _month =        feb,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#postbndx",
  url =          "http://arxiv.org/abs/cs.LG/0701120",
  conf =         "http://www-alg.ist.hokudai.ac.jp/~thomas/ALT05/alt05.jhtml",
  pdf =          "http://www.hutter1.net/ai/postbndx.pdf",
  ps =           "http://www.hutter1.net/ai/postbndx.ps",
  latex =        "http://www.hutter1.net/ai/postbndx.tex",
  slides =       "http://www.hutter1.net/ai/spostbnd.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#ait",
  doi =          "10.1016/j.ic.2006.10.004",
  issn =         "0890-5401",
  keywords =     "Kolmogorov complexity, posterior bounds, online sequential prediction,
                  Solomonoff prior, monotone conditional complexity, total error,
                  future loss, randomness deficiency",
  abstract =     "We bound the future loss when predicting any (computably) stochastic
                  sequence online. Solomonoff finitely bounded the total deviation
                  of his universal predictor $M$ from the true distribution $\mu$ by
                  the algorithmic complexity of $\mu$. Here we assume we are at a
                  time $t>1$ and already observed $x=x_1...x_t$. We bound the future
                  prediction performance on $x_{t+1}x_{t+2}...$ by a new variant of
                  algorithmic complexity of $\mu$ given $x$, plus the complexity of
                  the randomness deficiency of $x$. The new complexity is monotone
                  in its condition in the sense that this complexity can only
                  decrease if the condition is prolonged. We also briefly discuss
                  potential generalizations to Bayesian model classes and to
                  classification problems.",
  support =      "SNF grant 2000-61847",
}
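% Note: Solomonoff's classical total bound referred to in the abstract reads,
% in its standard form (paraphrased here, not quoted from the paper):
%
%   \sum_{t=1}^\infty E_\mu \Big[ \sum_a \big( M(a|x_{<t})
%        - \mu(a|x_{<t}) \big)^2 \Big] \;\le\; (\ln 2)\, K(\mu) < \infty ,
%
% i.e. the total expected squared deviation of M from the true computable
% distribution \mu is bounded by \mu's algorithmic complexity; the paper
% above bounds the loss still to come after a prefix x has been observed.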
@InCollection{Hutter:07aixigentle,
  author =       "M. Hutter",
  title =        "Universal Algorithmic Intelligence: A Mathematical Top$\rightarrow$Down Approach",
  booktitle =    "Artificial General Intelligence",
  _editor =       "B. Goertzel and C. Pennachin",
  publisher =    "Springer",
  address =       "Berlin",
  _series =       "Cognitive Technologies",
  pages =        "227--290",
  _month =        jan,
  year =         "2007",
  bibtex =       "http://www.hutter1.net/official/bib.htm#aixigentle",
  http =         "http://www.hutter1.net/ai/aixigentle.htm",
  url =          "http://arxiv.org/abs/cs.AI/0701125",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-01-03.ps.gz",
  pdf =          "http://www.hutter1.net/ai/aixigentle.pdf",
  ps =           "http://www.hutter1.net/ai/aixigentle.ps",
  latex =        "http://www.hutter1.net/ai/aixigentle.tex",
  slides =       "http://www.hutter1.net/ai/skcunai.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#uai",
  press =        "http://www.hutter1.net/official/press.htm#uaibook",
  isbn =         "3-540-23733-X",
  categories =   "I.2.   [Artificial Intelligence]",
  keywords =     "Artificial intelligence; algorithmic probability;
                  sequential decision theory; rational agents;
                  value function; Solomonoff induction;
                  Kolmogorov complexity; reinforcement learning;
                  universal sequence prediction; strategic games;
                  function minimization; supervised learning.",
  abstract =     "Decision theory formally solves the problem of rational agents in
                  uncertain worlds if the true environmental prior probability
                  distribution is known. Solomonoff's theory of universal induction
                  formally solves the problem of sequence prediction for unknown
                  prior distribution. We combine both ideas and get a parameter-free
                  theory of universal Artificial Intelligence. We give strong
                  arguments that the resulting AIXI model is the most intelligent
                  unbiased agent possible. We outline for a number of problem
                  classes, including sequence prediction, strategic games, function
                  minimization, reinforcement and supervised learning, how the AIXI
                  model can formally solve them. The major drawback of the AIXI
                  model is that it is uncomputable. To overcome this problem, we
                  construct a modified algorithm AIXI$tl$ that is still
                  effectively more intelligent than any other time $t$ and length $l$
                  bounded agent. The computation time of AIXI$tl$ is of the order $t
                  \cdot 2^l$. Other discussed topics are formal definitions of
                  intelligence order relations, the horizon problem and relations of
                  the AIXI theory to other AI approaches.",
}
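% Note: the AIXI action rule sketched in this chapter is an expectimax over
% the universal mixture; as a reminder (formulation as in the AIXI
% literature, reproduced from memory for orientation; horizon m, rewards r_i):
%
%   a_k := \arg\max_{a_k} \sum_{o_k r_k} \cdots \max_{a_m} \sum_{o_m r_m}
%          (r_k + \cdots + r_m)
%          \sum_{q \,:\, U(q,a_{1:m}) = o_1 r_1 \ldots o_m r_m} 2^{-\ell(q)} ,
%
% i.e. expected reward is maximized under a Solomonoff-style prior
% 2^{-\ell(q)} over environment programs q.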

%-------------Publications-of-Marcus-Hutter-2006--------------%

@Article{Hutter:06unipriorx,
  author =       "M. Hutter",
  title =        "On Generalized Computable Universal Priors and their Convergence",
  journal =      "Theoretical Computer Science",
  volume =       "364",
  number =       "1",
  pages =        "27--41",
  _month =        nov,
  year =         "2006",
  bibtex =       "http://www.hutter1.net/official/bib.htm#unipriorx",
  url =          "http://arxiv.org/abs/cs.LG/0503026",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-05-05.pdf",
  pdf =          "http://www.hutter1.net/ai/unipriorx.pdf",
  ps =           "http://www.hutter1.net/ai/unipriorx.ps",
  latex =        "http://www.hutter1.net/ai/unipriorx.tex",
  slides =       "http://www.hutter1.net/ai/sunipriors.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#ait",
  doi =          "10.1016/j.tcs.2006.07.039",
  issn =         "0304-3975",
  keywords =     "Sequence prediction; Algorithmic Information Theory;
                  Solomonoff's prior; universal probability;
                  mixture distributions; posterior convergence;
                  computability concepts; Martin-Loef randomness.",
  abstract =     "Solomonoff unified Occam's razor and Epicurus' principle of
                  multiple explanations to one elegant, formal, universal theory of
                  inductive inference, which initiated the field of algorithmic
                  information theory. His central result is that the posterior of
                  the universal semimeasure M converges rapidly to the true sequence
                  generating posterior mu, if the latter is computable. Hence, M is
                  eligible as a universal predictor in case of unknown mu. The first
                  part of the paper investigates the existence and convergence of
                  computable universal (semi)measures for a hierarchy of
                  computability classes: recursive, estimable, enumerable, and
                  approximable. For instance, M is known to be enumerable, but
                  not estimable, and to dominate all enumerable semimeasures. We
                  present proofs for discrete and continuous semimeasures. The
                  second part investigates more closely the types of convergence,
                  possibly implied by universality: in difference and in ratio, with
                  probability 1, in mean sum, and for Martin-Loef random sequences.
                  We introduce a generalized concept of randomness for individual
                  sequences and use it to exhibit difficulties regarding these
                  issues. In particular, we show that convergence fails (holds) on
                  generalized-random sequences in gappy (dense) Bernoulli classes.",
}
@Article{Hutter:06fuo,
  author =       "M. Hutter and S. Legg",
  title =        "Fitness Uniform Optimization",
  journal  =     "IEEE Transactions on Evolutionary Computation",
  volume =       "10",
  number =       "5",
  pages =        "568--589",
  _month =        oct,
  year =         "2006",
  bibtex =       "http://www.hutter1.net/official/bib.htm#fuo",
  url =          "http://arxiv.org/abs/cs.NE/0610126",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-16-06.pdf",
  pdf =          "http://www.hutter1.net/ai/fuo.pdf",
  ps =           "http://www.hutter1.net/ai/fuo.ps",
  latex =        "http://www.hutter1.net/ai/fuo.zip",
  slides =       "http://www.hutter1.net/ai/sfuss.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#optimize",
  press =        "http://www.hutter1.net/official/press.htm#fuss",
  doi =          "10.1109/TEVC.2005.863127",
  issn =         "1089-778X",
  keywords =     "Evolutionary algorithms, fitness uniform selection scheme, fitness
                  uniform deletion scheme, preserve diversity, local optima, evolution,
                  universal similarity relation, correlated recombination, fitness tree
                  model, traveling salesman, set covering, satisfiability.",
  abstract =     "In evolutionary algorithms, the fitness of a population increases with
                  time by mutating and recombining individuals and by a biased selection
                  of more fit individuals. The right selection pressure is critical in
                  ensuring sufficient optimization progress on the one hand and in
                  preserving genetic diversity to be able to escape from local optima on
                  the other hand. Motivated by a universal similarity relation on the
                  individuals, we propose a new selection scheme, which is uniform in
                  the fitness values. It generates selection pressure toward sparsely
                  populated fitness regions, not necessarily toward higher fitness, as
                  is the case for all other selection schemes. We show analytically on a
                  simple example that the new selection scheme can be much more
                  effective than standard selection schemes.  We also propose a new
                  deletion scheme which achieves a similar result via deletion and show
                  how such a scheme preserves genetic diversity more effectively than
                  standard approaches.  We compare the performance of the new schemes to
                  tournament selection and random deletion on an artificial deceptive
                  problem and a range of NP-hard problems: traveling salesman, set
                  covering and satisfiability.",
}
@InProceedings{Hutter:06discount,
  author =       "M. Hutter",
  title =        "General Discounting versus Average Reward",
  booktitle =    "Proc. 17th International Conf. on Algorithmic Learning Theory ({ALT'06})",
  address =      "Barcelona",
  series =       "LNAI",
  volume =       "4264",
  _editor =       "Jose L. Balcázar and Phil Long and Frank Stephan",
  publisher =    "Springer, Berlin",
  pages =        "244--258",
  _month =        oct,
  year =         "2006",
  bibtex =       "http://www.hutter1.net/official/bib.htm#discount",
  url =          "http://arxiv.org/abs/cs.LG/0605040",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-11-06.pdf",
  conf =         "http://www-alg.ist.hokudai.ac.jp/~thomas/ALT06/alt06.jhtml",
  pdf =          "http://www.hutter1.net/ai/discount.pdf",
  ps =           "http://www.hutter1.net/ai/discount.ps",
  latex =        "http://www.hutter1.net/ai/discount.tex",
  slides =       "http://www.hutter1.net/ai/sdiscount.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#rl",
  issn =         "0302-9743",
  isbn =         "3-540-46649-5",
  doi =          "10.1007/11894841_21",
  keywords =     "reinforcement learning; average value;
                  discounted value; arbitrary environment;
                  arbitrary discount sequence; effective horizon;
                  increasing farsightedness; consistent behavior.",
  abstract =     "Consider an agent interacting with an environment in cycles. In
                  every interaction cycle the agent is rewarded for its performance.
                  We compare the average reward U from cycle 1 to m (average
                  value) with the future discounted reward V from cycle k to
                  infinity (discounted value). We consider essentially arbitrary
                  (non-geometric) discount sequences and arbitrary reward sequences
                  (non-MDP environments). We show that asymptotically U for
                  m->infinity and V for k->infinity are equal, provided both
                  limits exist. Further, if the effective horizon grows linearly
                  with k or faster, then existence of the limit of U implies
                  that the limit of V exists. Conversely, if the effective horizon
                  grows linearly with k or slower, then existence of the limit of
                  V implies that the limit of U exists.",
  znote =        "Acceptance rate: 24/53 = 45\%",
}
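% Note: written out explicitly (notation along the lines of the paper, with
% reward sequence r_1 r_2 ... and summable discount sequence \gamma_k), the
% two values compared above are
%
%   U_{1m} := \frac{1}{m} \sum_{i=1}^m r_i , \qquad
%   V_{k\infty} := \frac{1}{\Gamma_k} \sum_{i=k}^\infty \gamma_i r_i ,
%   \quad \Gamma_k := \sum_{i=k}^\infty \gamma_i ,
%
% and the theorems relate \lim_{m\to\infty} U_{1m} to
% \lim_{k\to\infty} V_{k\infty}.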
@InProceedings{Hutter:06actopt,
  author =       "D. Ryabko and M. Hutter",
  title =        "Asymptotic Learnability of Reinforcement Problems with Arbitrary Dependence",
  booktitle =    "Proc. 17th International Conf. on Algorithmic Learning Theory ({ALT'06})",
  address =      "Barcelona",
  series =       "LNAI",
  volume =       "4264",
  _editor =       "Jose L. Balcázar and Phil Long and Frank Stephan",
  publisher =    "Springer, Berlin",
  pages =        "334--347",
  _month =        oct,
  year =         "2006",
  bibtex =       "http://www.hutter1.net/official/bib.htm#actopt",
  url =          "http://arxiv.org/abs/cs.LG/0603110",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-09-06.pdf",
  conf =         "http://www-alg.ist.hokudai.ac.jp/~thomas/ALT06/alt06.jhtml",
  pdf =          "http://www.hutter1.net/ai/actopt.pdf",
  ps =           "http://www.hutter1.net/ai/actopt.ps",
  latex =        "http://www.hutter1.net/ai/actopt.tex",
  slides =       "http://www.hutter1.net/ai/sactopt.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#universal",
  press =        "http://www.hutter1.net/official/press.htm#universal",
  issn =         "0302-9743",
  isbn =         "3-540-46649-5",
  doi =          "10.1007/11894841_27",
  keywords =     "Reinforcement learning, asymptotic average value,
                  self-optimizing policies, (non) Markov decision processes.",
  abstract =     "We address the problem of reinforcement
                  learning in which observations may exhibit an arbitrary form of
                  stochastic dependence on past observations and actions,
                  i.e. environments more general than (PO)MDPs.
                  The task for an agent is to attain the  best possible asymptotic
                  reward where the true generating environment is unknown but
                  belongs to a known countable family of environments. We find some
                  sufficient conditions on the class of  environments under which an
                  agent exists which attains the best asymptotic reward for any
                  environment in the class. We analyze how tight these conditions
                  are and how they relate to different probabilistic assumptions
                  known in reinforcement learning and related fields, such as Markov
                  Decision Processes and mixing conditions.",
  znote =        "Acceptance rate: 24/53 = 45\%",
}
@Article{Hutter:06mdlspeedx,
  author =       "J. Poland and M. Hutter",
  title =        "{MDL} Convergence Speed for {B}ernoulli Sequences",
  journal =      "Statistics and Computing",
  volume =       "16",
  number =       "2",
  pages =        "161--175",
  _month =        jun,
  year =         "2006",
  bibtex =       "http://www.hutter1.net/official/bib.htm#mdlspeedx",
  url =          "http://arxiv.org/abs/math.ST/0602505",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-04-06.pdf",
  pdf =          "http://www.hutter1.net/ai/mdlspeedx.pdf",
  ps =           "http://www.hutter1.net/ai/mdlspeedx.ps",
  latex =        "http://www.hutter1.net/ai/mdlspeedx.tex",
  slides =       "http://www.hutter1.net/ai/smdlspeed.pdf",
  slidesppt =    "http://www.hutter1.net/ai/smdlspeed.ppt",
  project =      "http://www.hutter1.net/official/projects.htm#mdl",
  issn =         "0960-3174",
  doi =          "10.1007/s11222-006-6746-3",
  keywords =     "MDL, Minimum Description Length, Convergence Rate,
                  Prediction, Bernoulli, Discrete Model Class.",
  abstract =     "The Minimum Description Length principle for online sequence
                  estimation/prediction in a proper learning setup is studied. If
                  the underlying model class is discrete, then the total expected
                  square loss is a particularly interesting performance measure: (a)
                  this quantity is finitely bounded, implying convergence with
                  probability one, and (b) it additionally specifies the convergence
                  speed. For MDL, in general one can only have loss bounds which are
                  finite but exponentially larger than those for Bayes mixtures. We
                  show that this is even the case if the model class contains only
                  Bernoulli distributions. We derive a new upper bound on the
                  prediction error for countable Bernoulli classes. This implies a
                  small bound (comparable to the one for Bayes mixtures) for certain
                  important model classes. We discuss the application to Machine
                  Learning tasks such as classification and hypothesis testing, and
                  generalization to countable classes of i.i.d. models.",
}
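% Note: the exponential gap stated in the abstract is, in the standard form
% from this line of work (given for orientation; constants from memory, see
% the paper for the precise statement): with per-step squared prediction
% error s_t := \sum_a (\xi(a|x_{<t}) - \mu(a|x_{<t}))^2, the Bayes mixture
% \xi over a countable class with prior weights w_\nu satisfies
%
%   \sum_t E[s_t] \;\le\; \ln w_\mu^{-1} ,
%
% whereas two-part MDL can in general only be bounded by order w_\mu^{-1},
% exponentially worse; the paper shows this already for Bernoulli classes
% and derives smaller bounds for important subclasses.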
@InProceedings{Hutter:06usp,
  author =       "M. Hutter",
  title =        "On the Foundations of Universal Sequence Prediction",
  booktitle =    "Proc. 3rd Annual Conference on Theory and
                  Applications of Models of Computation ({TAMC'06})",
  volume =       "3959",
  series =       "LNCS",
  pages =        "408--420",
  _editor =       "J.-Y. Cai and S. B. Cooper and A. Li",
  publisher =    "Springer",
  _address =      "Beijing",
  _month =        may,
  year =         "2006",
  bibtex =       "http://www.hutter1.net/official/bib.htm#usp",
  url =          "http://arxiv.org/abs/cs.LG/0605009",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-03-06.pdf",
  conf =         "http://gcl.iscas.ac.cn/accl06/TAMC06_Home.htm",
  pdf =          "http://www.hutter1.net/ai/usp.pdf",
  ps =           "http://www.hutter1.net/ai/usp.ps",
  latex =        "http://www.hutter1.net/ai/usp.tex",
  slides =       "http://www.hutter1.net/ai/susp.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#ait",
  issn =         "0302-9743",
  isbn =         "3-540-34021-1",
  doi =          "10.1007/11750321_39",
  keywords =     "Sequence prediction, Bayes, Solomonoff prior,
                  Kolmogorov complexity, Occam's razor, prediction bounds,
                  model classes, philosophical issues, symmetry principle,
                  confirmation theory, reparametrization invariance,
                  old-evidence/updating problem, (non)computable environments.",
  abstract =     "Solomonoff completed the Bayesian framework by providing a
                  rigorous, unique, formal, and universal choice for the model class
                  and the prior. We discuss in breadth how and in which sense
                  universal (non-i.i.d.) sequence prediction solves various
                  (philosophical) problems of traditional Bayesian sequence
                  prediction. We show that Solomonoff's model possesses many
                  desirable properties: Fast convergence and strong bounds, and in
                  contrast to most classical continuous prior densities has no zero
                  p(oste)rior problem, i.e. can confirm universal hypotheses, is
                  reparametrization and regrouping invariant, and avoids the
                  old-evidence and updating problem. It even performs well (actually
                  better) in non-computable environments.",
  znote =        "Acceptance rate: 76/400 = 19\%",
}
@InProceedings{Hutter:06aixifoe,
  author =       "J. Poland and M. Hutter",
  title =        "Universal Learning of Repeated Matrix Games",
  booktitle =    "Proc. 15th Annual Machine Learning Conf. of {B}elgium and {T}he {N}etherlands ({Benelearn'06})",
  pages =        "7--14",
  address =      "Ghent",
  _editor =       "Yvan Saeys and Bernard De Baets and Elena Tsiporkova and Yves Van de Peer",
  xpublisher =    "",
  _month =        may,
  year =         "2006",
  isbn =         "90 382 0948 7",
  bibtex =       "http://www.hutter1.net/official/bib.htm#aixifoe",
  url =          "http://arxiv.org/abs/cs.LG/0508073",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-18-05.pdf",
  conf =         "http://bioinformatics.psb.ugent.be/benelearn2006/",
  pdf =          "http://www.hutter1.net/ai/aixifoe.pdf",
  ps =           "http://www.hutter1.net/ai/aixifoe.ps",
  latex =        "http://www.hutter1.net/ai/aixifoe.zip",
  slides =       "http://www.hutter1.net/ai/saixifoe.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#expert",
  abstract =     "We study and compare the learning dynamics of two universal
                  learning algorithms, one based on Bayesian learning and the
                  other on prediction with expert advice. Both approaches have
                  strong asymptotic performance guarantees. When confronted with
                  the task of finding good long-term strategies in repeated
                  2 x 2 matrix games, they behave quite differently. We consider
                  the case where the learning algorithms are not even informed
                  about the game they are playing.",
}
@InProceedings{Hutter:06ior,
  author =       "S. Legg and M. Hutter",
  title =        "A Formal Measure of Machine Intelligence",
  booktitle =    "Proc. 15th Annual Machine Learning Conference of {B}elgium and {T}he {N}etherlands ({Benelearn'06})",
  pages =        "73--80",
  address =      "Ghent",
  _editor =       "Yvan Saeys and Bernard De Baets and Elena Tsiporkova and Yves Van de Peer",
  _month =        may,
  year =         "2006",
  isbn =         "90 382 0948 7",
  bibtex =       "http://www.hutter1.net/official/bib.htm#ior",
  url =          "http://arxiv.org/abs/cs.AI/0605024",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-10-06.pdf",
  conf =         "http://bioinformatics.psb.ugent.be/benelearn2006/",
  pdf =          "http://www.hutter1.net/ai/ior.pdf",
  ps =           "http://www.hutter1.net/ai/ior.ps",
  latex =        "http://www.hutter1.net/ai/ior.zip",
  slides =       "http://www.hutter1.net/ai/sior.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#uai",
  press =        "http://www.hutter1.net/official/press.htm#ior",
  abstract =     "A fundamental problem in artificial intelligence is that nobody really
                  knows what intelligence is.  The problem is especially acute when we
                  need to consider artificial systems which are significantly different
                  to humans.  In this paper we approach this problem in the following
                  way: We take a number of well known informal definitions of human
                  intelligence that have been given by experts, and extract their
                  essential features.  These are then mathematically formalised to
                  produce a general measure of intelligence for arbitrary machines.  We
                  believe that this measure formally captures the concept of machine
                  intelligence in the broadest reasonable sense.",
}
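% Note: the measure proposed here is the Legg-Hutter universal intelligence
% measure; as a reminder (form as published in this line of work):
%
%   \Upsilon(\pi) := \sum_{\mu \in E} 2^{-K(\mu)} \, V_\mu^\pi ,
%
% the performance V of agent \pi in each computable reward environment \mu,
% weighted by the environment's simplicity 2^{-K(\mu)}.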
@InProceedings{Hutter:06robot,
  author =       "V. Zhumatiy and F. Gomez and M. Hutter and J. Schmidhuber",
  _author =       "Viktor Zhumatiy and Faustino Gomez and Marcus Hutter and J{\"u}rgen Schmidhuber",
  title =        "Metric State Space Reinforcement Learning for a Vision-Capable Mobile Robot",
  booktitle =    "Proc. 9th International Conf. on Intelligent Autonomous Systems ({IAS'06})",
  pages =        "272--281",
  _editor =       "Tamio Arai and Rolf Pfeifer and Tucker Balch and Hiroshi Yokoi",
  publisher =    "IOR Press",
  _month =        mar,
  year =         "2006",
  bibtex =       "http://www.hutter1.net/official/bib.htm#robot",
  url =          "http://arxiv.org/abs/cs.RO/0603023",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-05-06.pdf",
  conf =         "http://www.arai.pe.u-tokyo.ac.jp/IAS-9/",
  pdf =          "http://www.hutter1.net/ai/robot.pdf",
  ps =           "http://www.hutter1.net/ai/robot.ps",
  latex =        "http://www.hutter1.net/ai/robot.zip",
  slides =       "http://www.hutter1.net/ai/srobot.pdf",
  slidesppt =    "http://www.hutter1.net/ai/srobot.ppt",
  isbn =         "1-58603-595-9",
  keywords =     "reinforcement learning; mobile robots.",
  abstract =     "We address the problem of autonomously learning controllers for
                  vision-capable mobile robots. We extend McCallum's (1995)
                  Nearest-Sequence Memory algorithm to allow for general metrics
                  over state-action trajectories. We demonstrate the feasibility of
                  our approach by successfully running our algorithm on a real
                  mobile robot. The algorithm is novel and unique in that it (a)
                  explores the environment and learns directly on a mobile robot
                  without using a hand-made computer model as an intermediate step,
                  (b) does not require manual discretization of the sensor input
                  space, (c) works in piecewise continuous perceptual spaces, and
                  (d) copes with partial observability. Together this allows
                  learning from much less experience compared to previous methods.",
  znote =        "Acceptance rate: 112/146 = 77\%",
}
@Article{Hutter:06knapsack,
  author =       "M. Mastrolilli and M. Hutter",
  title =        "Hybrid Rounding Techniques for Knapsack Problems",
  journal =      "Discrete Applied Mathematics",
  volume =       "154",
  number =       "4",
  pages =        "640--649",
  _month =        mar,
  year =         "2006",
  bibtex =       "http://www.hutter1.net/official/bib.htm#knapsack",
  url =          "http://arxiv.org/abs/cs.CC/0305002",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-03-02.ps.gz",
  pdf =          "http://www.hutter1.net/ai/knapsack.pdf",
  ps =           "http://www.hutter1.net/ai/knapsack.ps",
  latex =        "http://www.hutter1.net/ai/knapsack.tex",
  project =      "http://www.hutter1.net/official/projects.htm#optimize",
  issn =         "0166-218X",
  doi =          "10.1016/j.dam.2005.08.004",
  abstract =     "We address the classical knapsack problem and a variant in which an upper
                  bound is imposed on the number of items that can be selected. We show that
                  appropriate combinations of rounding techniques yield novel and powerful
                  ways of rounding. As an application of these techniques, we present faster
                  polynomial time approximation schemes that compute an approximate solution
                  of any fixed accuracy in linear time. These linear complexity bounds give a
                  substantial improvement over the best previously known polynomial bounds.",
}
@Article{Hutter:06unimdlx,
  author =       "Marcus Hutter",
  title =        "Sequential Predictions based on Algorithmic Complexity",
  journal =      "Journal of Computer and System Sciences",
  volume =       "72",
  number =       "1",
  pages =        "95--117",
  _month =        feb,
  year =         "2006",
  url =          "http://arxiv.org/abs/cs.IT/0508043",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-16-04.pdf",
  bibtex =       "http://www.hutter1.net/official/bib.htm#unimdlx",
  url =          "http://arxiv.org/abs/cs.IT/0508043",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-16-04.pdf",
  pdf =          "http://www.hutter1.net/ai/unimdlx.pdf",
  ps =           "http://www.hutter1.net/ai/unimdlx.ps",
  latex =        "http://www.hutter1.net/ai/unimdlx.tex",
  slides =       "http://www.hutter1.net/ai/sunimdl.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#mdl",
  issn =         "0022-0000",
  doi =          "10.1016/j.jcss.2005.07.001",
  keywords =     "Sequence prediction; Algorithmic Information Theory;
                  Solomonoff's prior; Monotone Kolmogorov Complexity;
                  Minimal Description Length; Convergence;
                  Self-Optimizingness",
  abstract =     "This paper studies sequence prediction based on the
                  monotone Kolmogorov complexity $\Km=-\lb m$, i.e.\ based on
                  universal MDL. $m$ is extremely close to Solomonoff's prior $M$,
                  the latter being an excellent predictor in deterministic as well
                  as probabilistic environments, where performance is measured in
                  terms of convergence of posteriors or losses. Despite this
                  closeness to $M$, it is difficult to assess the prediction quality
                  of $m$, since little is known about the closeness of their
                  posteriors, which are the important quantities for prediction.
                  We show that for deterministic computable environments, the
                  ``posterior'' and losses of $m$ converge, but rapid convergence
                  could only be shown on-sequence; the off-sequence behavior is
                  unclear. In probabilistic environments, neither the posterior nor
                  the losses converge, in general.",
}
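% Notation note for the abstract above (standard definitions, spelled out
% because the entry uses the paper's private macros): \lb is the binary
% logarithm, and the monotone complexity is
%
%   \Km(x) := \min\{ \ell(p) : U(p) = x* \} ,
%
% the length of the shortest program whose output starts with x on a
% universal monotone machine U; m = 2^{-\Km} is the associated MDL
% semimeasure, to be contrasted with Solomonoff's mixture M.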
@Proceedings{Hutter:06kcdagabs,
  editor =       "M. Hutter and W. Merkle and P. M. B. Vit\'anyi",
  title =        "Kolmogorov Complexity and Applications",
  number =       "06051",
  _month =        jan/aug,
  year =         "2006",
  series =       "Dagstuhl Seminar Proceedings",
  url1 =         "http://www.hutter1.net/dagstuhl/",
  url2 =         "http://drops.dagstuhl.de/portals/06051",
  url3 =         "http://drops.dagstuhl.de/opus/volltexte/2006/663",
  pdf =          "http://www.hutter1.net/ai/kcdagabs.pdf",
  ps =           "http://www.hutter1.net/ai/kcdagabs.ps",
  latex =        "http://www.hutter1.net/ai/kcdagabs.tex",
  project =      "http://www.hutter1.net/official/projects.htm#ait",
  issn =         "1862-4405",
  publisher =    "IBFI",
  _publisher =    "Internationales Begegnungs- und Forschungszentrum fuer Informatik (IBFI), Schloss Dagstuhl, Germany",
  address =      "Dagstuhl, Germany",
  keywords =     "Information theory, Kolmogorov Complexity, effective randomnes,
                  algorithmic probability, recursion theory, computational complexity,
                  machine learning",
  abstract =     "From 29.01.06 to 03.02.06,
                  the Dagstuhl Seminar 06051 ``Kolmogorov Complexity and Applications''
                  was held in the International Conference and Research Center (IBFI),
                  Schloss Dagstuhl. During the seminar, several participants presented
                  their current research, and ongoing work and open problems were
                  discussed. Abstracts of the presentations given during the seminar
                  as well as abstracts of seminar results and ideas are put together
                  in this proceedings. The first section describes the seminar topics and
                  goals in general. Links to extended abstracts or full papers are
                  provided, if available.",
  note =         "http://drops.dagstuhl.de/portals/06051",
}

%-------------Publications-of-Marcus-Hutter-2005--------------%

@Article{Hutter:05mdl2px,
  author =       "Jan Poland and Marcus Hutter",
  title =        "Asymptotics of Discrete {MDL} for Online Prediction",
  journal =      "IEEE Transactions on Information Theory",
  _month =        nov,
  volume =       "51",
  number =       "11",
  pages =        "3780--3795",
  year =         "2005",
  bibtex =       "http://www.hutter1.net/official/bib.htm#mdl2px",
  url =          "http://arxiv.org/abs/cs.IT/0506022",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-13-05.pdf",
  pdf =          "http://www.hutter1.net/ai/mdl2px.pdf",
  ps =           "http://www.hutter1.net/ai/mdl2px.ps",
  latex =        "http://www.hutter1.net/ai/mdl2px.zip",
  slides =       "http://www.hutter1.net/ai/smdl2p.pdf",
  slidesppt =    "http://www.hutter1.net/ai/smdl2p.ppt",
  project =      "http://www.hutter1.net/official/projects.htm#mdl",
  doi =          "10.1109/TIT.2005.856956",
  issn =         "0018-9448",
  keywords =     "Algorithmic Information Theory, Classification, Consistency,
                  Discrete Model Class, Loss Bounds, Minimum Description Length,
                  Regression, Sequence Prediction, Stabilization, Universal Induction.",
  abstract =     "Minimum Description Length (MDL) is an important principle for induction and
                  prediction, with strong relations to optimal Bayesian learning. This paper
                  deals with learning non-i.i.d. processes by means of two-part MDL, where the
                  underlying model class is countable. We consider the online learning framework,
                  i.e. observations come in one by one, and the predictor is allowed to update
                  his state of mind after each time step. We identify two ways of predicting by
                  MDL for this setup, namely a static and a dynamic one. (A third variant,
                  hybrid MDL, will turn out inferior.) We will prove that under the only
                  assumption that the data is generated by a distribution contained in the model
                  class, the MDL predictions converge to the true values almost surely. This is
                  accomplished by proving finite bounds on the quadratic, the Hellinger, and the
                  Kullback-Leibler loss of the MDL learner, which are however exponentially worse
                  than for Bayesian prediction. We demonstrate that these bounds are sharp, even
                  for model classes containing only Bernoulli distributions. We show how these
                  bounds imply regret bounds for arbitrary loss functions. Our results apply to a
                  wide range of setups, namely sequence prediction, pattern classification,
                  regression, and universal induction in the sense of Algorithmic Information
                  Theory among others.",
}
@Article{Hutter:05tree,
  author =       "Marco Zaffalon and Marcus Hutter",
  title =        "Robust Inference of Trees",
  journal =      "Annals of Mathematics and Artificial Intelligence",
  volume =       "45",
  pages =        "215--239",
  _month =        oct,
  year =         "2005",
  _publisher =   "Springer",
  bibtex =       "http://www.hutter1.net/official/bib.htm#tree",
  url =          "http://arxiv.org/abs/cs.LG/0511087",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-11-03.pdf",
  pdf =          "http://www.hutter1.net/ai/tree.pdf",
  ps =           "http://www.hutter1.net/ai/tree.ps",
  latex =        "http://www.hutter1.net/ai/tree.zip",
  project =      "http://www.hutter1.net/official/projects.htm#robust",
  doi =          "10.1007/s10472-005-9007-9",
  issn =         "1012-2443",
  categories =   "I.2.   [Artificial Intelligence]",
  keywords =     "Robust inference, spanning trees, intervals,
                  dependence, graphical models, mutual information, imprecise
                  probabilities, imprecise Dirichlet model.",
  abstract =     "This paper is concerned with the reliable inference of optimal
                  tree-approximations to the dependency structure of an unknown
                  distribution generating data. The traditional approach to the
                  problem measures the dependency strength between random variables
                  by the index called mutual information. In this paper reliability
                  is achieved by Walley's imprecise Dirichlet model, which
                  generalizes Bayesian learning with Dirichlet priors. Adopting the
                  imprecise Dirichlet model results in posterior interval
                  expectation for mutual information, and in a set of plausible
                  trees consistent with the data. Reliable inference about the
                  actual tree is achieved by focusing on the substructure common to
                  all the plausible trees. We develop an exact algorithm that infers
                  the substructure in time O(m^4), m being the number of random
                  variables. The new algorithm is applied to a set of data sampled
                  from a known distribution. The method is shown to reliably infer
                  edges of the actual tree even when the data are very scarce,
                  unlike the traditional approach. Finally, we provide lower and
                  upper credibility limits for mutual information under the
                  imprecise Dirichlet model. These enable the previous developments
                  to be extended to a full inferential method for trees.",
}
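% Note: the dependency index used above is the standard mutual information
%
%   I(X;Y) = \sum_{x,y} p(x,y) \log \frac{p(x,y)}{p(x)\, p(y)} ,
%
% for which the paper derives interval-valued posterior expectations under
% Walley's imprecise Dirichlet model.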
@InProceedings{Hutter:05postbnd,
  author =       "Alexey Chernov and Marcus Hutter",
  title =        "Monotone Conditional Complexity Bounds on Future Prediction Errors",
  booktitle =    "Proc. 16th International Conf. on Algorithmic Learning Theory ({ALT'05})",
  address =      "Singapore",
  series =       "LNAI",
  volume =       "3734",
  _editor =       "Sanjay Jain and Hans Ulrich Simon and Etsuji Tomita",
  publisher =    "Springer, Berlin",
  pages =        "414--428",
  _month =        oct,
  year =         "2005",
  bibtex =       "http://www.hutter1.net/official/bib.htm#postbnd",
  url =          "http://arxiv.org/abs/cs.LG/0507041",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-16-05.pdf",
  pdf =          "http://www.hutter1.net/ai/postbnd.pdf",
  ps =           "http://www.hutter1.net/ai/postbnd.ps",
  latex =        "http://www.hutter1.net/ai/postbnd.tex",
  slides =       "http://www.hutter1.net/ai/spostbnd.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#ait",
  issn =         "0302-9743",
  isbn =         "3-540-29242-X",
  keywords =     "Kolmogorov complexity, posterior bounds,
                  online sequential prediction, Solomonoff prior,
                  monotone conditional complexity, total error,
                  future loss, randomness deficiency.",
  abstract =     "We bound the future loss when predicting any (computably)
                  stochastic sequence online. Solomonoff finitely bounded the total
                  deviation of his universal predictor M from the true
                  distribution m by the algorithmic complexity of m. Here we
                  assume we are at a time t>1 and already observed x=x_1...x_t.
                  We bound the future prediction performance on x_{t+1}x_{t+2}...
                  by a new variant of algorithmic complexity of m given x,
                  plus the complexity of the randomness deficiency of x. The new
                  complexity is monotone in its condition in the sense that this
                  complexity can only decrease if the condition is prolonged. We
                  also briefly discuss potential generalizations to Bayesian model
                  classes and to classification problems.",
  support =      "SNF grant 200020-100259 and 2100-67712",
  znote =        "Acceptance rate: 30/98 = 30\%",
}
@InProceedings{Hutter:05actexp2,
  author =       "Jan Poland and Marcus Hutter",
  title =        "Defensive Universal Learning with Experts",
  booktitle =    "Proc. 16th International Conf. on Algorithmic Learning Theory ({ALT'05})",
  address =      "Singapore",
  series =       "LNAI",
  volume =       "3734",
  _editor =       "Sanjay Jain and Hans Ulrich Simon and Etsuji Tomita",
  publisher =    "Springer, Berlin",
  _month =        oct,
  pages =        "356--370",
  year =         "2005",
  bibtex =       "http://www.hutter1.net/official/bib.htm#actexp2",
  url =          "http://arxiv.org/abs/cs.LG/0507044",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-15-05.pdf",
  pdf =          "http://www.hutter1.net/ai/actexp2.pdf",
  ps =           "http://www.hutter1.net/ai/actexp2.ps",
  latex =        "http://www.hutter1.net/ai/actexp2.tex",
  slides =       "http://www.hutter1.net/ai/sactexp.pdf",
  slidesppt =    "http://www.hutter1.net/ai/sactexp.ppt",
  project =      "http://www.hutter1.net/official/projects.htm#expert",
  issn =         "0302-9743",
  isbn =         "3-540-29242-X",
  keywords =     "Prediction with expert advice, responsive
                  environments, partial observation game, bandits, universal
                  learning, asymptotic optimality.",
  abstract =     "This paper shows how universal learning can be achieved with
                  expert advice. To this aim, we specify an experts algorithm with
                  the following characteristics: (a) it uses only feedback from the
                  actions actually chosen (bandit setup), (b) it can be applied with
                  countably infinite expert classes, and (c) it copes with losses
                  that may grow in time appropriately slowly. We prove loss bounds
                  against an adaptive adversary. From this, we obtain a master
                  algorithm for ``reactive'' experts problems, which means that the
                  master's actions may influence the behavior of the adversary. Our
                  algorithm can significantly outperform standard experts algorithms
                  on such problems. Finally, we combine it with a universal expert
                  class. The resulting universal learner performs -- in a certain
                  sense -- almost as well as any computable strategy, for any online
                  decision problem. We also specify the (worst-case) convergence
                  speed, which is very slow.",
  znote =        "Acceptance rate: 30/98 = 31\%",
}
@InProceedings{Hutter:05iors,
  author =       "Shane Legg and Marcus Hutter",
  title =        "A Universal Measure of Intelligence for Artificial Agents",
  booktitle =    "Proc. 21st International Joint Conf. on Artificial Intelligence ({IJCAI-2005})",
  pages =        "1509--1510",
  _editor =       "L. P. Kaelbling and A. Saffiotti",
  _publisher =    "Professional Book Center",
  address =      "Edinburgh",
  _month =        aug,
  year =         "2005",
  bibtex =       "http://www.hutter1.net/official/bib.htm#iors",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-04-05.pdf",
  conf =         "http://ijcai05.csd.abdn.ac.uk/index.php?section=posterlist",
  pdf =          "http://www.hutter1.net/ai/iors.pdf",
  ps =           "http://www.hutter1.net/ai/iors.ps",
  slides =       "http://www.hutter1.net/ai/siors.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#uai",
  press =        "http://www.hutter1.net/official/press.htm#ior",
  code =         "http://www.hutter1.net/ai/iors.cpp",
  isbn_print =   "0-938075-93-4",
  isbn_cd =      "0-938075-94-2",
  support =      "SNF grant 2100-67712",
  znote =        "Acceptance rate: 112/453 = 25\%",
}
@InProceedings{Hutter:05fuds,
  author =       "Shane Legg and Marcus Hutter",
  title =        "Fitness Uniform Deletion for Robust Optimization",
  booktitle =    "Proc. Genetic and Evolutionary Computation Conference ({GECCO'05})",
  address =      "Washington, OR",
  editor =       "H.-G. Beyer et al.",
  publisher =    "ACM SigEvo",
  _month =        jun,
  year =         "2005",
  pages =        "1271--1278",
  bibtex =       "http://www.hutter1.net/official/bib.htm#fuds",
  http =         "http://www.hutter1.net/ai/fuds.htm",
  url =          "http://arxiv.org/abs/cs.NE/0504035",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-11-04.pdf",
  pdf =          "http://www.hutter1.net/ai/fuds.pdf",
  ps =           "http://www.hutter1.net/ai/fuds.ps",
  latex =        "http://www.hutter1.net/ai/fuds.zip",
  slides =       "http://www.hutter1.net/ai/sfuds.pdf",
  slidesppt =    "http://www.hutter1.net/ai/sfuds.ppt",
  project =      "http://www.hutter1.net/official/projects.htm#optimize",
  press =        "http://www.hutter1.net/official/press.htm#fuss",
  code1 =        "http://www.hutter1.net/ai/fussdd.cpp",
  code2 =        "http://www.hutter1.net/ai/fussdd.h",
  code3 =        "http://www.hutter1.net/ai/fusstsp.cpp",
  code4 =        "http://www.hutter1.net/ai/fusstsp.h",
  isbn =         "1-59593-010-8",
  keywords =     "Evolutionary algorithm, deletion schemes, fitness evaluation,
                  optimization, fitness landscapes, (self)adaptation.",
  abstract =     "A commonly experienced problem with population based optimisation
                  methods is the gradual decline in population diversity that tends
                  to occur over time.  This can slow a system's progress or even
                  halt it completely if the population converges on a local optimum
                  from which it cannot escape.  In this paper we present the Fitness
                  Uniform Deletion Scheme (FUDS), a simple but somewhat
                  unconventional approach to this problem.  Under FUDS the deletion
                  operation is modified to only delete those individuals which are
                  ``common'' in the sense that there exist many other individuals of
                  similar fitness in the population.  This makes it impossible for
                  the population to collapse to a collection of highly related
                  individuals with similar fitness. Our experimental results on a
                  range of optimisation problems confirm this; in particular, for
                  deceptive optimisation problems the performance is significantly
                  more robust to variation in the selection intensity.",
  znote =        "Acceptance rate: 253/549 = 46\%",
}
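
A minimal sketch of the FUDS deletion rule, assuming fitness values are
bucketed at some granularity eps and a random victim is taken from the most
crowded bucket (illustration only; the authors' actual implementation is the
fussdd.cpp/fussdd.h linked above):

  // fuds_sketch.cpp -- illustrative fitness uniform deletion (FUDS):
  // delete a random individual from the most populated fitness level,
  // so deletion pressure falls on over-represented fitness values.
  #include <cstdlib>
  #include <map>
  #include <vector>

  struct Individual { double fitness; /* genome omitted */ };

  // Assumes a non-empty population.
  void fudsDelete(std::vector<Individual>& pop, double eps) {
    std::map<long, std::vector<size_t>> buckets;   // fitness level -> members
    for (size_t i = 0; i < pop.size(); ++i)
      buckets[(long)(pop[i].fitness / eps)].push_back(i);
    const std::vector<size_t>* crowded = nullptr;  // most populated level
    for (const auto& kv : buckets)
      if (!crowded || kv.second.size() > crowded->size()) crowded = &kv.second;
    size_t victim = (*crowded)[std::rand() % crowded->size()];
    pop[victim] = pop.back();                      // O(1) unordered erase
    pop.pop_back();
  }
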
@Article{Hutter:05expertx,
  author =       "Marcus Hutter and Jan Poland",
  title =        "Adaptive Online Prediction by Following the Perturbed Leader",
  volume =       "6",
  _month =        apr,
  year =         "2005",
  pages =        "639--660",
  journal =      "Journal of Machine Learning Research",
  publisher =    "Microtome",
  bibtex =       "http://www.hutter1.net/official/bib.htm#expertx",
  http =         "http://www.hutter1.net/ai/expertx.htm",
  url =          "http://arxiv.org/abs/cs.AI/0504078",
  url2 =         "http://www.jmlr.org/papers/v6/hutter05a.html",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-10-05.pdf",
  pdf =          "http://www.hutter1.net/ai/expertx.pdf",
  ps =           "http://www.hutter1.net/ai/expertx.ps",
  latex =        "http://www.hutter1.net/ai/expertx.tex",
  slides =       "http://www.hutter1.net/ai/sexpert.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#expert",
  issn =         "1532-4435",
  keywords =     "Prediction with Expert Advice, Follow the Perturbed Leader,
                  general weights, adaptive learning rate,
                  adaptive adversary, hierarchy of experts,
                  expected and high probability bounds, general alphabet and loss,
                  online sequential prediction.",
  abstract =     "When applying aggregating strategies to Prediction with Expert
                  Advice, the learning rate must be adaptively tuned. The natural
                  choice of sqrt(complexity/current loss) renders the analysis of
                  Weighted Majority derivatives quite complicated. In particular,
                  for arbitrary weights there have been no results proven so far.
                  The analysis of the alternative ``Follow the Perturbed Leader''
                  (FPL) algorithm from Kalai & Vempala (2003) (based on Hannan's
                  algorithm) is easier. We derive loss bounds for adaptive learning
                  rate and both finite expert classes with uniform weights and
                  countable expert classes with arbitrary weights. For the former
                  setup, our loss bounds match the best known results so far, while
                  for the latter our results are new.",
}
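
The FPL rule analysed above is concrete enough to sketch. Below, a minimal
version for finitely many experts with uniform weights; the exponential
perturbation and the simple adaptive rate eta_t = sqrt(ln n / (t+1)) are
illustrative assumptions (the paper treats general weights and rates):

  // fpl_sketch.cpp -- minimal Follow the Perturbed Leader (n >= 2 experts).
  #include <cmath>
  #include <random>
  #include <vector>

  struct FPL {
    std::vector<double> cumLoss;                  // cumulative loss per expert
    std::mt19937 rng{42};
    explicit FPL(int n) : cumLoss(n, 0.0) {}

    int choose(int t) {                           // round t = 0,1,2,...
      double eta = std::sqrt(std::log((double)cumLoss.size()) / (t + 1.0));
      std::exponential_distribution<double> q(1.0);
      int best = 0; double bestVal = 1e300;
      for (size_t i = 0; i < cumLoss.size(); ++i) {
        double v = cumLoss[i] - q(rng) / eta;     // perturbed cumulative loss
        if (v < bestVal) { bestVal = v; best = (int)i; }
      }
      return best;                                // follow the perturbed leader
    }
    void update(const std::vector<double>& loss) {
      for (size_t i = 0; i < loss.size(); ++i) cumLoss[i] += loss[i];
    }
  };
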
@Article{Hutter:05mifs,
  author =       "Marcus Hutter and Marco Zaffalon",
  title =        "Distribution of Mutual Information from Complete and Incomplete Data",
  journal =      "Computational Statistics \& Data Analysis",
  volume =       "48",
  number =       "3",
  pages =        "633--657",
  _month =        mar,
  year =         "2005",
  publisher =    "Elsevier Science",
  bibtex =       "http://www.hutter1.net/official/bib.htm#mifs",
  http =         "http://www.hutter1.net/ai/mifs.htm",
  url =          "http://arxiv.org/abs/cs.LG/0403025",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-11-02.pdf",
  pdf =          "http://www.hutter1.net/ai/mifs.pdf",
  ps =           "http://www.hutter1.net/ai/mifs.ps",
  latex =        "http://www.hutter1.net/ai/mifs.zip",
  slides =       "http://www.hutter1.net/ai/smimiss.pdf",
  slidesppt =    "http://www.hutter1.net/ai/smimiss.ppt",
  project =      "http://www.hutter1.net/official/projects.htm#robust",
  code =         "http://www.hutter1.net/ai/mifs.cpp",
  doi =          "10.1016/j.csda.2004.03.010",
  issn =         "0167-9473",
  categories =   "I.2.   [Artificial Intelligence]",
  keywords =     "Mutual information, cross entropy, Dirichlet distribution, second
                  order distribution, expectation and variance of mutual
                  information, feature selection, filters, naive Bayes classifier,
                  Bayesian statistics.",
  abstract =     "Mutual information is widely used, in a descriptive way, to measure the
                  stochastic dependence of categorical random variables. In order to address
                  questions such as the reliability of the descriptive value, one must consider
                  sample-to-population inferential approaches. This paper deals with the
                  posterior distribution of mutual information, as obtained in a Bayesian
                  framework by a second-order Dirichlet prior distribution. The exact analytical
                  expression for the mean, and analytical approximations for the variance,
                  skewness and kurtosis are derived. These approximations have a guaranteed
                  accuracy level of the order O(1/n^3), where n is the sample size. Leading order
                  approximations for the mean and the variance are derived in the case of
                  incomplete samples. The derived analytical expressions allow the distribution
                  of mutual information to be approximated reliably and quickly. In fact, the
                  derived expressions can be computed with the same order of complexity needed
                  for descriptive mutual information. This makes the distribution of mutual
                  information become a concrete alternative to descriptive mutual information in
                  many applications which would benefit from moving to the inductive side. Some
                  of these prospective applications are discussed, and one of them, namely
                  feature selection, is shown to perform significantly better when inductive
                  mutual information is used.",
}
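
The analytical approximations are the point of the paper, but the object they
approximate is easy to exhibit by brute force: sample chance matrices from the
Dirichlet posterior and evaluate the mutual information of each sample. A
Monte Carlo sketch, assuming a uniform (all-ones) Dirichlet prior; the
closed-form expressions above are vastly cheaper than this check:

  // mi_posterior_mc.cpp -- sample pi ~ Dirichlet(n_ij + 1), return I(pi).
  #include <cmath>
  #include <random>
  #include <vector>

  using Mat = std::vector<std::vector<double>>;

  double mutualInfo(const Mat& p) {               // I(p) in nats
    size_t R = p.size(), S = p[0].size();
    std::vector<double> pi(R, 0.0), pj(S, 0.0);
    for (size_t i = 0; i < R; ++i)
      for (size_t j = 0; j < S; ++j) { pi[i] += p[i][j]; pj[j] += p[i][j]; }
    double I = 0.0;
    for (size_t i = 0; i < R; ++i)
      for (size_t j = 0; j < S; ++j)
        if (p[i][j] > 0) I += p[i][j] * std::log(p[i][j] / (pi[i] * pj[j]));
    return I;
  }

  double sampleMI(const std::vector<std::vector<int>>& n, std::mt19937& rng) {
    Mat p(n.size(), std::vector<double>(n[0].size()));
    double sum = 0.0;                             // Dirichlet via gamma draws
    for (size_t i = 0; i < n.size(); ++i)
      for (size_t j = 0; j < n[0].size(); ++j) {
        std::gamma_distribution<double> g(n[i][j] + 1.0, 1.0);
        sum += (p[i][j] = g(rng));
      }
    for (auto& row : p) for (double& x : row) x /= sum;
    return mutualInfo(p);                         // one draw from p(I|n)
  }

Averaging many such draws reproduces the posterior mean; their spread, the
variance.
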
@InProceedings{Hutter:05mdlreg,
  author =       "Jan Poland and Marcus Hutter",
  title =        "Strong Asymptotic Assertions for Discrete {MDL} in Regression and Classification",
  booktitle =    "Proc. 14th {D}utch-{B}elgium Conf. on Machine Learning ({Benelearn'05})",
  address =      "Enschede",
  _editor =       "Martijn {van Otterlo} and Mannes Poel and Anton Nijholt",
  pages =        "67--72",
  _month =        feb,
  year =         "2005",
  _number =       "WP05-03",
  _series =       "CTIT Workshop Proceedings Series",
  _organization = "CTIT Research Institute, University of Twente",
  bibtex =       "http://www.hutter1.net/official/bib.htm#mdlreg",
  url =          "http://arxiv.org/abs/math.ST/0502315",
  conf =         "http://hmi.ewi.utwente.nl/conference/benelearn2005",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-02-05.pdf",
  pdf =          "http://www.hutter1.net/ai/mdlreg.pdf",
  ps =           "http://www.hutter1.net/ai/mdlreg.ps",
  latex =        "http://www.hutter1.net/ai/mdlreg.tex",
  slides =       "http://www.hutter1.net/ai/smdlreg.pdf",
  slidesppt =    "http://www.hutter1.net/ai/smdlreg.ppt",
  project =      "http://www.hutter1.net/official/projects.htm#mdl",
  issn =         "0929-0672",
  keywords =     "Regression, Classification, Sequence Prediction,
                  Machine Learning, Minimum Description Length, Bayes Mixture,
                  Marginalization, Convergence, Discrete Model Classes.",
  abstract =     "We study the properties of the MDL (or maximum penalized
                  complexity) estimator for Regression and Classification, where the
                  underlying model class is countable. We show in particular a
                  finite bound on the Hellinger losses under the only assumption
                  that there is a ``true'' model contained in the class. This implies
                  almost sure convergence of the predictive distribution to the true
                  one at a fast rate. It corresponds to Solomonoff's central theorem
                  of universal induction, however with a bound that is exponentially
                  larger.",
}
@InProceedings{Hutter:05actexp,
  author =       "Jan Poland and Marcus Hutter",
  title =        "Master Algorithms for Active Experts Problems based on Increasing Loss Values",
  booktitle =    "Proc. 14th {D}utch-{B}elgium Conf. on Machine Learning ({Benelearn'05})",
  address =      "Enschede",
  _editor =       "Martijn {van Otterlo} and Mannes Poel and Anton Nijholt",
  pages =        "59--66",
  _month =        feb,
  year =         "2005",
  _number =       "WP05-03",
  _series =       "CTIT Workshop Proceedings Series",
  _organization = "CTIT Research Institute, University of Twente",
  bibtex =       "http://www.hutter1.net/official/bib.htm#actexp",
  url =          "http://arxiv.org/abs/cs.LG/0502067",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-01-05.pdf",
  conf =         "http://hmi.ewi.utwente.nl/conference/benelearn2005",
  pdf =          "http://www.hutter1.net/ai/actexp.pdf",
  ps =           "http://www.hutter1.net/ai/actexp.ps",
  latex =        "http://www.hutter1.net/ai/actexp.tex",
  slides =       "http://www.hutter1.net/ai/sactexp.pdf",
  slidesppt =    "http://www.hutter1.net/ai/sactexp.ppt",
  project =      "http://www.hutter1.net/official/projects.htm#expert",
  issn =         "0929-0672",
  keywords =     "Prediction with expert advice, responsive
                  environments, partial observation game, bandits, universal
                  learning, asymptotic optimality.",
  abstract =     "We specify an experts algorithm with the following
                  characteristics: (a) it uses only feedback from the actions
                  actually chosen (bandit setup), (b) it can be applied with
                  countably infinite expert classes, and (c) it copes with
                  losses that may grow in time appropriately slowly. We
                  prove loss bounds against an adaptive adversary. From this, we
                  obtain master algorithms for ``active experts problems'', which
                  means that the master's actions may influence the behavior of
                  the adversary. Our algorithm can significantly outperform
                  standard experts algorithms on such problems. Finally, we
                  combine it with a universal expert class. This results in a
                  (computationally infeasible) universal master algorithm
                  which performs - in a certain sense - almost as well as any
                  computable strategy, for any online problem.",
}
@Slides{Hutter:05predict,
  author =       "Marcus Hutter",
  title =        "How to predict with {Bayes}, {MDL}, and {Experts}",
  _month =        jan,
  year =         "2005",
  note =         "Presented at the Machine Learning Summer School (MLSS)",
  http =         "http://canberra05.mlss.cc/",
  url =          "http://www.idsia.ch/~marcus/ai/predict.htm",
  slides =       "http://www.idsia.ch/~marcus/ai/spredict.pdf",
}
@InProceedings{Hutter:05bayestree,
  author =       "Marcus Hutter",
  title =        "Fast Non-Parametric {B}ayesian Inference on Infinite Trees",
  booktitle =    "Proc. 10th International Conf. on Artificial Intelligence and Statistics ({AISTATS-2005})",
  _address =      "Barbados",
  _editor =       "R. G. Cowell and Z. Ghahramani",
  publisher =    "Society for Artificial Intelligence and Statistics",
  pages =        "144--151",
  _month =        jan,
  year =         "2005",
  bibtex =       "http://www.hutter1.net/official/bib.htm#bayestree",
  http =         "http://www.hutter1.net/ai/bayestree.htm",
  url =          "http://arxiv.org/abs/math.PR/0411515",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-24-04.pdf",
  pdf =          "http://www.hutter1.net/ai/bayestree.pdf",
  ps =           "http://www.hutter1.net/ai/bayestree.ps",
  latex =        "http://www.hutter1.net/ai/bayestree.zip",
  slides =       "http://www.hutter1.net/ai/sbayestree.pdf",
  project =      "http://www.hutter1.net/official/projects.htm#bayes",
  code =         "http://www.hutter1.net/ai/bayestree.c",
  isbn =         "0-9727358-1-X",
  keywords =     "Bayesian density estimation, exact linear time algorithm,
                  non-parametric inference, adaptive infinite tree, Polya tree,
                  scale invariance.",
  abstract =     "Given i.i.d. data from an unknown distribution,
                  we consider the problem of predicting future items.
                  An adaptive way to estimate the probability density
                  is to recursively subdivide the domain to an appropriate
                  data-dependent granularity. A Bayesian would assign a
                  data-independent prior probability to ``subdivide'', which leads
                  to a prior over infinite(ly many) trees. We derive an exact, fast,
                  and simple inference algorithm for such a prior, for the data
                  evidence, the predictive distribution, the effective model
                  dimension, and other quantities.",
  znote =        "Acceptance rate: 57/150 = 38\%",
}
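
To make the prior concrete: every cell of the domain is a uniform leaf with
probability 1-q, or is split into two halves with probability q, recursively.
The truncated-depth sketch below computes the resulting data evidence for
points in [0,1), assuming equal halves and a uniform Beta(1,1) prior on the
probability mass sent left; the paper's algorithm handles the infinite-depth
limit exactly and in linear time, which this naive recursion only
approximates:

  // tree_evidence_sketch.cpp -- truncated Bayesian-tree evidence on [0,1).
  #include <algorithm>
  #include <cmath>
  #include <vector>

  double logBeta(double a, double b) {
    return std::lgamma(a) + std::lgamma(b) - std::lgamma(a + b);
  }

  // log p(points | cell [lo,hi)); q = prior probability to subdivide.
  double logEvidence(std::vector<double> pts, double lo, double hi,
                     double q, int depth) {
    double n = (double)pts.size(), w = hi - lo;
    double leaf = -n * std::log(w);            // uniform density on the cell
    if (depth == 0 || pts.empty()) return leaf;
    double mid = 0.5 * (lo + hi);
    std::vector<double> L, R;
    for (double x : pts) (x < mid ? L : R).push_back(x);
    // P(split pattern): integrate theta^{nL}(1-theta)^{nR} over Beta(1,1)
    double split = logBeta(L.size() + 1.0, R.size() + 1.0) - logBeta(1.0, 1.0)
                 + logEvidence(L, lo, mid, q, depth - 1)
                 + logEvidence(R, mid, hi, q, depth - 1);
    double a = std::log(1.0 - q) + leaf, b = std::log(q) + split;
    double m = std::max(a, b);                 // log((1-q)e^leaf + q e^split)
    return m + std::log(std::exp(a - m) + std::exp(b - m));
  }
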

%-------------Publications-of-Marcus-Hutter-2004--------------%

@TechReport{Hutter:04mdp,
  author =       "S. Legg and M. Hutter",
  number =      "IDSIA-21-04",
  title =        "Ergodic {MDP}s Admit Self-Optimising Policies",
  year =         "2004",
  institution =   "{IDSIA}",
}
@TechReport{Hutter:04env,
  author =       "S. Legg and M. Hutter",
  number =      "IDSIA-20-04",
  title =        "A Taxonomy for Abstract Environments",
  year =         "2004",
  institution =   "{IDSIA}",
}
@Book{Hutter:04uaibook,
  author =       "Marcus Hutter",
  title =        "Universal Artificial Intelligence:
                  Sequential Decisions based on Algorithmic Probability",
  _series =       "EATCS",
  publisher =    "Springer",
  address =      "Berlin",
  year =         "2004",
  note =         "300 pages, http://www.idsia.ch/$_{^{\sim}}$marcus/ai/uaibook.htm",
  url =          "http://www.hutter1.net/ai/uaibook.htm",
  review =       "http://www.reviews.com/review/review_review.cfm?review_id=131175",
  keywords =     "Artificial intelligence; algorithmic probability;
                  sequential decision theory; Solomonoff induction;
                  Kolmogorov complexity; Bayes mixture distributions;
                  reinforcement learning; universal sequence prediction;
                  tight loss and error bounds; Levin search;
                  strategic games; function minimization; supervised learning.",
  abstract =     "This book presents sequential decision theory from a
                  novel algorithmic information theory perspective. While the former
                  theory is suited for active agents in known environments, the
                  latter is suited for passive prediction of unknown environments.
                  The book introduces these two well-known but very different ideas
                  and removes the limitations by unifying them to one parameter-free
                  theory of an optimal reinforcement learning agent interacting with
                  an arbitrary unknown world. Most if not all AI problems can easily
                  be formulated within this theory, which reduces the conceptual
                  problems to pure computational ones. Considered problem classes
                  include sequence prediction, strategic games, function
                  minimization, reinforcement and supervised learning. Formal
                  definitions of intelligence order relations, the horizon problem
                  and relations to other approaches to AI are discussed. One
                  intention of this book is to excite a broader AI audience about
                  abstract algorithmic information theory concepts, and conversely
                  to inform theorists about exciting applications to AI.",
}
@InProceedings{Hutter:04mlconvx,
  author =       "M. Hutter and An. A. Muchnik",
  title =        "Universal Convergence of Semimeasures on Individual Random Sequences",
  booktitle =    "Proc. 15th International Conf. on Algorithmic Learning Theory ({ALT'04})",
  address =      "Padova",
  series =       "LNAI",
  volume =       "3244",
  _editor =       "S. Ben-David and J. Case and A. Maruoka",
  publisher =    "Springer, Berlin",
  pages =        "234--248",
  year =         "2004",
  issn =         "0302-9743",
  isbn =         "3-540-23356-3",
  http =         "http://www.hutter1.net/ai/mlconvx.htm",
  url =          "http://arxiv.org/abs/cs.LG/0407057",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-14-04.pdf",
  keywords =     "Sequence prediction; Algorithmic Information Theory;
                  universal enumerable semimeasure; mixture distributions;
                  posterior convergence; Martin-L{\"o}f randomness;
                  quasimeasures.",
  abstract =     "Solomonoff's central result on induction is that the posterior of
                  a universal semimeasure M converges rapidly and with probability
                  1 to the true sequence generating posterior mu, if the latter is
                  computable. Hence, M is eligible as a universal sequence predictor
                  in case of unknown mu. Despite some nearby results and proofs in
                  the literature, the stronger result of convergence for all
                  (Martin-Loef) random sequences remained open. Such a convergence
                  result would be particularly interesting and natural, since
                  randomness can be defined in terms of M itself. We show that there
                  are universal semimeasures M which do not converge for all random
                  sequences, i.e. we give a partial negative answer to the open
                  problem. We also provide a positive answer for some non-universal
                  semimeasures. We define the incomputable measure D as a mixture
                  over all computable measures and the enumerable semimeasure W as a
                  mixture over all enumerable nearly-measures. We show that W
                  converges to D and D to mu on all random sequences. The Hellinger
                  distance measuring closeness of two distributions plays
                  a central role.",
  znote =        "Acceptance rate: 29/91 = 32\%",
}
@InProceedings{Hutter:04expert,
  author =       "M. Hutter and J. Poland",
  title =        "Prediction with Expert Advice by Following the Perturbed Leader for General Weights",
  booktitle =    "Proc. 15th International Conf. on Algorithmic Learning Theory ({ALT'04})",
  address =      "Padova",
  series =       "LNAI",
  volume =       "3244",
  _editor =       "S. Ben-David and J. Case and A. Maruoka",
  publisher =    "Springer, Berlin",
  pages =        "279--293",
  year =         "2004",
  issn =         "0302-9743",
  isbn =         "3-540-23356-3",
  http =         "http://www.hutter1.net/ai/expert.htm",
  url =          "http://arxiv.org/abs/cs.LG/0405043",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-08-04.pdf",
  keywords =     "Prediction with Expert Advice, Follow the Perturbed Leader,
                  general weights, adaptive learning rate,
                  hierarchy of experts, expected and high probability bounds,
                  general alphabet and loss, online sequential prediction.",
  abstract =     "When applying aggregating strategies to Prediction with Expert
                  Advice, the learning rate must be adaptively tuned. The natural
                  choice of sqrt(complexity/current loss) renders the
                  analysis of Weighted Majority derivatives quite complicated. In
                  particular, for arbitrary weights there have been no results
                  proven so far. The analysis of the alternative ``Follow the
                  Perturbed Leader'' (FPL) algorithm from Kalai \& Vempala (2003) (based on
                  Hannan's algorithm) is easier. We derive loss bounds for adaptive
                  learning rate and both finite expert classes with uniform weights
                  and countable expert classes with arbitrary weights. For the
                  former setup, our loss bounds match the best known results so far,
                  while for the latter our results are new.",
  znote =        "Acceptance rate: 29/91 = 32\%",
}
@InProceedings{Hutter:04mdlspeed,
  author =       "J. Poland and M. Hutter",
  title =        "On the convergence speed of {MDL} predictions for {B}ernoulli sequences",
  booktitle =    "Proc. 15th International Conf. on Algorithmic Learning Theory ({ALT'04})",
  address =      "Padova",
  series =       "LNAI",
  volume =       "3244",
  _editor =       "S. Ben-David and J. Case and A. Maruoka",
  publisher =    "Springer, Berlin",
  pages =        "294--308",
  year =         "2004",
  issn =         "0302-9743",
  isbn =         "3-540-23356-3",
  http =         "http://www.hutter1.net/ai/mdlspeed.htm",
  url =          "http://arxiv.org/abs/cs.LG/0407039",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-13-04.pdf",
  keywords =     "MDL, Minimum Description Length, Convergence Rate,
                  Prediction, Bernoulli, Discrete Model Class.",
  abstract =     "We consider the Minimum Description Length principle for online
                  sequence prediction. If the underlying model class is discrete,
                  then the total expected square loss is a particularly interesting
                  performance measure: (a) this quantity is bounded, implying
                  convergence with probability one, and (b) it additionally
                  specifies a `rate of convergence'. Generally, for MDL only
                  exponential loss bounds hold, as opposed to the linear bounds for
                  a Bayes mixture. We show that this is even the case if the model
                  class contains only Bernoulli distributions. We derive a new upper
                  bound on the prediction error for countable Bernoulli classes.
                  This implies a small bound (comparable to the one for Bayes
                  mixtures) for certain important model classes. The results apply
                  to many Machine Learning tasks including classification and
                  hypothesis testing. We provide arguments that our theorems
                  generalize to countable classes of i.i.d. models.",
  znote =        "Acceptance rate: 29/91 = 32\%",
}
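
For a countable Bernoulli class, the two predictors compared in this paper
are short programs. A sketch under purely illustrative choices (theta_k =
k/(K+1), weights w_k proportional to 2^-k): MDL predicts with the single
model of minimal two-part code length -log w_nu - log nu(x), while the Bayes
mixture averages over all models:

  // mdl_bernoulli_sketch.cpp -- discrete MDL vs. Bayes mixture prediction.
  #include <cmath>
  #include <vector>

  struct BernoulliClass {
    std::vector<double> theta, logw;
    explicit BernoulliClass(int K) {
      for (int k = 1; k <= K; ++k) {
        theta.push_back(k / (K + 1.0));
        logw.push_back(-k * std::log(2.0));        // w_k ~ 2^{-k}
      }
    }
    double logLik(size_t k, int n, int n1) const { // n1 ones out of n bits
      return n1 * std::log(theta[k]) + (n - n1) * std::log(1.0 - theta[k]);
    }
    double predictMDL(int n, int n1) const {       // min two-part code length
      size_t best = 0;
      for (size_t k = 1; k < theta.size(); ++k)
        if (logw[k] + logLik(k, n, n1) > logw[best] + logLik(best, n, n1))
          best = k;
      return theta[best];                          // prob. that next bit is 1
    }
    double predictBayes(int n, int n1) const {     // mixture for comparison
      double num = 0.0, den = 0.0;
      for (size_t k = 0; k < theta.size(); ++k) {
        double p = std::exp(logw[k] + logLik(k, n, n1));
        num += p * theta[k]; den += p;
      }
      return num / den;
    }
  };
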
@TechReport{Hutter:04bayespea,
  author =       "Marcus Hutter",
  title =        "Online Prediction -- {B}ayes versus Experts",
  institution =  "http://www.idsia.ch/$_{^\sim}$marcus/ai/bayespea.htm",
  month =        jul,
  pages =        "4 pages",
  year =         "2004",
  note =         "Presented at the {\em EU PASCAL Workshop on
                  Learning Theoretic and Bayesian Inductive Principles (LTBIP-2004)}",
  url =          "http://www.hutter1.net/ai/bayespea.htm",
  ps =           "http://www.hutter1.net/ai/bayespea.ps",
  pdf =          "http://www.hutter1.net/ai/bayespea.pdf",
  slides =       "http://www.hutter1.net/ai/sbayespea.pdf",
  keywords =     "Bayesian sequence prediction;
                  Prediction with Expert Advice;
                  general weights, alphabet and loss.",
  abstract =     "We derive a very general regret bound in the framework of
                  prediction with expert advice, which challenges the best known
                  regret bound for Bayesian sequence prediction. Both bounds of the
                  form $\sqrt{\mbox{Loss}\times\mbox{complexity}}$ hold for any
                  bounded loss-function, any prediction and observation spaces,
                  arbitrary expert/environment classes and weights, and unknown
                  sequence length.",
}
@InProceedings{Hutter:04mdl2p,
  author =       "J. Poland and M. Hutter",
  title =        "Convergence of Discrete {MDL} for Sequential Prediction",
  booktitle =    "Proc. 17th Annual Conf. on Learning Theory ({COLT'04})",
  address =      "Banff",
  series =       "LNAI",
  volume =       "3120",
  _editor =       "J. Shawe-Taylor and Y. Singer",
  publisher =    "Springer, Berlin",
  pages =        "300--314",
  year =         "2004",
  isbn =         "3-540-22282-0",
  http =         "http://www.hutter1.net/ai/mdl2p.htm",
  url =          "http://arxiv.org/abs/cs.LG/0404057",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-03-04.pdf",
  keywords =     "Minimum Description Length, Sequence Prediction,
                  Convergence, Discrete Model Classes, Universal Induction,
                  Stabilization, Algorithmic Information Theory.",
  abstract =     "We study the properties of the Minimum Description Length principle for
                  sequence prediction, considering a two-part MDL estimator which is chosen from
                  a countable class of models. This applies in particular to the important case
                  of universal sequence prediction, where the model class corresponds to all
                  algorithms for some fixed universal Turing machine (this correspondence is by
                  enumerable semimeasures, hence the resulting models are stochastic). We prove
                  convergence theorems similar to Solomonoff's theorem of universal induction,
                  which also holds for general Bayes mixtures. The bound characterizing the
                  convergence speed for MDL predictions is exponentially larger as compared to
                  Bayes mixtures. We observe that there are at least three different ways of
                  using MDL for prediction. One of these has worse prediction properties, for
                  which predictions only converge if the MDL estimator stabilizes. We establish
                  sufficient conditions for this to occur. Finally, some immediate consequences
                  for complexity relations and randomness criteria are proven.",
  znote =        "Acceptance rate: 44/107 = 41\%",
}
@InProceedings{Hutter:04fussexp,
  author =       "S. Legg and M. Hutter and A. Kumar",
  title =        "Tournament versus Fitness Uniform Selection",
  booktitle =    "Proc. 2004 Congress on Evolutionary Computation ({CEC'04})",
  address =      "Portland, OR",
  xeditor =       "??",
  publisher =    "IEEE",
  ISBN =         "0-7803-8515-2",
  _month =        jun,
  year =         "2004",
  pages =        "2144--2151",
  keywords =     "Selection schemes, fitness evaluation, optimization,
                  fitness landscapes, basic working principles of evolutionary computations,
                  (self)adaptation, evolutionary algorithm,
                  deceptive \& multimodal optimization problems.",
  http =         "http://www.hutter1.net/ai/fussexp.htm",
  url =          "http://arxiv.org/abs/cs.LG/0403038",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-04-04.pdf",
  press =        "http://www.trnmag.com/Stories/032801/Diversity_trumps_fitness_032801.html",
  abstract =     "In evolutionary algorithms a critical parameter that must be tuned is
                  that of selection pressure.  If it is set too low then the rate of
                  convergence towards the optimum is likely to be slow.  Alternatively
                  if the selection pressure is set too high the system is likely to
                  become stuck in a local optimum due to a loss of diversity in the
                  population. The recent Fitness Uniform Selection Scheme (FUSS) is a
                  conceptually simple but somewhat radical approach to addressing this
                  problem --- rather than biasing the selection towards higher fitness,
                  FUSS biases selection towards sparsely populated fitness levels. In
                  this paper we compare the relative performance of FUSS with the well
                  known tournament selection scheme on a range of problems.",
  znote =        "Acceptance rate: 300/460 = 65\%",
}
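
FUSS itself is a two-line rule: draw a target fitness uniformly between the
current population extremes and select the individual whose fitness is
nearest to it, so sparsely populated fitness levels are selected
disproportionately often. A minimal sketch (illustration only, not the
authors' code):

  // fuss_sketch.cpp -- fitness uniform selection over a population.
  #include <algorithm>
  #include <cmath>
  #include <random>
  #include <vector>

  size_t fussSelect(const std::vector<double>& fitness, std::mt19937& rng) {
    double lo = *std::min_element(fitness.begin(), fitness.end());
    double hi = *std::max_element(fitness.begin(), fitness.end());
    std::uniform_real_distribution<double> u(lo, hi);
    double target = u(rng);                     // uniform over fitness levels
    size_t best = 0;
    for (size_t i = 1; i < fitness.size(); ++i) // nearest individual wins
      if (std::fabs(fitness[i] - target) < std::fabs(fitness[best] - target))
        best = i;
    return best;
  }
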

%-------------Publications-of-Marcus-Hutter-2003--------------%

@PhDThesis{Hutter:03habil,
  author =       "Marcus Hutter",
  school =       "Fakult{\"a}t f{\"u}r Informatik",
  address =      "TU M{\"u}nchen",
  title =        "Optimal Sequential Decisions based on Algorithmic Probability",
  year =         "2003",
  pages =        "1--288",
  url  =         "http://www.hutter1.net/ai/habil.htm",
  _url =          "http://arxiv.org/abs/cs.AI/0306091",
  xftp =          "http://www.idsia.ch/idsiareport/IDSIA-16-03.ps.gz",
  keywords =     "Artificial intelligence; algorithmic probability;
                  sequential decision theory; Solomonoff induction;
                  Kolmogorov complexity; Bayes-mixture distributions;
                  reinforcement learning; universal sequence prediction;
                  tight loss and error bounds; Levin search;
                  strategic games; function minimization;
                  supervised learning.",
  abstract =     "Decision theory formally solves the problem of rational agents in
                  uncertain worlds if the true environmental prior probability
                  distribution is known. Solomonoff's theory of universal induction
                  formally solves the problem of sequence prediction for unknown
                  prior distribution. In this thesis both ideas are unified into one
                  parameter-free theory for universal Artificial Intelligence. We
                  give strong arguments that the resulting AIXI model is the most
                  intelligent unbiased agent possible. We outline for a number of
                  problem classes, including sequence prediction, strategic games,
                  function minimization, reinforcement and supervised learning, how
                  the AIXI model can formally solve them. The major drawback of the
                  AIXI model is that it is uncomputable. To overcome this problem,
                  we construct a modified algorithm AIXI$tl$, which is still
                  effectively more intelligent than any other time $t$ and length $l$
                  bounded agent. The computation time of AIXI$tl$ is of the order
                  $t\cdot 2^l$. The discussion includes formal definitions of
                  intelligence order relations, the horizon problem and relations of
                  the AIXI theory to other AI approaches.",
  note =         "288 pages, draft, http://www.idsia.ch/$_{^\sim}$marcus/ai/habil.htm",
}
@InProceedings{Hutter:03unimdl,
  author =       "Marcus Hutter",
  title =        "Sequence Prediction based on Monotone Complexity",
  booktitle =    "Proc. 16th Annual Conf. on Learning Theory ({COLT'03})",
  address =      "Washington, DC",
  series =       "LNAI",
  volume =       "2777",
  _editor =       "B. Sch{\"o}lkopf and M. K. Warmuth",
  publisher =    "Springer, Berlin",
  pages =        "506--521",
  year =         "2003",
  isbn =         "3-540-40720-0",
  http =         "http://www.hutter1.net/ai/unimdl.htm",
  url =          "http://arxiv.org/abs/cs.AI/0306036",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-09-03.ps.gz",
  keywords =     "Sequence prediction; Algorithmic Information Theory;
                  Solomonoff's prior; Monotone Kolmogorov Complexity;
                  Minimal Description Length; Convergence;
                  Self-Optimizingness",
  abstract =     "This paper studies sequence prediction based on the
                  monotone Kolmogorov complexity $Km = -\log_2 m$, i.e.\ based on
                  universal MDL. $m$ is extremely close to Solomonoff's prior $M$,
                  the latter being an excellent predictor in deterministic as well
                  as probabilistic environments, where performance is measured in
                  terms of convergence of posteriors or losses. Despite this
                  closeness to $M$, it is difficult to assess the prediction quality
                  of $m$, since little is known about the closeness of their
                  posteriors, which are the important quantities for prediction.
                  We show that for deterministic computable environments, the
                  ``posterior'' and losses of $m$ converge, but rapid convergence
                  could only be shown on-sequence; the off-sequence behavior is
                  unclear. In probabilistic environments, neither the posterior nor
                  the losses converge, in general.",
  znote =        "Acceptance rate: 49/92 = 53\%",
}
@InProceedings{Hutter:03unipriors,
  author =       "Marcus Hutter",
  title =        "On the Existence and Convergence of Computable Universal Priors",
  booktitle =    "Proc. 14th International Conf. on Algorithmic Learning Theory ({ALT'03})",
  address =      "Sapporo",
  _editor =       "Ricard Gavald{\'a} and Klaus P. Jantke and Eiji Takimoto",
  series =       "LNAI",
  volume =       "2842",
  publisher =    "Springer, Berlin",
  pages =        "298--312",
  _month =        sep,
  year =         "2003",
  ISSN =         "0302-9743",
  isbn =         "3-540-20291-9",
  http =         "http://www.hutter1.net/ai/uniprior.htm",
  url =          "http://arxiv.org/abs/cs.LG/0305052",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-05-03.ps.gz",
  keywords =     "Sequence prediction; Algorithmic Information Theory;
                  Solomonoff's prior; universal probability;
                  mixture distributions; posterior convergence;
                  computability concepts; Martin-L{\"o}f randomness.",
  abstract =     "Solomonoff unified Occam's razor and Epicurus' principle
                  of multiple explanations to one elegant, formal, universal theory
                  of inductive inference, which initiated the field of algorithmic
                  information theory. His central result is that the posterior of
                  his universal semimeasure $M$ converges rapidly to the true
                  sequence generating posterior $\mu$, if the latter is computable.
                  Hence, $M$ is eligible as a universal predictor in case of unknown
                  $\mu$. We investigate the existence, computability and convergence of
                  universal (semi)measures for a hierarchy of computability classes:
                  finitely computable, estimable, (co)enumerable, and approximable.
                  For instance, $M(x)$ is known to be enumerable, but not finitely
                  computable, and to dominate all enumerable semimeasures.
                  We define seven classes of (semi)measures based on these four
                  computability concepts. Each class may or may not contain a
                  (semi)measure which dominates all elements of another class. The
                  analysis of these 49 cases can be reduced to four basic cases, two
                  of them being new. We present proofs for discrete and continuous
                  semimeasures.
                  We also investigate more closely the type of convergence, possibly
                  implied by universality (in difference and in ratio, with probability
                  1, in mean sum, and for Martin-L{\"o}f random sequences).",
  znote =        "Acceptance rate: 19/37 = 51\%?",
}
@InProceedings{Hutter:03mlconv,
  author =       "Marcus Hutter",
  title =        "An Open Problem Regarding the Convergence
                  of Universal A Priori Probability",
  booktitle =    "Proc. 16th Annual Conf. on Learning Theory ({COLT'03})",
  address =      "Washington, DC",
  series =       "LNAI",
  volume =       "2777",
  _editor =       "B. Sch{\"o}lkopf and M. K. Warmuth",
  publisher =    "Springer, Berlin",
  pages =        "738--740",
  year =         "2003",
  isbn =         "3-540-40720-0",
  url =          "http://www.hutter1.net/ai/mlconv.htm",
  keywords =     "Sequence prediction; Algorithmic Information Theory;
                  Solomonoff's prior; universal probability;
                  posterior convergence; Martin-L{\"o}f randomness.",
  abstract =     "Is the textbook result that Solomonoff's universal
                  posterior converges to the true posterior for all Martin-L{\"o}f
                  random sequences true?",
}
@Article{Hutter:03optisp,
  author =       "Marcus Hutter",
  title =        "Optimality of Universal {B}ayesian Prediction for General Loss and Alphabet",
  _month =        Nov,
  volume =       "4",
  year =         "2003",
  pages =        "971--1000",
  journal =      "Journal of Machine Learning Research",
  publisher =    "MIT Press",
  http =         "http://www.hutter1.net/ai/optisp.htm",
  url =          "http://arxiv.org/abs/cs.LG/0311014",
  url2 =         "http://www.jmlr.org/papers/volume4/hutter03a/",
  url3 =         "http://www.jmlr.org/papers/v4/hutter03a.html",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-02-02.ps.gz",
  keywords =     "Bayesian sequence prediction; mixture distributions; Solomonoff
                  induction; Kolmogorov complexity; learning; universal probability;
                  tight loss and error bounds; Pareto-optimality; games of chance;
                  classification.",
  abstract =     "Various optimality properties of universal sequence predictors
                  based on Bayes-mixtures in general, and Solomonoff's prediction
                  scheme in particular, will be studied. The probability of
                  observing $x_t$ at time $t$, given past observations
                  $x_1...x_{t-1}$ can be computed with the chain rule if the true
                  generating distribution $\mu$ of the sequences $x_1x_2x_3...$ is
                  known. If $\mu$ is unknown, but known to belong to a countable or
                  continuous class $\mathcal{M}$, one can base one's prediction on the
                  Bayes-mixture $\xi$, defined as a $w_\nu$-weighted sum or integral
                  of distributions $\nu\in\mathcal{M}$. The cumulative expected loss of the
                  Bayes-optimal universal prediction scheme based on $\xi$ is shown
                  to be close to the loss of the Bayes-optimal, but infeasible
                  prediction scheme based on $\mu$. We show that the bounds are
                  tight and that no other predictor can lead to significantly
                  smaller bounds. Furthermore, for various performance measures, we
                  show Pareto-optimality of $\xi$ and give an Occam's razor argument
                  that the choice $w_\nu\sim 2^{-K(\nu)}$ for the weights is
                  optimal, where $K(\nu)$ is the length of the shortest program
                  describing $\nu$. The results are applied to games of chance,
                  defined as a sequence of bets, observations, and rewards. The
                  prediction schemes (and bounds) are compared to the popular
                  predictors based on expert advice. Extensions to infinite
                  alphabets, partial, delayed and probabilistic prediction,
                  classification, and more active systems are briefly discussed.",
  znote =        "Inofficial numbers: Acceptance rate: 27\%",
}
@InProceedings{Hutter:03idm,
  author =       "Marcus Hutter",
  title =        "Robust Estimators under the {I}mprecise {D}irichlet {M}odel",
  booktitle =    "Proc. 3rd International Symposium on
                  Imprecise Probabilities and Their Application ({ISIPTA-2003})",
  _editor =       "Jean-Marc Bernard and Teddy Seidenfeld and Marco Zaffalon",
  publisher =    "Carleton Scientific",
  series =       "Proceedings in Informatics",
  volume =       "18",
  address =      "Canada",
  year =         "2003",
  pages =        "274--289",
  isbn =         "1-894145-17-8",
  http =         "http://www.hutter1.net/ai/idm.htm",
  url =          "http://arxiv.org/abs/math.PR/0305121",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-03-03.ps.gz",
  keywords =     "Imprecise Dirichlet Model; exact, conservative, approximate,
                  robust, confidence interval estimates; entropy; mutual information.",
  abstract =     "Walley's Imprecise Dirichlet Model (IDM) for categorical data
                  overcomes several fundamental problems which other approaches to
                  uncertainty suffer from. Yet, to be useful in practice, one needs
                  efficient ways for computing the imprecise=robust sets or
                  intervals. The main objective of this work is to derive exact,
                  conservative, and approximate, robust and credible interval
                  estimates under the IDM for a large class of statistical
                  estimators, including the entropy and mutual information.",
  znote =        "Inofficial numbers: Acceptance rate: 44/55 = 80\% ?",
}
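
For the simplest estimator, a single categorical chance, the robust IDM
interval is a well-known closed form: with counts n_i out of n and prior
strength s (Walley typically takes s = 1 or 2), the posterior expectations
range over [n_i/(n+s), (n_i+s)/(n+s)]. The paper's subject is the much harder
extension of such intervals to functionals like entropy and mutual
information; the base case is two lines:

  // idm_interval.cpp -- lower/upper posterior expectation of a chance
  // under the Imprecise Dirichlet Model with prior strength s.
  #include <utility>

  std::pair<double, double> idmInterval(int ni, int n, double s) {
    return { ni / (n + s), (ni + s) / (n + s) };
  }

For example, n_i = 3 successes out of n = 10 with s = 2 gives the interval
[3/12, 5/12], approximately [0.25, 0.42].
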
@InProceedings{Hutter:03mimiss,
  author =       "Marcus Hutter and Marco Zaffalon",
  title =        "Bayesian Treatment of Incomplete Discrete Data applied
                  to Mutual Information and Feature Selection",
  _month =        sep,
  year =         "2003",
  pages =        "396--406",
  series =       "LNAI",
  volume =       "2821",
  booktitle =    "Proc. 26th German Conf. on Artificial Intelligence (KI-2003)",
  _editor =       "A. G{\"u}nter, R. Kruse and B. Neumann",
  publisher =    "Springer, Berlin",
  issn =         "0302-9743",
  isbn =         "3-540-00168-9",
  http =         "http://www.hutter1.net/ai/mimiss.htm",
  url =          "http://arxiv.org/abs/cs.LG/0306126",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-15-03.ps.gz",
  keywords =     "Incomplete data, Bayesian statistics, expectation maximization,
                  global optimization, Mutual Information, Cross Entropy, Dirichlet
                  distribution, Second order distribution, Credible intervals,
                  expectation and variance of mutual information, missing data,
                  Robust feature selection, Filter approach, naive Bayes classifier.",
  abstract =     "Given the joint chances of a pair of random variables one can
                  compute quantities of interest, like the mutual information. The
                  Bayesian treatment of unknown chances involves computing, from a
                  second order prior distribution and the data likelihood, a
                  posterior distribution of the chances. A common treatment of
                  incomplete data is to assume ignorability and determine the
                  chances by the expectation maximization (EM) algorithm. The two
                  different methods above are well established but typically
                  separated. This paper joins the two approaches in the case of
                  Dirichlet priors, and derives efficient approximations for the
                  mean, mode and the (co)variance of the chances and the mutual
                  information. Furthermore, we prove the unimodality of the
                  posterior distribution, whence the important property of
                  convergence of EM to the global maximum in the chosen framework.
                  These results are applied to the problem of selecting features for
                  incremental learning and naive Bayes classification. A fast filter
                  based on the distribution of mutual information is shown to
                  outperform the traditional filter based on empirical mutual
                  information on a number of incomplete real data sets.",
  znote =        "Acceptance rate: 42/90 = 46\%",
}
@Article{Hutter:03spupper,
  author =       "Marcus Hutter",
  title =        "Convergence and Loss Bounds for {Bayesian} Sequence Prediction",
  _month =        aug,
  volume =       "49",
  number =       "8",
  year =         "2003",
  pages =        "2061--2067",
  address =      "Manno(Lugano), CH",
  journal =      "IEEE Transactions on Information Theory",
  issn =         "0018-9448",
  http =         "http://www.hutter1.net/ai/spupper.htm",
  url =          "http://arxiv.org/abs/cs.LG/0301014",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-09-01.ps.gz",
  keywords =     "Bayesian sequence prediction;
                  general loss function and bounds;
                  convergence; mixture distributions.",
  abstract =     "The probability of observing $x_t$ at time $t$, given past
                  observations $x_1...x_{t-1}$ can be computed with Bayes rule if
                  the true generating distribution $\mu$ of the sequences
                  $x_1x_2x_3...$ is known. If $\mu$ is unknown, but known to belong
                  to a class $M$, one can base one's prediction on the Bayes mix
                  $\xi$, defined as a weighted sum of distributions $\nu\in M$.
                  Various convergence results of the mixture posterior $\xi_t$ to
                  the true posterior $\mu_t$ are presented. In particular a new
                  (elementary) derivation of the convergence $\xi_t/\mu_t\to 1$ is
                  provided, which additionally gives the rate of convergence. A
                  general sequence predictor is allowed to choose an action $y_t$
                  based on $x_1...x_{t-1}$ and receives loss $\ell_{x_t y_t}$ if
                  $x_t$ is the next symbol of the sequence. No assumptions are made
                  on the structure of $\ell$ (apart from being bounded) and $M$.
                  The Bayes-optimal prediction scheme $\Lambda_\xi$ based on mixture
                  $\xi$ and the Bayes-optimal informed prediction scheme
                  $\Lambda_\mu$ are defined and the total loss $L_\xi$ of
                  $\Lambda_\xi$ is bounded in terms of the total loss $L_\mu$ of
                  $\Lambda_\mu$. It is shown that $L_\xi$ is bounded for bounded
                  $L_\mu$ and $L_\xi/L_\mu\to 1$ for $L_\mu\to \infty$. Convergence
                  of the instantaneous losses is also proven.",
}
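
The mixture $\xi$ and its posterior are easy to realise for a finite class.
A sketch with Bernoulli environments standing in for $M$ (illustration only):
each weight is multiplied by the probability its environment assigned to the
observed symbol, and the mixture predicts with the weighted average, so the
convergence $\xi_t/\mu_t\to 1$ can be watched numerically:

  // bayes_mixture_sketch.cpp -- sequential Bayes mixture over Bernoulli models.
  #include <utility>
  #include <vector>

  struct BayesMixture {
    std::vector<double> theta, w;               // environments and weights
    explicit BayesMixture(std::vector<double> th)
        : theta(std::move(th)), w(theta.size(), 1.0 / theta.size()) {}

    double predictOne() const {                 // xi(x_t = 1 | x_<t)
      double p = 0.0;
      for (size_t k = 0; k < w.size(); ++k) p += w[k] * theta[k];
      return p;
    }
    void observe(int bit) {                     // posterior weight update
      double norm = 0.0;
      for (size_t k = 0; k < w.size(); ++k) {
        w[k] *= bit ? theta[k] : 1.0 - theta[k];
        norm += w[k];
      }
      for (double& x : w) x /= norm;
    }
  };
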

%-------------Publications-of-Marcus-Hutter-2002--------------%

@Article{Hutter:02ulaos,
  author =       "J. Schmidhuber and M. Hutter",
  title =        "Universal Learning Algorithms and Optimal Search",
  journal =      "NIPS 2001 Workshop",
  year =         "2002",
  note =         "http://www.idsia.ch/$_{^\sim}$marcus\linebreak[1]/idsia/nipsws.htm",
}
@InProceedings{Hutter:02feature,
  author =       "Marco Zaffalon and Marcus Hutter",
  title =        "Robust Feature Selection by Mutual Information Distributions",
  _month =        jun,
  year =         "2002",
  pages =        "577--584",
  booktitle =    "Proc. 18th International Conf. on
                  Uncertainty in Artificial Intelligence (UAI-2002)",
  _editor =       "A. Darwiche and N. Friedman",
  publisher =    "Morgan Kaufmann, San Francisco, CA",
  http =         "http://www.hutter1.net/ai/feature.htm",
  url =          "http://arxiv.org/abs/cs.AI/0206006",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-08-02.ps.gz",
  categories =   "I.2.   [Artificial Intelligence]",
  keywords =     "Robust feature selection, Filter approach, naive Bayes classifier,
                  Mutual Information, Cross Entropy, Dirichlet distribution, Second
                  order distribution, Bayesian statistics, Credible intervals,
                  expectation and variance of mutual information, missing data.",
  abstract =     "Mutual information is widely used in artificial intelligence, in a
                  descriptive way, to measure the stochastic dependence of discrete random
                  variables. In order to address questions such as the reliability of the
                  empirical value, one must consider sample-to-population inferential
                  approaches. This paper deals with the distribution of mutual information, as
                  obtained in a Bayesian framework by a second-order Dirichlet prior
                  distribution. The exact analytical expression for the mean and an
                  analytical approximation of the variance are reported. Asymptotic
                  approximations of the distribution are proposed. The results are applied to
                  the problem of selecting features for incremental learning and
                  classification of the naive Bayes classifier. A fast, newly defined method
                  is shown to outperform the traditional approach based on empirical mutual
                  information on a number of real data sets. Finally, a theoretical
                  development is reported that allows one to efficiently extend the above
                  methods to incomplete samples in an easy and effective way.",
  znote =        "Acceptance rate: 66/192 = 34\%",
}
@InProceedings{Hutter:02selfopt,
  author =       "Marcus Hutter",
  title =        "Self-Optimizing and {P}areto-Optimal Policies in
                  General Environments based on {B}ayes-Mixtures",
  _month =        jul,
  series =       "LNAI",
  volume =       "2375",
  year =         "2002",
  pages =        "364--379",
  address =      "Sydney",
  booktitle =    "Proc. 15th Annual Conf. on Computational
                 Learning Theory ({COLT'02})",
  _editor =       "J. Kivinen and R. H. Sloan",
  publisher =    "Springer, Berlin",
  http =         "http://www.hutter1.net/ai/selfopt.htm",
  url =          "http://arxiv.org/abs/cs.AI/0204040",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-04-02.ps.gz",
  keywords =     "Rational agents, sequential decision theory,
                  reinforcement learning, value function, Bayes mixtures,
                  self-optimizing policies, Pareto-optimality,
                  unbounded effective horizon, (non) Markov decision
                  processes.",
  abstract =     "The problem of making sequential decisions in unknown
                  probabilistic environments is studied. In cycle $t$ action $y_t$
                  results in perception $x_t$ and reward $r_t$, where all quantities
                  in general may depend on the complete history. The perception
                  $x_t$ and reward $r_t$ are sampled from the (reactive)
                  environmental probability distribution $\mu$. This very general
                  setting includes, but is not limited to, (partial observable, k-th
                  order) Markov decision processes. Sequential decision theory tells
                  us how to act in order to maximize the total expected reward,
                  called value, if $\mu$ is known. Reinforcement learning is usually
                  used if $\mu$ is unknown. In the Bayesian approach one defines a
                  mixture distribution $\xi$ as a weighted sum of distributions
                  $\nu\in\mathcal{M}$, where $\mathcal{M}$ is any class of distributions including the
                  true environment $\mu$. We show that the Bayes-optimal policy
                  $p^\xi$ based on the mixture $\xi$ is self-optimizing in the sense
                  that the average value converges asymptotically for all $\mu\in\mathcal{M}$
                  to the optimal value achieved by the (infeasible) Bayes-optimal
                  policy $p^\mu$ which knows $\mu$ in advance. We show that the
                  necessary condition that $\mathcal{M}$ admits self-optimizing policies at
                  all is also sufficient. No other structural assumptions are made
                  on $\mathcal{M}$. As an example application, we discuss ergodic Markov
                  decision processes, which allow for self-optimizing policies.
                  Furthermore, we show that $p^\xi$ is Pareto-optimal in the sense
                  that there is no other policy yielding higher or equal value in
                  {\em all} environments $\nu\in\mathcal{M}$ and a strictly higher value in
                  at least one.",
  znote =        "Acceptance rate: 26/55 = 47\%",
}
@InProceedings{Hutter:01xentropy,
  author =       "Marcus Hutter",
  title =        "Distribution of Mutual Information",
  _month =        dec,
  booktitle =    "Advances in Neural Information Processing Systems 14",
  _editor =       "T. G. Dietterich and S. Becker and Z. Ghahramani",
  publisher =    "MIT Press",
  address =      "Cambridge, MA",
  pages =        "399--406",
  year =         "2002",
  http =         "http://www.hutter1.net/ai/xentropy.htm",
  url =          "http://arxiv.org/abs/cs.AI/0112019",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-13-01.ps.gz",
  categories =   "I.2.   [Artificial Intelligence]",
  keywords =     "Mutual Information, Cross Entropy, Dirichlet distribution, Second
                  order distribution, expectation and variance of mutual
                  information.",
  abstract =     "The mutual information of two random variables i and j with joint
                  probabilities t_ij is commonly used in learning Bayesian nets as
                  well as in many other fields. The chances t_ij are usually
                  estimated by the empirical sampling frequency n_ij/n leading to a
                  point estimate I(n_ij/n) for the mutual information. To answer
                  questions like ``is I(n_ij/n) consistent with zero?'' or ``what is
                  the probability that the true mutual information is much larger
                  than the point estimate?'' one has to go beyond the point estimate.
                  In the Bayesian framework one can answer these questions by
                  utilizing a (second order) prior distribution p(t) comprising
                  prior information about t. From the prior p(t) one can compute the
                  posterior p(t|n), from which the distribution p(I|n) of the mutual
                  information can be calculated. We derive reliable and quickly
                  computable approximations for p(I|n). We concentrate on the mean,
                  variance, skewness, and kurtosis, and non-informative priors. For
                  the mean we also give an exact expression. Numerical issues and
                  the range of validity are discussed.",
  znote =        "Acceptance rate: 196/660 = 30\%",
}
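% Note: the paper derives fast analytic approximations to p(I|n); as an
% independent (slow) cross-check one can estimate the posterior moments by
% direct Dirichlet sampling. A minimal sketch, assuming a uniform
% (Dirichlet(1)) prior over the chances t, so that the posterior is
% Dirichlet(n_ij + 1); the function names are illustrative.

import numpy as np

def mutual_information(t):
    """Plug-in value I(t) = sum_ij t_ij log(t_ij / (t_i+ t_+j))."""
    ti = t.sum(axis=1, keepdims=True)
    tj = t.sum(axis=0, keepdims=True)
    m = t > 0
    return float((t[m] * np.log(t[m] / (ti @ tj)[m])).sum())

def posterior_I_moments(n, samples=10_000, seed=0):
    """Monte-Carlo mean and standard deviation of p(I|n)."""
    rng = np.random.default_rng(seed)
    alpha = (np.asarray(n, float) + 1.0).ravel()
    I = [mutual_information(rng.dirichlet(alpha).reshape(np.shape(n)))
         for _ in range(samples)]
    return float(np.mean(I)), float(np.std(I))

% Example: for n = np.array([[10., 2.], [3., 15.]]) the point estimate
% mutual_information(n / n.sum()) can be compared with the posterior mean
% and spread returned by posterior_I_moments(n).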
@InProceedings{Hutter:02fuss,
  author =       "Marcus Hutter",
  title =        "Fitness Uniform Selection to Preserve Genetic Diversity",
  booktitle =    "Proc. 2002 Congress on Evolutionary Computation (CEC-2002)",
  address =      "Honolulu, HI",
  publisher =    "IEEE",
  ISSN =         "1098-7576",
  _month =        may,
  year =         "2002",
  pages =        "783--788",
  keywords =     "Evolutionary algorithms, fitness uniform selection strategy,
                  preserve diversity, local optima, evolution,
                  correlated recombination, crossover.",
  http =         "http://www.hutter1.net/ai/pfuss.htm",
  url =          "http://arxiv.org/abs/cs.AI/0103015",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-01-01.ps.gz",
  abstract =     "In evolutionary algorithms, the fitness of a population increases
                  with time by mutating and recombining individuals and by a biased
                  selection of more fit individuals. The right selection pressure is
                  critical in ensuring sufficient optimization progress on the one
                  hand and in preserving genetic diversity to be able to escape from
                  local optima on the other. We propose a new selection scheme,
                  which is uniform in the fitness values. It generates selection
                  pressure towards sparsely populated fitness regions, not
                  necessarily towards higher fitness as all other
                  selection schemes do. We show that the new selection scheme can be
                  much more effective than standard selection schemes.",
  znote =        "Acceptance rate: 264/372 = 71\%",
}
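% Note: the selection rule itself fits in a few lines. A minimal sketch of
% fitness uniform selection as we read the abstract (draw a fitness value
% uniformly between the current extremes, then select the individual whose
% fitness is nearest to it); the interface is illustrative.

import random

def fuss_select(fitnesses):
    """Return the index of the selected individual."""
    target = random.uniform(min(fitnesses), max(fitnesses))
    return min(range(len(fitnesses)), key=lambda i: abs(fitnesses[i] - target))

% Individuals in sparsely populated fitness regions are thereby chosen
% disproportionately often, which is exactly the diversity-preserving
% pressure described above.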
@Article{Hutter:02fast,
  author =       "Marcus Hutter",
  title =        "The Fastest and Shortest Algorithm for All Well-Defined Problems",
  journal =      "International Journal of Foundations of Computer Science",
  publisher =    "World Scientific",
  volume =       "13",
  number =       "3",
  pages =        "431--443",
  year =         "2002",
  keywords =     "Acceleration, Computational Complexity,
                  Algorithmic Information Theory, Kolmogorov Complexity, Blum's
                  Speed-up Theorem, Levin Search.",
  http =         "http://www.hutter1.net/ai/pfastprg.htm",
  url =          "http://arxiv.org/abs/cs.CC/0206022",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-16-00.ps.gz",
  abstract =     "An algorithm M is described that solves any well-defined problem
                  p as quickly as the fastest algorithm computing a solution to
                  p, save for a factor of 5 and low-order additive terms. M
                  optimally distributes resources between the execution of provably
                  correct p-solving programs and an enumeration of all proofs,
                  including relevant proofs of program correctness and of time
                  bounds on program runtimes. M avoids Blum's speed-up theorem by
                  ignoring programs without correctness proof. M has broader
                  applicability and can be faster than Levin's universal search, the
                  fastest method for inverting functions save for a large
                  multiplicative constant. An extension of Kolmogorov complexity and
                  two novel natural measures of function complexity are used to show
                  that the most efficient program computing some function f is
                  also among the shortest programs provably computing f.",
  press =        "http://guide.supereva.it/c_/interventi/2001/04/38469.shtml",
}
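% Note: the algorithm M above combines proof enumeration with
% Levin-search-style time sharing. The time-sharing core alone can be
% sketched as follows; this toy version is not the paper's M (it omits the
% proof search), and the generator interface is an assumption made for
% illustration only.

def levin_style_search(programs, is_solution):
    """programs: iterable of (code_length, generator_factory) pairs.
    In phase k each candidate is (re)started and run for 2**(k - l)
    steps, so a program of length l receives roughly a 2**(-l) share
    of the total computation time, as in Levin's universal search."""
    programs = list(programs)
    k = 1
    while True:
        for l, make in programs:
            gen = make()
            for _, out in zip(range(2 ** max(k - l, 0)), gen):
                if is_solution(out):
                    return out
        k += 1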
@Article{Hutter:02uspatent,
  author =       "Marcus Hutter",
  title =        "System and method for analysing and displaying two- or three-dimensional sets of data",
  volume =       "number US2002041701, pages 1--15",
  journal =      "{\rm BrainLAB}, US patent",
  year =         "2002",
  url =          "http://l2.espacenet.com/espacenet/bnsviewer?CY=ep&LG=en&DB=EPD&PN=US2002041701&ID=US2002041701A1+I+",
  note =          "\\ http://l2.espacenet.com/espacenet/bnsviewer?CY=ep\&LG=en\& DB=EPD\&PN=US2002041701\&ID=US2002041701A1+I+",
}

%-------------Publications-of-Marcus-Hutter-2001--------------%

@Article{Hutter:01eupatent,
  author =       "Marcus Hutter",
  title =        "{S}tufenfreie {D}arstellung von zwei- oder dreidimensionalen Datens{\"a}tzen durch kr{\"u}mmungsminimierende {V}erschiebung von {P}ixelwerten",
  volume =       "number EP1184812, pages 1--19",
  journal =      "{\rm BrainLAB}, EU patent",
  year =         "2001",
  url =          "http://l2.espacenet.com/espacenet/bnsviewer?CY=ep&LG=en&DB=EPD&PN=EP1184812&ID=EP+++1184812A1+I+",
  note =          "\\ http://l2.espacenet.com/espacenet/bnsviewer?CY=ep\&LG=en\& DB=EPD\&PN=EP1184812\&ID=EP+++1184812A1+I+",
}
@InProceedings{Hutter:01market,
  author =       "Ivo Kwee and Marcus Hutter and Juergen Schmidhuber",
  title =        "Market-Based Reinforcement Learning in Partially Observable Worlds",
  address =      "Vienna",
  _month =        aug,
  year =         "2001",
  pages =        "865--873",
  booktitle =    "Proc. International Conf. on Artificial Neural Networks (ICANN-2001)",
  _journal =      "Artificial Neural Networks (ICANN-2001)",
  _editor =      "Georg Dorffner and Horst Bischof and Kurt Hornik",
  publisher =    "Springer, Berlin",
  series =       "LNCS",
  volume =       "2130",
  http =         "http://www.hutter1.net/ai/pmarket.htm",
  url =          "http://arxiv.org/abs/cs.AI/0105025",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-10-01.ps.gz",
  categories =   "I.2.   [Artificial Intelligence]",
  keywords =     "Hayek system; reinforcement learning; partial observable environment",
  abstract =     "Unlike traditional reinforcement learning (RL), market-based
                  RL is in principle applicable to worlds described by partially
                  observable Markov Decision Processes (POMDPs), where an agent needs
                  to learn short-term memories of relevant previous events in order to
                  execute optimal actions.  Most previous work, however, has focused
                  on reactive settings (MDPs) instead of POMDPs.  Here we reimplement
                  a recent approach to market-based RL and for the first time evaluate
                  it in a toy POMDP setting.",
  znote =        "Acceptance rate: 171/300 = 57\%",
}
@InProceedings{Hutter:01loss,
  author =       "Marcus Hutter",
  title =        "General Loss Bounds for Universal Sequence Prediction",
  year =         "2001",
  pages =        "210--217",
  booktitle =    "Proc. 18th International Conf. on Machine Learning (ICML-2001)",
  address =      "Williamstown, MA",
  _editor =       "Carla. E. Brodley and Andrea Pohoreckyj Danyluk",
  publisher =    "Morgan Kaufmann",
  ISBN =         "1-55860-778-1",
  ISSN =         "1049-1910",
  http =         "http://www.hutter1.net/ai/ploss.htm",
  url =          "http://arxiv.org/abs/cs.AI/0101019",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-03-01.ps.gz",
  categories =   "I.2.   [Artificial Intelligence],
                  I.2.6. [Learning],
                  I.2.8. [Problem Solving, Control Methods and Search],
                  F.1.3. [Complexity Classes].",
  keywords =     "Bayesian and deterministic prediction; general loss function;
                  Solomonoff induction; Kolmogorov complexity; learning; universal
                  probability; loss bounds; games of chance; partial and delayed
                  prediction; classification.",
  abstract =     "The Bayesian framework is ideally suited for induction problems.
                  The probability of observing $x_k$ at time $k$, given past
                  observations $x_1...x_{k-1}$, can be computed with Bayes' rule if
                  the true distribution $\mu$ of the sequences $x_1x_2x_3...$ is
                  known. The problem, however, is that in many cases one does not
                  even have a reasonable estimate of the true distribution. In order
                  to overcome this problem a universal distribution $\xi$ is defined
                  as a weighted sum of distributions $\mu_i\in M$, where $M$ is
                  any countable set of distributions including $\mu$. This is a
                  generalization of Solomonoff induction, in which $M$ is the set of
                  all enumerable semi-measures. Systems which predict $y_k$, given
                  $x_1...x_{k-1}$, and which receive loss $l_{x_k y_k}$ if $x_k$ is
                  the true next symbol of the sequence are considered. It is proven
                  that using the universal $\xi$ as a prior is nearly as good as
                  using the unknown true distribution $\mu$. Furthermore, games of
                  chance, defined as a sequence of bets, observations, and rewards
                  are studied. The time needed to reach the winning zone is
                  estimated. Extensions to arbitrary alphabets, partial and delayed
                  prediction, and more active systems are discussed.",
  znote =        "Acceptance rate: 80/249 = 32\%",
}
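% Note: in the notation of the abstract, the universal mixture (with prior
% weights written w_i here) and the induced loss-minimizing prediction
% scheme can be stated compactly:

  \xi(x_1...x_k) = \sum_{\mu_i \in M} w_i\,\mu_i(x_1...x_k), \qquad
  y_k := \arg\min_{y_k} \sum_{x_k} \xi(x_k \mid x_1...x_{k-1})\; l_{x_k y_k}.

% The loss bounds of the paper compare this predictor with the one that
% uses the unknown true distribution mu in place of xi.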
@InProceedings{Hutter:01alpha,
  author =       "Marcus Hutter",
  title =        "Convergence and Error bounds for Universal Prediction of Nonbinary Sequences",
  booktitle =    "Proc. 12th European Conf. on Machine Learning (ECML-2001)",
  address =      "Freiburg",
  _editor =      "Luc De Raedt and Peter Flach",
  publisher =    "Springer, Berlin",
  series =       "LNAI",
  volume =       "2167",
  ISBN =         "3-540-42536-5",
  _month =        dec,
  year =         "2001",
  pages =        "239--250",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-07-01.ps.gz",
  http =         "http://www.hutter1.net/ai/palpha.htm",
  url =          "http://arxiv.org/abs/cs.LG/0106036",
  keywords =     "Induction; Solomonoff, Bayesian, deterministic
                  prediction; Kolmogorov complexity; learning; loss function;
                  algorithmic information theory; universal probability",
  abstract =     "Solomonoff's uncomputable universal prediction scheme $\xi$ allows
                  to predict the next symbol $x_k$ of a sequence $x_1...x_{k-1}$ for
                  any Turing computable, but otherwise unknown, probabilistic
                  environment $\mu$. This scheme will be generalized to arbitrary
                  environmental classes, which, among other things, allows the
                  construction of computable universal prediction schemes $\xi$.
                  Convergence of $\xi$ to $\mu$ in a conditional mean squared sense
                  and with $\mu$ probability $1$ is proven. It is shown that the
                  average number of prediction errors made by the universal $\xi$
                  scheme rapidly converges to that made by the best possible
                  informed $\mu$ scheme. The schemes, theorems and proofs are given
                  for general finite alphabet, which results in additional
                  complications as compared to the binary case.
                  Several extensions of the presented theory and
                  results are outlined. They include general loss functions and
                  bounds, games of chance, infinite alphabet, partial and delayed
                  prediction, classification, and more active
                  systems.",
  znote =        "Acceptance rate: 90/240 = 37\% (includes PKDD)",
}
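% Note: "convergence in a conditional mean squared sense" refers to a
% cumulative bound of the following form, where w_mu is the prior weight
% of mu inside xi (stated schematically from Hutter's related work):

  \sum_{k=1}^{n} \mathbf{E}\Big[ \sum_{x_k} \big( \mu(x_k \mid x_{<k})
      - \xi(x_k \mid x_{<k}) \big)^2 \Big] \;\le\; \ln w_\mu^{-1} \;<\; \infty,

% which forces xi( . | x_<k) -> mu( . | x_<k) with mu-probability 1.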
@InProceedings{Hutter:01grep,
  author =       "Ivo Kwee and Marcus Hutter and Juergen Schmidhuber",
  title =        "Gradient-based Reinforcement Planning in Policy-Search Methods",
  year =         "2001",
  pages =        "27--29",
  booktitle =    "Proc. 5th European Workshop on Reinforcement Learning (EWRL-5)",
  volume =       "27",
  _editor =       "Marco A. Wiering",
  publisher =    "Onderwijsinsituut CKI, Utrecht Univ.",
  _series =       "Cognitieve Kunstmatige Intelligentie",
  ISBN =         "90-393-2874-9",
  ISSN =         "1389-5184",
  keywords =     "Artificial intelligence, reinforcement learning, direct policy search,
                  planning, gradient descent.",
  http =         "http://www.hutter1.net/ai/pgrep.htm",
  url =          "http://arxiv.org/abs/cs.AI/0111060",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-14-01.ps.gz",
  categories =   "I.2.   [Artificial Intelligence],
                  I.2.6. [Learning],
                  I.2.8. [Problem Solving, Control Methods and Search]",
  abstract =     "We introduce a learning method called ``gradient-based reinforcement
                  planning'' (GREP). Unlike traditional DP methods that improve their
                  policy backwards in time, GREP is a gradient-based method that plans
                  ahead and improves its policy {\em before} it actually acts in the
                  environment. We derive formulas for the exact policy gradient that
                  maximizes the expected future reward and confirm our ideas
                  with numerical experiments.",
}
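% Note: the exact GREP gradient formulas are derived in the paper itself;
% the common starting point for methods of this type is the standard
% likelihood-ratio (REINFORCE) identity for the expected return V of a
% stochastic policy pi_theta, stated here for background only:

  \nabla_\theta V(\theta) \;=\;
  \mathbf{E}_{\tau \sim \pi_\theta}\Big[ R(\tau)
      \sum_t \nabla_\theta \log \pi_\theta(a_t \mid s_t) \Big].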
@InProceedings{Hutter:01decision,
  author =       "Marcus Hutter",
  title =        "Universal Sequential Decisions in Unknown Environments",
  year =         "2001",
  pages =        "25--26",
  booktitle =    "Proc. 5th European Workshop on Reinforcement Learning (EWRL-5)",
  volume =       "27",
  _editor =       "Marco A. Wiering",
  publisher =    "Onderwijsinsituut CKI, Utrecht Univ.",
  _series =       "Cognitieve Kunstmatige Intelligentie",
  ISBN =         "90-393-2874-9",
  ISSN =         "1389-5184",
  keywords =     "Artificial intelligence, Rational agents,
                  sequential decision theory, universal Solomonoff induction,
                  algorithmic probability, reinforcement learning, computational
                  complexity, Kolmogorov complexity.",
  url =          "http://www.hutter1.net/ai/pdecision.htm",
  categories =   "I.2.   [Artificial Intelligence],
                  I.2.6. [Learning],
                  I.2.8. [Problem Solving, Control Methods and Search],
                  F.1.3. [Complexity Classes],
                  F.2.   [Analysis of Algorithms and Problem Complexity]",
  abstract =     "We give a brief introduction to the AIXI model, which unifies and
                  overcomes the limitations of sequential decision theory and
                  universal Solomonoff induction. While the former theory is suited
                  for active agents in known environments, the latter is suited for
                  passive prediction of unknown environments.",
  abstract2 =    "Decision theory formally solves the problem of rational agents in
                  uncertain worlds if the true environmental probability
                  distribution is known. Solomonoff's theory of universal induction
                  formally solves the problem of sequence prediction for unknown
                  distributions. We unify both theories and give strong arguments
                  that the resulting universal AIXI model behaves optimally in any
                  computable environment.",
}
@InProceedings{Hutter:01aixi,
  author =       "Marcus Hutter",
  title =        "Towards a Universal Theory of Artificial Intelligence based on Algorithmic
                  Probability and Sequential Decisions",
  year =         "2001",
  pages =        "226--238",
  booktitle =    "Proc. 12th European Conf. on
                  Machine Learning (ECML-2001)",
  address =      "Freiburg",
  _editor =      "Luc De Raedt and Peter Flach",
  publisher =    "Springer, Berlin",
  series =       "LNAI",
  volume =       "2167",
  ISBN =         "3-540-42536-5",
  keywords =     "Artificial intelligence, Rational agents,
                  sequential decision theory, universal Solomonoff induction,
                  algorithmic probability, reinforcement learning, computational
                  complexity, theorem proving, probabilistic reasoning, Kolmogorov
                  complexity, Levin search.",
  http =         "http://www.hutter1.net/ai/paixi.htm",
  url =          "http://arxiv.org/abs/cs.AI/0012011",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-14-00.ps.gz",
  categories =   "I.2.   [Artificial Intelligence],
                  I.2.3. [Deduction and Theorem Proving],
                  I.2.6. [Learning],
                  I.2.8. [Problem Solving, Control Methods and Search],
                  F.1.3. [Complexity Classes],
                  F.2.   [Analysis of Algorithms and Problem Complexity]",
  abstract =     "Decision theory formally solves the problem of rational agents in
                  uncertain worlds if the true environmental probability
                  distribution is known. Solomonoff's theory of universal induction
                  formally solves the problem of sequence prediction for unknown
                  distributions. We unify both theories and give strong arguments
                  that the resulting universal AIXI model behaves optimally in any
                  computable environment. The major drawback of the AIXI model is
                  that it is uncomputable. To overcome this problem, we construct a
                  modified algorithm AIXI^tl, which is still superior to any
                  other time t and space l bounded agent. The computation time
                  of AIXI^tl is of the order t x 2^l.",
  znote =        "Acceptance rate: 90/240 = 37\% (includes PKDD)",
}
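% Note: schematically, and in the notation used elsewhere in this list
% (actions y_k, perceptions x_k with rewards r_k, horizon m, universal
% monotone Turing machine U, program length \ell(q)), AIXI's action
% selection can be written as the expectimax expression

  y_k := \arg\max_{y_k} \sum_{x_k} \cdots \max_{y_m} \sum_{x_m}
         (r_k + \cdots + r_m) \sum_{q\,:\,U(q,y_1...y_m)=x_1...x_m} 2^{-\ell(q)},

% where the inner sum is the algorithmic-probability weight of all
% environments consistent with the interaction history.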
@Article{Hutter:01errbnd,
  author =       "Marcus Hutter",
  title =        "New Error Bounds for {Solomonoff} Prediction",
  year =         "2001",
  volume =       "62",
  number =       "4",
  pages =        "653--667",
  journal =      "Journal of Computer and System Sciences",
  address =      "Manno(Lugano), CH",
  keywords =     "Kolmogorov Complexity, Solomonoff Prediction, Error
                 Bound, Induction, Learning, Algorithmic Information
                 Theory, Bayes",
  http =         "http://www.hutter1.net/ai/perrbnd.htm",
  url =          "http://arxiv.org/abs/cs.AI/9912008",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-11-00.ps.gz",
  abstract =     "Several new relations between Solomonoff prediction
                  and Bayesian prediction and general probabilistic
                  prediction schemes will be proved. Among other things, they
                  show that the number of errors in Solomonoff prediction
                  is finite for computable prior probability, if finite
                  in the Bayesian case. Deterministic variants will also
                  be studied. The most interesting result is that the
                  deterministic variant of Solomonoff prediction is
                  optimal compared to any other probabilistic or
                  deterministic prediction scheme apart from additive
                  square root corrections only. This makes it well suited
                  even for difficult prediction problems, where it does
                  not suffice when the number of errors is minimal to
                  within some factor greater than one. Solomonoff's
                  original bound and the ones presented here complement
                  each other in a useful way.",
}
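% Note: the "additive square root corrections" above refer to bounds of
% the schematic form (constants omitted; E_n denotes the expected number
% of errors among the first n predictions and K(mu) the complexity of the
% true environment):

  E_n^{\xi} - E_n^{\mu} \;=\; O\big( \sqrt{E_n^{\mu}\, K(\mu)} + K(\mu) \big),

% so the relative excess error of the universal predictor vanishes as
% E_n^{\mu} grows, i.e. on problems that are hard for every predictor.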

%-------------Publications-of-Marcus-Hutter-2000--------------%

@Article{Hutter:00speed,
  author =       "Marcus Hutter",
  title =        "An effective Procedure for Speeding up Algorithms",
  year =         "10 pages, 2001",
  journal =      "Presented at the 3rd Workshop on Algorithmic
                  Information Theory (TAI-2001)",
  keywords =     "Acceleration, Computational Complexity,
                  Algorithmic Information Theory, Blum's Speed-up, Levin Search.",
  http =         "http://www.hutter1.net/ai/pspeed.htm",
  url =          "http://arxiv.org/abs/cs.CC/0102018",
  ftp =          "http://www.idsia.ch/idsiareport/IDSIA-16-00-v1.ps.gz",
  abstract =     "The provably asymptotically fastest algorithm within a factor of 5
                  for formally described problems will be constructed. The main idea
                  is to enumerate all programs provably equivalent to the original
                  problem by enumerating all proofs. The algorithm could be
                  interpreted as a generalization and improvement of Levin search,
                  which is, within a multiplicative constant, the fastest algorithm
                  for inverting functions. Blum's speed-up theorem is avoided by
                  taking into account only programs for which a correctness proof
                  exists. Furthermore, it is shown that the fastest program that
                  computes a certain function is also one of the shortest programs
                  provably computing this function. To quantify this statement, the
                  definition of Kolmogorov complexity is extended, and two new
                  natural measures for the complexity of a function are defined.",
}
@TechReport{Hutter:00kcunai,
  author =       "Marcus Hutter",
  title  =       "A Theory of Universal Artificial Intelligence based on Algorithmic Complexity",
  number =       "cs.AI/0004001",
  month =        apr,
  year =         "2000",
  institution =  "M{\"u}nchen, 62 pages",
  keywords =     "Artificial intelligence, algorithmic complexity,
                  sequential decision theory; induction; Solomonoff; Kolmogorov;
                  Bayes; reinforcement learning; universal sequence prediction;
                  strategic games; function minimization; supervised learning.",
  url =          "http://arxiv.org/abs/cs.AI/0004001",
  http =         "http://www.hutter1.net/ai/pkcunai.htm",
  abstract =     "Decision theory formally solves the problem of rational agents in
                  uncertain worlds if the true environmental prior probability
                  distribution is known. Solomonoff's theory of universal induction
                  formally solves the problem of sequence prediction for unknown
                  prior distributions. We combine both ideas and get a parameterless
                  theory of universal Artificial Intelligence. We give strong
                  arguments that the resulting AIXI model is the most intelligent
                  unbiased agent possible. We outline for a number of problem
                  classes, including sequence prediction, strategic games, function
                  minimization, reinforcement and supervised learning, how the
                  AIXI model can formally solve them. The major drawback of the
                  AIXI model is that it is uncomputable. To overcome this
                  problem, we construct a modified algorithm AIXI-tl, which is
                  still effectively more intelligent than any other time t and
                  space l bounded agent. The computation time of AIXI-tl
                  is of the order t x 2^l. Other discussed topics are formal
                  definitions of intelligence order relations, the horizon problem
                  and relations of the AIXI theory to other AI approaches.",
  note =         "http://arxiv.org/abs/cs.AI/0004001",
}

%----------Publications-of-Marcus-Hutter-1987-1999------------%

@Article{Hutter:97instanto,
  author =       "Marcus Hutter",
  author =       "Marcus Hutter",
  title =        "Instantons and Meson Correlators in {QCD}",
  year =         "1997",
  pages =        "131--143",
  journal =      "Zeitschrift f{\"u}r Physik",
  volume =       "C74",
  url =          "http://arxiv.org/abs/hep-ph/9501245",
  http =         "http://www.hutter1.net/physics/pinstant.htm",
  abstract =     "Various QCD correlators are calculated in the instanton liquid model
                  in zeromode approximation and $1/N_c$ expansion. Previous works are
                  extended by including dynamical quark loops. In contrast to the
                  original ``perturbative'' $1/N_c$ expansion, not all quark loops are
                  suppressed. In the flavor singlet meson correlators a chain of quark
                  bubbles survives the $N_c\to\infty$ limit causing a massive
                  $\eta^\prime$ in the pseudoscalar correlator while keeping massless
                  pions in the triplet correlator. The correlators are plotted and
                  meson masses and couplings are obtained from a spectral fit. They
                  are compared to the values obtained from numerical studies of the
                  instanton liquid and to experimental results.",
}
@Article{Hutter:97family,
  author =       "A. Blumhofer and M. Hutter",
  _author =       "Andreas Blumhofer and Marcus Hutter",
  title =        "Family Structure from Periodic Solutions of an Improved Gap Equation",
  journal =      "Nuclear Physics",
  volume =       "B484",
  year =         "1997",
  pages =        "80--96",
  url =          "http://arxiv.org/abs/hep-ph/9605393",
  http =         "http://www.hutter1.net/physics/pfamily.htm",
  abstract =     "Fermion mass models usually contain a horizontal symmetry and
                  therefore fail to predict the exponential mass spectrum of the Standard
                  Model in a natural way. In dynamical symmetry breaking there are
                  different concepts to introduce a fermion mass spectrum, which
                  automatically has the desired hierarchy. In constructing a specific
                  model we show that in some modified gap equations periodic solutions
                  with several fermion poles appear. The stability of these excitations
                  and the application of this toy model are discussed. The mass ratios
                  turn out to be approximately e^pi and e^2pi. Thus the model explains
                  the large ratios of fermion masses between successive generations in
                  the Standard Model without introducing large or small numbers by hand.",
  note =         "Missing figures in B494 (1997) 485",
}
@PhdThesis{Hutter:96thesis,
  author =       "Marcus Hutter",
  school =       "Faculty for Theoretical Physics, LMU Munich",
  title =        "Instantons in QCD: Theory and application of the instanton liquid model",
  year =         "1996",
  pages =        "1--100",
  url =          "http://arxiv.org/abs/hep-ph/0107098 ",
  http =         "http://www.hutter1.net/physics/pdise.htm",
  abstract =     "Numerical and analytical studies of the instanton liquid model have
                  allowed the determination of many hadronic parameters during the
                  last 13 years. Most of this thesis is devoted to the extension
                  of the analytical methods. The meson correlation (polarization)
                  functions are calculated in the instanton liquid model including
                  dynamical quark loops. The correlators are plotted and masses and
                  couplings of the sigma, rho, omega, a1 and f1 are obtained from a
                  spectral fit. A separated analysis allows the determination of the
                  eta' mass too. The results agree with the experimental values at
                  the 10% level. Furthermore, I give some predictions for the proton form
                  factors, which are related to the proton spin (problem). A gauge
                  invariant gluon mass for small momenta is also calculated. At the
                  end of the work some predictions are given, which do not rely on
                  the instanton liquid model. A gauge invariant quark propagator is
                  calculated in the one instanton background and is compared to the
                  regular and the singular propagator. An introduction to the art of
                  choosing a suitable gauge is given, in particular a criterion for
                  choosing between the regular and the singular gauge. An application is the derivation of a
                  finite relation between the quark condensate and the QCD scale Lambda,
                  where neither an infrared cutoff nor a specific instanton model has
                  been used. In general the instanton liquid model exhibits an astonishing
                  internal consistency and a good agreement with the experimental data.",
  note =         "Translated from the German original http://www.hutter1.net/physics/pdiss.htm",
}
@PhdThesis{Hutter:96diss,
  author =       "Marcus Hutter",
  school =       "Fakult{\"a}t f{\"u}r Theoretische Physik, LMU M{\"u}nchen",
  title =        "Instantonen in der QCD: Theorie und Anwendungen des Instanton-Fl{\"u}ssigkeit-Modells",
  year =         "1996",
  pages =        "1--105",
  url =          "http://arxiv.org/abs/hep-ph/9603280",
  http =         "http://www.hutter1.net/physics/pdiss.htm",
  abstract =     "Durch numerische Simulation des Instanton-Flüssigkeit-Modells
                  konnten eine Reihe hadronischer Größen in den letzten 13 Jahren
                  bestimmt werden. Der größte Teil dieser Arbeit ist der Erweiterung
                  der analytischen Methoden gewidmet. Die Meson-Korrelatoren
                  (auch Polarisations-Funktionen genannt) werden im Instanton-Flüssigkeits-Modell
                  berechnet, wobei dynamische Quark-Schleifen berücksichtigt werden.
                  Die Korrelatoren werden grafisch dargestellt und die Massen und Kopplungen
                  der sigma, rho, omega, a1 und f1 Mesonen werden mit Hilfe eines spektralen
                  Fits bestimmt. Eine gesonderte Betrachtung ermöglicht auch die Berechnung
                  der eta' Masse. Die Ergebnisse stimmen auf 10% Niveau mit den experimentellen
                  Werten überein. Weiterhin wird versucht, die axialen Formfaktoren des Protons
                  zu bestimmen. Diese stehen in Zusammenhang mit dem Proton-Spin(-Problem).
                  Eine eichinvariante Gluon-Masse wird für kleine Impulse berechnet.
                  Die Arbeit wird abgeschlossen mit einigen Vorhersagen, die sich nicht
                  speziell auf das Instanton-Flüssigkeits-Modell stützen. Im
                  ein-Instanton-Vakuum wird ein eichinvarianter Quark-Propagator berechnet
                  und mit dem regulären und dem singulären Propagator verglichen.
                  Kriterien für die Wahl einer geeigneten Eichung, insbesondere für die
                  Wahl der singulären oder der regulären Eichung, werden gegeben.
                  Eine Anwendung ist die Herleitung einer endlichen Relation zwischen
                  dem Quark-Kondensat und der QCD-Skala Lambda, wobei weder ein
                  Infrarot-Cutoff noch ein spezifisches Instanton-Modell verwendet werden.
                  Allgemein weist das Instanton-Flüssigkeits-Modell eine erstaunliche interne
                  Konsistenz und gute Übereinstimmung mit experimentellen Daten auf.",
  note =         "English translation available at http://www.hutter1.net/physics/pdise.htm",
}
@Article{Hutter:96eta,
  author =       "Marcus Hutter",
  title =        "The mass of the $\eta'$ in self-dual {QCD}",
  year =         "1996",
  pages =        "275--278",
  journal =      "Physics Letters",
  volume =       "B367",
  url =          "http://arxiv.org/abs/hep-ph/9509401",
  http =         "http://www.hutter1.net/physics/petamas.htm",
  abstract =     "The QCD gauge field is modeled as an ensemble of statistically
                  independent selfdual and antiselfdual regions. This model is
                  motivated by instanton physics. The scale anomaly then allows
                  one to relate the topological susceptibility to the gluon condensate.
                  With the help of Witten's formula for m_eta' and an estimate of
                  the suppression of the gluon condensate due to light quarks the
                  mass of the eta' can be related to f_pi and the physical gluon
                  condensate. We get the quite satisfactory value m_eta'=884+-116 MeV.
                  Using the physical eta' mass as an input it is in principle possible
                  to get information about the interaction between instantons and
                  anti-instantons.",
}
@TechReport{Hutter:95spin,
  author =       "Marcus Hutter",
  number =       "LMU-95-15",
  institution =  "Theoretische Physik, LMU M{\"u}nchen",
  title =        "Proton Spin in the Instanton Background",
  year =         "1995",
  url =          "http://arxiv.org/abs/hep-ph/9509402",
  http =         "http://www.hutter1.net/physics/pspin.htm",
  abstract =     "The proton form factors are reduced to vacuum correlators
                  of 4 quark fields by assuming independent constituent
                  quarks. The axial singlet quark and gluonic form factors
                  are calculated in the instanton liquid model. A discussion
                  of gauge(in)dependence is given.",
  note =          "15 pages",
}
@TechReport{Hutter:95prop,
  author =       "Marcus Hutter",
  number =       "LMU-95-03",
  institution =  "Theoretische Physik, LMU M{\"u}nchen",
  title =        "Gauge Invariant Quark Propagator in the Instanton Background",
  year =         "1995",
  url =          "http://arxiv.org/abs/hep-ph/9502361",
  http =         "http://www.hutter1.net/physics/pprop.htm",
  abstract =     "After a general discussion on the choice of gauge, we compare
                  the quark propagator in the background of one instanton in
                  regular and singular gauge with a gauge invariant propagator
                  obtained by inserting a path-ordered gluon exponential.
                  Using a gauge motivated by this analysis, we were able to
                  obtain a finite result for the quark condensate without
                  introducing an infrared cutoff or invoking a specific instanton
                  model.",
  note =        "15 pages",
}
@TechReport{Hutter:93gluon,
  author =       "Marcus Hutter",
  number =       "LMU-93-18",
  institution =  "Theoretische Physik, LMU M{\"u}nchen",
  title =        "Gluon Mass from Instantons",
  year =         "1993",
  url =          "http://arxiv.org/abs/hep-ph/9501335",
  http =         "http://www.hutter1.net/physics/pgluon.htm",
  abstract =     "The gluon propagator is calculated in the instanton background
                  in a form appropriate for extracting the momentum dependent
                  gluon mass. In background-xi-gauge we get a mass of 400 MeV
                  for small p^2, independent of the gauge parameter xi.",
  note =         "13 pages",
}
@MastersThesis{Hutter:92cfs,
  author =       "Marcus Hutter",
  school =       "Theoretische Informatik, TU M{\"u}nchen",
  title =        "{I}mplementierung eines {K}lassifizierungs-{S}ystems",
  year =         "1991",
  url =          "http://www.hutter1.net/ai/pcfs.htm",
  ps =           "http://www.hutter1.net/ai/pcfs.ps",
  pdf =          "http://www.hutter1.net/ai/pcfs.pdf",
  abstract =     "A classifier system is a massively parallel rule based system,
                  whose components (classifier) can exchange messages, whose behavior is
                  is assessed by a teacher (reinforcement), and which is able to learn by
                  means of credit assignment and a genetic algorithm. For an introduction
                  we have to refer to the, meanwhile extensive, literature; see especially
                  Goldberg (1989). The concept of a classifier system was first developed
                  by Holland (1986), but meanwhile a multitude of variants and extensions
                  exist (Booker et. al, 1989). So far it is impossible to
                  compare these variants in their performance, statements on the
                  quality of the various approaches are, hence, hard to impossible.
                  The program developed in this diploma thesis allows, for the first time,
                  a direct comparison of the most important variants.
                  The thesis describes the program, in which we have taken special attention
                  to an efficient implementation.",
  zusammenfassung = "Ein Klassifizierungssystem (CFS, engl. Classifiersystem) ist
                  ein massiv paralleles regelbasiertes System, dessen Komponenten
                  (Classifier) Nachrichten (Messages) austauschen können, dessen
                  Verhalten von einem Lehrer beurteilt wird (Reinforcement) und
                  das mittels Credit-Assignment und genetischen Algorithmen fähig
                  ist zu lernen. Für eine einführende Darstellung muß auf die
                  inzwischen sehr umfangreiche Literatur, insbesondere Goldberg (1989),
                  verwiesen werden. Das Konzept des CFS wurde zuerst von Holland (1986)
                  entwickelt, inzwischen gibt es aber eine Vielzahl von Varianten und
                  Erweiterungen (Booker et al., 1989). Bisher ist es nicht möglich,
                  diese Varianten in ihrer Performance zu vergleichen, eine Aussage
                  über die Güte der verschiedenen Ansätze ist somit kaum oder
                  überhaupt nicht möglich. Das in dieser Diplomarbeit erstellte
                  Programm gestattet erstmals bzgl. der wichtigsten Varianten einen
                  direkten Vergleich. In den folgenden Kapiteln wird dieses Programm,
                  bei dem besonders auf eine effiziente Implementierung geachtet wurde,
                  beschrieben.",
  note =         "72 pages with C listing, in German",
}
@TechReport{Hutter:90faka,
  author =       "Marcus Hutter",
  institution =  "Universit{\"a}t Erlangen-N{\"u}rnberg \&
                  Technische Universit{\"a}t M{\"u}nchen",
  title =        "{P}arallele {A}lgorithmen in der {S}tr{\"o}mungsmechanik",
  type =         "{F}erianakademie: {N}umerische {M}ethoden der {S}tr{\"o}mungsmechanik",
  year =         "1990",
  url =          "http://www.hutter1.net/official/faka.htm",
  note =         "10 pages, in German",
}
@TechReport{Hutter:90fopra,
  author =       "Marcus Hutter",
  institution =  "Theoretische Informatik, TU M{\"u}nchen",
  title =        "A Reinforcement Learning {H}ebb Net",
  year =         "1990",
  type =         "Fortgeschrittenenpraktikum",
  url =          "http://www.hutter1.net/ai/fopra.htm",
  ftp =          "http://www.hutter1.net/ai/fopra.ps.gz",
  pdf =          "http://www.hutter1.net/ai/fopra.pdf",
  abstract =     "This Fopra is motivated by the following observations about
                  human learning and about human neural information processing.
                  On the one side humans are able to learn supervised, unsupervised
                  and by reinforcement, on the other side there is no neural
                  distinction between informative, uninformative and evaluative
                  feedback. Furthermore, the Hebb learning rule is the only
                  biological inspired learning mechanism. If the human brain
                  is indeed a Hebb net this would imply that Hebb nets are
                  able to learn by reinforcement. The goal of this Fopra is
                  to investigate whether and how Hebb nets could be used for
                  reinforcement learning. It is shown that Hebb nets with a
                  suitable prior net topology can indeed learn, at least
                  simple tasks, by reinforcement.",
  note =         "30 pages with Pascal listing, in German",
}
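% Note: one simple way to let a Hebb rule exploit evaluative feedback, in
% the spirit of the report above (whose actual network, task, and rule
% details differ), is to gate the Hebbian update with the reward signal:

import numpy as np

def hebb_reinforce_step(w, pre, post, reward, lr=0.01):
    """Reward-gated Hebbian update: dw = lr * reward * outer(post, pre).
    With reward in {+1, -1} this strengthens or weakens exactly those
    connections whose pre/post activity coincided with the feedback."""
    return w + lr * reward * np.outer(post, pre)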
@Article{Hutter:87cad,
  author =       "Marcus Hutter",
  title =        "Fantastische {3D-Graphik} mit dem {CPC-Giga-CAD}",
  journal =      "7. Schneider Sonderheft, Happy Computer, Sonderheft 16",
  publisher =    "Markt\&Technik",
  year =         "1987",
  pages =        "41--92",
  url =          "http://www.hutter1.net/gigacad/gigacad.htm",
  abstract =     "CAD steht fur Computer Aided Design. Bis heute war dieses
                  Gebiet hauptsächlich Domäne der Großrechner.
                  Mit $\gg$CPC-Giga-CAD$\ll$ wird auch auf dem Schneider CPC
                  automatisiertes und computergestütztes Zeichnen und
                  Konstruieren zum Kinderspiel.",
}