| Copyright | (c) Justus Sagemüller 2016 |
|---|---|
| License | GPL v3 |
| Maintainer | (@) jsag $ hvl.no |
| Stability | experimental |
| Portability | portable |
| Safe Haskell | None |
| Language | Haskell2010 |
Math.LinearMap.Category
Synopsis
- newtype LinearFunction s v w = LinearFunction {
- getLinearFunction :: v -> w
- type (-+>) v w = LinearFunction (Scalar w) v w
- type Bilinear v w y = LinearFunction (Scalar v) v (LinearFunction (Scalar v) w y)
- lfun :: (EnhancedCat f (LinearFunction s), LinearSpace u, TensorSpace v, Scalar u ~ s, Scalar v ~ s, Object f u, Object f v) => (u -> v) -> f u v
- newtype LinearMap s v w = LinearMap {
- getLinearMap :: TensorProduct (DualVector v) w
- type (+>) v w = LinearMap (Scalar v) v w
- (⊕) :: (u +> w) -> (v +> w) -> (u, v) +> w
- (>+<) :: (u +> w) -> (v +> w) -> (u, v) +> w
- adjoint :: forall v w. (LinearSpace v, LinearSpace w, Scalar v ~ Scalar w) => (v +> DualVector w) -+> (w +> DualVector v)
- (<.>^) :: LinearSpace v => DualVector v -> v -> Scalar v
- (-+|>) :: (EnhancedCat f (LinearFunction s), LSpace u, LSpace v, Scalar u ~ s, Scalar v ~ s, Object f u, Object f v) => DualVector u -> v -> f u v
- newtype Tensor s v w = Tensor {
- getTensorProduct :: TensorProduct v w
- type (⊗) v w = Tensor (Scalar v) v w
- (⊗) :: forall v w. (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v, Num' (Scalar v)) => v -> w -> v ⊗ w
- newtype SymmetricTensor s v = SymTensor {
- getSymmetricTensor :: Tensor s v v
- squareV :: (Num' s, s ~ Scalar v) => TensorSpace v => v -> SymmetricTensor s v
- squareVs :: (Num' s, s ~ Scalar v) => TensorSpace v => [v] -> SymmetricTensor s v
- type (⊗〃+>) v w = LinearMap (Scalar v) (SymmetricTensor (Scalar v) v) w
- currySymBilin :: LinearSpace v => (v ⊗〃+> w) -+> (v +> (v +> w))
- newtype Norm v = Norm {
- applyNorm :: v -+> DualVector v
- type Seminorm v = Norm v
- spanNorm :: forall v. LSpace v => [DualVector v] -> Seminorm v
- euclideanNorm :: HilbertSpace v => Norm v
- (|$|) :: (LSpace v, Floating (Scalar v)) => Seminorm v -> v -> Scalar v
- normSq :: LSpace v => Seminorm v -> v -> Scalar v
- (<$|) :: LSpace v => Norm v -> v -> DualVector v
- scaleNorm :: forall v. LSpace v => Scalar v -> Norm v -> Norm v
- normSpanningSystem :: SimpleSpace v => Seminorm v -> [DualVector v]
- normSpanningSystem' :: (FiniteDimensional v, IEEE (Scalar v)) => Seminorm v -> [v]
- type Variance v = Norm (DualVector v)
- spanVariance :: forall v. LSpace v => [v] -> Variance v
- (|&>) :: LSpace v => DualVector v -> Variance v -> v
- varianceSpanningSystem :: forall v. SimpleSpace v => Variance v -> [v]
- dualNorm :: SimpleSpace v => Norm v -> Variance v
- dualNorm' :: forall v. SimpleSpace v => Variance v -> Norm v
- dependence :: forall u v. (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Variance (u, v) -> u +> v
- densifyNorm :: forall v. LSpace v => Norm v -> Norm v
- wellDefinedNorm :: forall v. LinearSpace v => Norm v -> Maybe (Norm v)
- (\$) :: forall u v. (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => (u +> v) -> v -> u
- pseudoInverse :: forall u v. (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => (u +> v) -> v +> u
- roughDet :: (FiniteDimensional v, IEEE (Scalar v)) => (v +> v) -> Scalar v
- linearRegressionW :: forall s x m y. (LinearSpace x, SimpleSpace y, SimpleSpace m, Scalar x ~ s, Scalar y ~ s, Scalar m ~ s, RealFrac' s) => Norm y -> (x -> m +> y) -> [(x, y)] -> m
- linearRegression :: forall s x m y. (LinearSpace x, SimpleSpace y, SimpleSpace m, Scalar x ~ s, Scalar y ~ s, Scalar m ~ s, RealFrac' s) => (x -> m +> y) -> [(x, (y, Norm y))] -> LinearRegressionResult x y m
- data LinearRegressionResult x y m
- linearFit_χν² :: LinearRegressionResult x y m -> Scalar m
- linearFit_bestModel :: LinearRegressionResult x y m -> m
- linearFit_modelUncertainty :: LinearRegressionResult x y m -> Norm m
- eigen :: (FiniteDimensional v, HilbertSpace v, IEEE (Scalar v)) => (v +> v) -> [(Scalar v, v)]
- constructEigenSystem :: (LSpace v, RealFloat (Scalar v)) => Norm v -> Scalar v -> (v -+> v) -> [v] -> [[Eigenvector v]]
- roughEigenSystem :: (FiniteDimensional v, IEEE (Scalar v)) => Norm v -> (v +> v) -> [Eigenvector v]
- finishEigenSystem :: forall v. (LSpace v, RealFloat (Scalar v)) => Norm v -> [Eigenvector v] -> [Eigenvector v]
- data Eigenvector v = Eigenvector {
- ev_Eigenvalue :: Scalar v
- ev_Eigenvector :: v
- ev_FunctionApplied :: v
- ev_Deviation :: v
- ev_Badness :: Scalar v
- type LSpace v = (LinearSpace v, LinearSpace (Scalar v), LinearSpace (DualVector v), Num' (Scalar v))
- class (VectorSpace v, PseudoAffine v) => TensorSpace v where
- type TensorProduct v w :: *
- scalarSpaceWitness :: ScalarSpaceWitness v
- linearManifoldWitness :: LinearManifoldWitness v
- zeroTensor :: (TensorSpace w, Scalar w ~ Scalar v) => v ⊗ w
- toFlatTensor :: v -+> (v ⊗ Scalar v)
- fromFlatTensor :: (v ⊗ Scalar v) -+> v
- addTensors :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w
- subtractTensors :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w
- scaleTensor :: (TensorSpace w, Scalar w ~ Scalar v) => Bilinear (Scalar v) (v ⊗ w) (v ⊗ w)
- negateTensor :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (v ⊗ w)
- tensorProduct :: (TensorSpace w, Scalar w ~ Scalar v) => Bilinear v w (v ⊗ w)
- tensorProducts :: (TensorSpace w, Scalar w ~ Scalar v) => [(v, w)] -> v ⊗ w
- transposeTensor :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (w ⊗ v)
- fmapTensor :: (TensorSpace w, TensorSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w -+> x) (v ⊗ w) (v ⊗ x)
- fzipTensorWith :: (TensorSpace u, TensorSpace w, TensorSpace x, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear ((w, x) -+> u) (v ⊗ w, v ⊗ x) (v ⊗ u)
- coerceFmapTensorProduct :: Functor p => p v -> Coercion a b -> Coercion (TensorProduct v a) (TensorProduct v b)
- wellDefinedVector :: v -> Maybe v
- wellDefinedTensor :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> Maybe (v ⊗ w)
- class (TensorSpace v, Num (Scalar v)) => LinearSpace v where
- type DualVector v :: *
- dualSpaceWitness :: DualSpaceWitness v
- linearId :: v +> v
- idTensor :: v ⊗ DualVector v
- sampleLinearFunction :: (TensorSpace w, Scalar v ~ Scalar w) => (v -+> w) -+> (v +> w)
- toLinearForm :: DualVector v -+> (v +> Scalar v)
- fromLinearForm :: (v +> Scalar v) -+> DualVector v
- coerceDoubleDual :: Coercion v (DualVector (DualVector v))
- trace :: (v +> v) -+> Scalar v
- contractTensorMap :: (TensorSpace w, Scalar w ~ Scalar v) => (v +> (v ⊗ w)) -+> w
- contractMapTensor :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ (v +> w)) -+> w
- contractTensorFn :: forall w. (TensorSpace w, Scalar w ~ Scalar v) => (v -+> (v ⊗ w)) -+> w
- contractLinearMapAgainst :: (LinearSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) (w -+> v) (Scalar v)
- applyDualVector :: LinearSpace v => Bilinear (DualVector v) v (Scalar v)
- applyLinear :: (TensorSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) v w
- composeLinear :: (LinearSpace w, TensorSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w +> x) (v +> w) (v +> x)
- tensorId :: (LinearSpace w, Scalar w ~ Scalar v) => (v ⊗ w) +> (v ⊗ w)
- applyTensorFunctional :: (LinearSpace u, Scalar u ~ Scalar v) => Bilinear (DualVector (v ⊗ u)) (v ⊗ u) (Scalar v)
- applyTensorLinMap :: (LinearSpace u, TensorSpace w, Scalar u ~ Scalar v, Scalar w ~ Scalar v) => Bilinear ((v ⊗ u) +> w) (v ⊗ u) w
- class LinearSpace v => SemiInner v where
- dualBasisCandidates :: [(Int, v)] -> Forest (Int, DualVector v)
- tensorDualBasisCandidates :: (SemiInner w, Scalar w ~ Scalar v) => [(Int, v ⊗ w)] -> Forest (Int, DualVector (v ⊗ w))
- symTensorDualBasisCandidates :: [(Int, SymmetricTensor (Scalar v) v)] -> Forest (Int, SymmetricTensor (Scalar v) (DualVector v))
- symTensorTensorDualBasisCandidates :: forall w. (SemiInner w, Scalar w ~ Scalar v) => [(Int, SymmetricTensor (Scalar v) v ⊗ w)] -> Forest (Int, SymmetricTensor (Scalar v) v +> DualVector w)
- cartesianDualBasisCandidates :: [DualVector v] -> (v -> [ℝ]) -> [(Int, v)] -> Forest (Int, DualVector v)
- embedFreeSubspace :: forall v t r. (HasCallStack, SemiInner v, RealFrac' (Scalar v), Traversable t) => t v -> Maybe (ReifiedLens' v (t (Scalar v)))
- class LSpace v => FiniteDimensional v where
- data SubBasis v :: *
- entireBasis :: SubBasis v
- enumerateSubBasis :: SubBasis v -> [v]
- subbasisDimension :: SubBasis v -> Int
- decomposeLinMap :: (LSpace w, Scalar w ~ Scalar v) => (v +> w) -> (SubBasis v, DList w)
- decomposeLinMapWithin :: (LSpace w, Scalar w ~ Scalar v) => SubBasis v -> (v +> w) -> Either (SubBasis v, DList w) (DList w)
- recomposeSB :: SubBasis v -> [Scalar v] -> (v, [Scalar v])
- recomposeSBTensor :: (FiniteDimensional w, Scalar w ~ Scalar v) => SubBasis v -> SubBasis w -> [Scalar v] -> (v ⊗ w, [Scalar v])
- recomposeLinMap :: (LSpace w, Scalar w ~ Scalar v) => SubBasis v -> [w] -> (v +> w, [w])
- recomposeContraLinMap :: (LinearSpace w, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (DualVector v) -> v +> w
- recomposeContraLinMapTensor :: (FiniteDimensional u, LinearSpace w, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (v +> DualVector u) -> (v ⊗ u) +> w
- uncanonicallyFromDual :: DualVector v -+> v
- uncanonicallyToDual :: v -+> DualVector v
- addV :: AdditiveGroup w => LinearFunction s (w, w) w
- scale :: VectorSpace v => Bilinear (Scalar v) v v
- inner :: InnerSpace v => Bilinear v v (Scalar v)
- flipBilin :: Bilinear v w y -> Bilinear w v y
- bilinearFunction :: (v -> w -> y) -> Bilinear v w y
- (.⊗) :: (TensorSpace v, HasBasis v, TensorSpace w, Num' (Scalar v), Scalar v ~ Scalar w) => Basis v -> w -> v ⊗ w
- (·) :: TensorQuot v w => (v ⨸ w) -> v -> w
- type DualSpace v = v +> Scalar v
- riesz :: forall v. (FiniteDimensional v, InnerSpace v) => DualVector v -+> v
- coRiesz :: forall v. (LSpace v, InnerSpace v) => v -+> DualVector v
- showsPrecAsRiesz :: forall v. (FiniteDimensional v, InnerSpace v, Show v, HasBasis (Scalar v), Basis (Scalar v) ~ ()) => Int -> DualSpace v -> ShowS
- (.<) :: (FiniteDimensional v, Num' (Scalar v), InnerSpace v, LSpace w, HasBasis w, Scalar v ~ Scalar w) => Basis w -> v -> v +> w
- type HilbertSpace v = (LSpace v, InnerSpace v, DualVector v ~ v)
- type SimpleSpace v = (FiniteDimensional v, FiniteDimensional (DualVector v), SemiInner v, SemiInner (DualVector v), RealFrac' (Scalar v))
- type RealSpace v = (LinearSpace v, Scalar v ~ ℝ, TensorQuot v ℝ, (v ⨸ ℝ) ~ DualVector v, TensorQuot v v, (v ⨸ v) ~ ℝ)
- class (Num s, LinearSpace s, FreeVectorSpace s) => Num' s where
- type Fractional' s = (Num' s, Fractional s, Eq s, VectorSpace s)
- type RealFrac' s = (Fractional' s, IEEE s, InnerSpace s)
- type RealFloat' s = (RealFrac' s, Floating s)
- type LinearShowable v = (Show v, RieszDecomposable v)
- data ClosedScalarWitness s where
- ClosedScalarWitness :: (Scalar s ~ s, DualVector s ~ s) => ClosedScalarWitness s
- data TrivialTensorWitness s w where
- TrivialTensorWitness :: w ~ TensorProduct s w => TrivialTensorWitness s w
- data ScalarSpaceWitness v where
- ScalarSpaceWitness :: (Num' (Scalar v), Scalar (Scalar v) ~ Scalar v) => ScalarSpaceWitness v
- data DualSpaceWitness v where
- DualSpaceWitness :: (LinearSpace (Scalar v), DualVector (Scalar v) ~ Scalar v, LinearSpace (DualVector v), Scalar (DualVector v) ~ Scalar v, DualVector (DualVector v) ~ v) => DualSpaceWitness v
- data LinearManifoldWitness v where
- LinearManifoldWitness :: (Needle v ~ v, AffineSpace v, Diff v ~ v) => BoundarylessWitness v -> LinearManifoldWitness v
- relaxNorm :: forall v. SimpleSpace v => Norm v -> [v] -> Norm v
- transformNorm :: forall v w. (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Norm w -> Norm v
- transformVariance :: forall v w. (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Variance v -> Variance w
- findNormalLength :: forall s. RealFrac' s => Norm s -> Maybe s
- normalLength :: forall s. RealFrac' s => Norm s -> s
- summandSpaceNorms :: forall u v. (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Norm (u, v) -> (Norm u, Norm v)
- sumSubspaceNorms :: forall u v. (LSpace u, LSpace v, Scalar u ~ Scalar v) => Norm u -> Norm v -> Norm (u, v)
- sharedNormSpanningSystem :: SimpleSpace v => Norm v -> Seminorm v -> [(DualVector v, Scalar v)]
- sharedSeminormSpanningSystem :: forall v. SimpleSpace v => Seminorm v -> Seminorm v -> [(DualVector v, Maybe (Scalar v))]
- sharedSeminormSpanningSystem' :: forall v. SimpleSpace v => Seminorm v -> Seminorm v -> [v]
- convexPolytopeHull :: forall v. SimpleSpace v => [v] -> [DualVector v]
- symmetricPolytopeOuterVertices :: forall v. SimpleSpace v => [DualVector v] -> [v]
Linear maps
This library deals with linear functions, i.e. functions f :: v -> w
that fulfill
f $ μ ^* u ^+^ v  ≡  μ ^* f u ^+^ f v    ∀ u, v :: v;  μ :: Scalar v
Such functions form a cartesian monoidal category (in maths called VectK). This is implemented by PreArrow, which is the preferred interface for dealing with these mappings. The basic “matrix operations” are then:

- Identity matrix: id
- Matrix addition: ^+^ (linear maps form an ordinary vector space)
- Matrix-matrix multiplication: <<< (or >>>, or .)
- Matrix-vector multiplication: $
- Vertical matrix concatenation: &&&
- Horizontal matrix concatenation: ⊕ (aka >+<)
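For instance, a 2×2 “matrix” on the pair space (ℝ, ℝ) can be assembled column-wise from these primitives. This is a minimal sketch, not taken from the package itself: it assumes the scalar and tuple instances described in this module, that ℝ (a Double synonym from this ecosystem) is in scope, and that lfun may target the LinearMap category (via arr, as explained below).

{-# LANGUAGE TypeOperators #-}
import Math.LinearMap.Category

-- ⎛1 2⎞
-- ⎝3 4⎠ — each ⊕-summand maps one input coordinate to a full output column.
m :: (ℝ, ℝ) +> (ℝ, ℝ)
m = lfun (\x -> (1*x, 3*x)) ⊕ lfun (\y -> (2*y, 4*y))

-- Matrix-vector multiplication, per the operation list above:
-- m $ (1, 1)  ≡  (3, 7)

Later sketches in this page assume the same context.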
But linear mappings need not necessarily be implemented as matrices:
Function implementation
newtype LinearFunction s v w Source #
A linear map, represented simply as a Haskell function tagged with the type of scalar with respect to which it is linear. Many (sparse) linear mappings can actually be calculated much more efficiently if you don't represent them with any kind of matrix, but just as a function (which is, after all, what a linear map fundamentally is, mathematically speaking).

However, if you sum up many LinearFunctions – which you can simply do with the VectorSpace instance – they become ever slower to evaluate, because the summand functions are each computed individually and only their results summed. That is where LinearMap is generally preferable.

You can always convert between these equivalent categories using arr.
Constructors
LinearFunction
  getLinearFunction :: v -> w
Instances
type (-+>) v w = LinearFunction (Scalar w) v w Source #
Infix synonym of LinearFunction, without explicit mention of the scalar type.
type Bilinear v w y = LinearFunction (Scalar v) v (LinearFunction (Scalar v) w y) Source #
A bilinear function is a linear function mapping to a linear function, or equivalently a 2-argument function that's linear in each argument independently. Note that this cannot be uncurried to a linear function with a tuple argument (this would not be linear but quadratic).
lfun :: (EnhancedCat f (LinearFunction s), LinearSpace u, TensorSpace v, Scalar u ~ s, Scalar v ~ s, Object f u, Object f v) => (u -> v) -> f u v Source #
Use a function as a linear map. This is only well-defined if the function is linear (this condition is not checked).
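As a sketch (same assumed context as above, and assuming lfun can also target the LinearFunction category itself):

-- A rotation by 90°, kept as a plain function rather than a matrix:
rot90 :: (ℝ, ℝ) -+> (ℝ, ℝ)
rot90 = lfun (\(x, y) -> (negate y, x))

-- Nothing checks the linearity condition:
-- lfun (\(x, y) -> (x*y, 1)) would typecheck, but is not a linear map.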
Tensor implementation
newtype LinearMap s v w Source #
The tensor product between one space's dual space and another space is the space spanned by vector–dual-vector pairs, in bra-ket notation written as
m = ∑ |w⟩⟨v|
Any linear mapping can be written as such a (possibly infinite) sum. The TensorProduct data structure only stores the linearly independent parts though; for simple finite-dimensional spaces this means e.g. LinearMap ℝ ℝ³ ℝ³ effectively boils down to an ordinary matrix type, namely an array of column-vectors |w⟩.

(The ⟨v| dual-vectors are then simply assumed to come from the canonical basis.)

For bigger spaces, the tensor product may be implemented in a more efficient sparse structure; this can be defined in the TensorSpace instance.
Constructors
LinearMap
  getLinearMap :: TensorProduct (DualVector v) w
Instances
type (+>) v w = LinearMap (Scalar v) v w Source #
Infix synonym for LinearMap, without explicit mention of the scalar type.
(⊕) :: (u +> w) -> (v +> w) -> (u, v) +> w infixr 6 Source #
The dual operation to the tuple constructor, or rather to the &&& fanout operation: evaluate two (linear) functions in parallel and sum up the results. The typical use is to concatenate “row vectors” in a matrix definition.
adjoint :: forall v w. (LinearSpace v, LinearSpace w, Scalar v ~ Scalar w) => (v +> DualVector w) -+> (w +> DualVector v) Source #
For real matrices, this boils down to transpose. For free complex spaces it also incurs complex conjugation.

The signature can also be understood as

adjoint :: (v +> w) -> (DualVector w +> DualVector v)

or

adjoint :: (DualVector v +> DualVector w) -> (w +> v)

but not (v +> w) -> (w +> v) in general (though in a Hilbert space, this too is equivalent, via the riesz isomorphism).
Dual vectors
A DualVector v is a linear functional or linear form on the vector space v, i.e. it is a linear function from the vector space into its scalar field. However, these functions themselves form a vector space, known as the dual space. In particular, the dual space of any InnerSpace is isomorphic to the space itself.

(More precisely: the continuous dual space of a Hilbert space is isomorphic to that Hilbert space itself; see the riesz isomorphism.)

As a matter of fact, in many applications, no distinction is made between a space and its dual. Indeed, for the basic LinearSpace instances we have DualVector v ~ v, and <.>^ is simply defined as a scalar product. In this case, a general LinearMap is just a tensor product / matrix.
However, scalar products are often not as natural as they are made to look:
- A scalar product is only preserved under orthogonal transformations. It is not preserved under scalings, and certainly not under general linear transformations. This is very important in applications such as relativity theory (where people talk about covariant vs contravariant tensors), but also relevant for more mundane manifolds like sphere surfaces: on such a surface, the natural symmetry transformations generally do not preserve a scalar product you might define.
- There may be more than one meaningful scalar product. For instance, the Sobolev space of weakly differentiable functions also permits the 𝐿² scalar product, in addition to its native Sobolev one – each has different and useful properties.
Neither of these is a problem if we keep the dual space as a separate type. Effectively, this enables the type system to prevent you from writing code that does not behave naturally (i.e. that depends on a concrete choice of basis / scalar product).
For cases when you do have some given notion of orientation/scale in a vector space
and need it for an algorithm, you can always provide a Norm
, which is essentially
a reified scalar product.
Note that DualVector (DualVector v) ~ v in any LSpace: the double-dual space is naturally isomorphic to the original space, by way of

v <.>^ dv ≡ dv <.>^ v
(<.>^) :: LinearSpace v => DualVector v -> v -> Scalar v infixr 7 Source #
(-+|>) :: (EnhancedCat f (LinearFunction s), LSpace u, LSpace v, Scalar u ~ s, Scalar v ~ s, Object f u, Object f v) => DualVector u -> v -> f u v infixr 7 Source #
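A small sketch of the pairing operator, using that for the basic instances DualVector v ~ v and <.>^ is the ordinary scalar product:

dv, v :: (ℝ, ℝ)
dv = (1, 2)   -- used as a functional, a “row vector”
v  = (3, 4)

-- dv <.>^ v  ≡  1*3 + 2*4  ≡  11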
Tensor spaces
Tensor products are most interesting because they can be used to implement linear mappings, but they also form a useful vector space in their own right.
Constructors
Tensor
  getTensorProduct :: TensorProduct v w
Instances
type (⊗) v w = Tensor (Scalar v) v w infixl 7 Source #
Infix synonym for Tensor, without explicit mention of the scalar type.
(⊗) :: forall v w. (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v, Num' (Scalar v)) => v -> w -> v ⊗ w infixl 7 Source #
Infix version of tensorProduct.
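A sketch on the self-dual pair space (with the convention of the first factor indexing rows, as an assumption for illustration):

t :: (ℝ, ℝ) ⊗ (ℝ, ℝ)
t = (1, 2) ⊗ (3, 4)

-- As a “matrix”, this is the outer product ⎛3 4⎞
--                                          ⎝6 8⎠
-- (i-th row: (3,4) scaled by the i-th component of (1,2)).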
Symmetric
newtype SymmetricTensor s v Source #
Constructors
SymTensor
  getSymmetricTensor :: Tensor s v v
Instances
squareV :: (Num' s, s ~ Scalar v) => TensorSpace v => v -> SymmetricTensor s v Source #
squareVs :: (Num' s, s ~ Scalar v) => TensorSpace v => [v] -> SymmetricTensor s v Source #
currySymBilin :: LinearSpace v => (v ⊗〃+> w) -+> (v +> (v +> w)) Source #
Norms
A norm is a way to quantify the magnitude/length of different vectors, even if they point in different directions.
In an InnerSpace, a norm is always given by the scalar product, but there are spaces without a canonical scalar product (or situations in which this scalar product does not give the metric you want). Hence, we let functions like constructEigenSystem, which depend on a norm for orthonormalisation, accept a Norm as an extra argument instead of requiring InnerSpace.
A positive (semi)definite symmetric bilinear form. This gives rise to a norm thus:

Norm n |$| v = √(n v <.>^ v)

Strictly speaking, this type is neither strong enough nor general enough to deserve the name Norm: it includes proper Seminorms (i.e. m |$| v ≡ 0 does not guarantee v == zeroV), but not actual norms such as the ℓ₁-norm on ℝⁿ (taxicab norm) or the supremum norm. However, 𝐿₂-like norms are the only ones that can really be formulated without any basis reference, and guaranteeing positive definiteness through the type system is scarcely practical.
Constructors
Norm
  applyNorm :: v -+> DualVector v
type Seminorm v = Norm v Source #
A “norm” that may explicitly be degenerate, with m |$| v ⩵ 0 for some v ≠ zeroV.
spanNorm :: forall v. LSpace v => [DualVector v] -> Seminorm v Source #
A seminorm defined by

‖v‖ = √(∑ᵢ ⟨dᵢ|v⟩²)

for some dual vectors dᵢ. If given a complete basis of the dual space, this generates a proper Norm.

If the dᵢ are a complete orthonormal system, you get the euclideanNorm (in an inefficient form).
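A sketch on (ℝ, ℝ), whose basic instance is self-dual:

n2 :: Norm (ℝ, ℝ)
n2 = spanNorm [(1, 0), (0, 1)]   -- a complete ON system: behaves like euclideanNorm
-- n2 |$| (3, 4)  ≡  √(3² + 4²)  ≡  5

sem :: Seminorm (ℝ, ℝ)
sem = spanNorm [(1, 0)]          -- degenerate: sem |$| (0, y) ≡ 0 for any y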
euclideanNorm :: HilbertSpace v => Norm v Source #
The canonical standard norm (2-norm) on inner-product / Hilbert spaces.
(|$|) :: (LSpace v, Floating (Scalar v)) => Seminorm v -> v -> Scalar v infixr 0 Source #
Use a Norm to measure the length / norm of a vector.

euclideanNorm |$| v ≡ √(v <.> v)
normSq :: LSpace v => Seminorm v -> v -> Scalar v Source #
The squared norm. More efficient than |$|, which also needs to compute a square root.
(<$|) :: LSpace v => Norm v -> v -> DualVector v infixr 0 Source #
“Partially apply” a norm, yielding a dual vector (i.e. a linear form that accepts the second argument of the scalar product).

(euclideanNorm <$| v) <.>^ w ≡ v <.> w

See also |&>.
scaleNorm :: forall v. LSpace v => Scalar v -> Norm v -> Norm v Source #
Scale the result of a norm with the absolute value of the given number.

scaleNorm μ n |$| v = abs μ * (n |$| v)

Equivalently, this scales the norm's unit ball by the reciprocal of that factor.
normSpanningSystem :: SimpleSpace v => Seminorm v -> [DualVector v] Source #
normSpanningSystem' :: (FiniteDimensional v, IEEE (Scalar v)) => Seminorm v -> [v] Source #
Variances
type Variance v = Norm (DualVector v) Source #
A multidimensional variance of points v with some distribution can be considered a norm on the dual space, quantifying for a dual vector dv the expectation value of (dv <.>^ v)².
spanVariance :: forall v. LSpace v => [v] -> Variance v Source #
(|&>) :: LSpace v => DualVector v -> Variance v -> v infixl 1 Source #
Flipped, “ket” version of <$|.

v <.>^ (w |&> euclideanNorm) ≡ v <.> w
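A sketch (assuming the basic self-dual instance on (ℝ, ℝ); the expected values follow from the spanVariance construction and are given here as an assumption, not from the package's docs):

var :: Variance (ℝ, ℝ)
var = spanVariance [(1, 0), (0, 2)]

-- Querying with dual vectors, expected:
-- (1, 0) |&> var  ≡  (1, 0)
-- (0, 1) |&> var  ≡  (0, 4)   -- the second component has the larger spread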
varianceSpanningSystem :: forall v. SimpleSpace v => Variance v -> [v] Source #
Inverse of spanVariance. Equivalent to normSpanningSystem on the dual space.
dualNorm :: SimpleSpace v => Norm v -> Variance v Source #
A proper norm induces a norm on the dual space – the “reciprocal norm”. (The orthonormal systems of the norm and its dual are mutually conjugate.) The dual norm of a seminorm is undefined.
dualNorm' :: forall v. SimpleSpace v => Variance v -> Norm v Source #
dualNorm in the opposite direction. This is actually self-inverse; with dualSpaceWitness you can replace each with the other direction.
dependence :: forall u v. (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Variance (u, v) -> u +> v Source #
Interpret a variance as a covariance between two subspaces, and normalise it by the variance on u. The result is effectively the linear regression coefficient of a simple regression of the vectors spanning the variance.
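A sketch of that interpretation: the slope relating y-samples to x-samples. (No mean-centering is done here; for an affine regression you would subtract the sample means first.)

slope :: ℝ +> ℝ
slope = dependence (spanVariance samples)
  where samples = [(1, 2), (2, 4), (3, 6)] :: [(ℝ, ℝ)]

-- slope $ 1  ≡  2, since the samples lie on the line y = 2·x.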
Utility
densifyNorm :: forall v. LSpace v => Norm v -> Norm v Source #
spanNorm / spanVariance are inefficient if the number of vectors is similar to the dimension of the space, or even larger than it. Use this function to optimise the underlying operator to a dense matrix representation.
wellDefinedNorm :: forall v. LinearSpace v => Norm v -> Maybe (Norm v) Source #
Like densifyNorm, but also perform a “sanity check” to eliminate NaN etc. problems.
Solving linear equations
(\$) :: forall u v. (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => (u +> v) -> v -> u infixr 0 Source #
Inverse function application, aka solving of a linear system:

f \$ f $ v ≡ v

f $ f \$ u ≡ u

If f does not have full rank, the behaviour is undefined. However, it does not need to be a proper isomorphism: the first of the above equations is still fulfilled if only f is injective (overdetermined system) and the second if it is surjective.

If you want to solve for multiple RHS vectors, be sure to partially apply this operator to the linear map, like

map (f \$) [v₁, v₂, ...]

Since most of the work is actually done in triangularising the operator, this may be much faster than

[f \$ v₁, f \$ v₂, ...]
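A sketch, reusing the column-wise matrix style from the introduction:

f :: (ℝ, ℝ) +> (ℝ, ℝ)
f = lfun (\x -> (x, 3*x)) ⊕ lfun (\y -> (2*y, 4*y))   -- ⎛1 2⎞ ⎝3 4⎠, full rank

x :: (ℝ, ℝ)
x = f \$ (5, 11)

-- x ≡ (1, 2), and correspondingly f $ x ≡ (5, 11).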
pseudoInverse :: forall u v. (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => (u +> v) -> v +> u Source #
roughDet :: (FiniteDimensional v, IEEE (Scalar v)) => (v +> v) -> Scalar v Source #
Approximation of the determinant.
linearRegressionW :: forall s x m y. (LinearSpace x, SimpleSpace y, SimpleSpace m, Scalar x ~ s, Scalar y ~ s, Scalar m ~ s, RealFrac' s) => Norm y -> (x -> m +> y) -> [(x, y)] -> m Source #
Simple wrapper of linearRegression.
linearRegression :: forall s x m y. (LinearSpace x, SimpleSpace y, SimpleSpace m, Scalar x ~ s, Scalar y ~ s, Scalar m ~ s, RealFrac' s) => (x -> m +> y) -> [(x, (y, Norm y))] -> LinearRegressionResult x y m Source #
data LinearRegressionResult x y m Source #
linearFit_χν² :: LinearRegressionResult x y m -> Scalar m Source #
How well the data uncertainties match the deviations from the model's synthetic data.

χν² = (1/ν) · ∑ (δy/σy)²

where ν is the number of degrees of freedom (data values minus model parameters), δy = m x − yd is the deviation from the given data to the data the model would predict (for each sample point), and σy is the a-priori measurement uncertainty of the data points.

Values χν² > 1 indicate that the data could not be described satisfactorily; χν² ≪ 1 suggests overfitting or that the data uncertainties have been postulated too high.

http://adsabs.harvard.edu/abs/1997ieas.book.....T

If the model is exactly determined or even underdetermined (i.e. ν ≤ 0), then χν² is undefined.
linearFit_bestModel :: LinearRegressionResult x y m -> m Source #
The model that best corresponds to the data, in a least-squares sense WRT the supplied norm on the data points. In other words, this is the model that minimises ∑ δy²/σy².
linearFit_modelUncertainty :: LinearRegressionResult x y m -> Norm m Source #
Eigenvalue problems
eigen :: (FiniteDimensional v, HilbertSpace v, IEEE (Scalar v)) => (v +> v) -> [(Scalar v, v)] Source #
Simple automatic finding of the eigenvalues and -vectors of a Hermitian operator, in reasonable approximation.
This works by spanning a QR-stabilised Krylov basis with constructEigenSystem until it is complete (roughEigenSystem), and then properly decoupling the system with finishEigenSystem (based on two iterations of shifted Givens rotations).

This function is a tradeoff in performance vs. accuracy. Use constructEigenSystem and finishEigenSystem directly for more quickly computing a (perhaps incomplete) approximation, or for more precise results.
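A sketch with a symmetric operator on (ℝ, ℝ) (a Hilbert space in the basic, self-dual instance):

h :: (ℝ, ℝ) +> (ℝ, ℝ)
h = lfun (\x -> (2*x, x)) ⊕ lfun (\y -> (y, 2*y))   -- ⎛2 1⎞ ⎝1 2⎠

-- eigen h should approximate the pairs (3, ~(1,1)) and (1, ~(1,−1)),
-- up to ordering and normalisation of the eigenvectors.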
constructEigenSystem Source #

Arguments
:: (LSpace v, RealFloat (Scalar v)) | |
=> Norm v | The notion of orthonormality. |
-> Scalar v | Error bound for deviations from eigen-ness. |
-> (v -+> v) | Operator to calculate the eigensystem of. Must be Hermitian WRT the scalar product defined by the given metric. |
-> [v] | Starting vector(s) for the power method. |
-> [[Eigenvector v]] | Infinite sequence of ever more accurate approximations to the eigensystem of the operator. |
Lazily compute the eigenbasis of a linear map. The algorithm is essentially a hybrid of Lanczos/Arnoldi style Krylov-spanning and QR-diagonalisation, which we don't do separately but interleave at each step.
The size of the eigen-subbasis increases with each step until the space's dimension is reached. (But the algorithm can also be used for infinite-dimensional spaces.)
roughEigenSystem :: (FiniteDimensional v, IEEE (Scalar v)) => Norm v -> (v +> v) -> [Eigenvector v] Source #
Find a system of vectors that approximate the eigensystem, in the sense that each true eigenvalue is represented by an approximate one that is closer to it than all the other approximate EVs are. This function does not make any guarantees as to how well a single eigenvalue is approximated, though.
finishEigenSystem :: forall v. (LSpace v, RealFloat (Scalar v)) => Norm v -> [Eigenvector v] -> [Eigenvector v] Source #
data Eigenvector v Source #
Constructors
Eigenvector
  ev_Eigenvalue :: Scalar v
  ev_Eigenvector :: v
  ev_FunctionApplied :: v
  ev_Deviation :: v
  ev_Badness :: Scalar v
Instances
(Show v, Show (Scalar v)) => Show (Eigenvector v) Source #
  Defined in Math.LinearMap.Category
The classes of suitable vector spaces
type LSpace v = (LinearSpace v, LinearSpace (Scalar v), LinearSpace (DualVector v), Num' (Scalar v)) Source #
The workhorse of this package: most functions here work on vector spaces that fulfill the LSpace v constraint.

In summary, this is a VectorSpace with an implementation for TensorProduct v w, for any other space w, and with a DualVector space. This fulfills DualVector (DualVector v) ~ v (this constraint is encapsulated in DualSpaceWitness).

To make a new space of yours an LSpace, you must define instances of TensorSpace and LinearSpace. In fact, LSpace is equivalent to LinearSpace, but makes the condition explicit that the scalar and dual vectors also form a linear space. LinearSpace only stores that constraint in dualSpaceWitness (to avoid UndecidableSuperclasses).
class (VectorSpace v, PseudoAffine v) => TensorSpace v where Source #
Minimal complete definition
scalarSpaceWitness, linearManifoldWitness, zeroTensor, toFlatTensor, fromFlatTensor, tensorProduct, transposeTensor, fmapTensor, fzipTensorWith, coerceFmapTensorProduct, wellDefinedTensor
Associated Types
type TensorProduct v w :: * Source #
The internal representation of a Tensor product.

For euclidean spaces, this is generally constructed by replacing each s scalar field in the v vector with an entire w vector. I.e., you then have a “nested vector” or, if v is a DualVector / “row vector”, a matrix.
Methods
scalarSpaceWitness :: ScalarSpaceWitness v Source #
linearManifoldWitness :: LinearManifoldWitness v Source #
zeroTensor :: (TensorSpace w, Scalar w ~ Scalar v) => v ⊗ w Source #
toFlatTensor :: v -+> (v ⊗ Scalar v) Source #
fromFlatTensor :: (v ⊗ Scalar v) -+> v Source #
addTensors :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w Source #
addTensors :: AdditiveGroup (TensorProduct v w) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w Source #
subtractTensors :: (TensorSpace v, TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w Source #
subtractTensors :: AdditiveGroup (TensorProduct v w) => (v ⊗ w) -> (v ⊗ w) -> v ⊗ w Source #
scaleTensor :: (TensorSpace w, Scalar w ~ Scalar v) => Bilinear (Scalar v) (v ⊗ w) (v ⊗ w) Source #
scaleTensor :: (VectorSpace (TensorProduct v w), Scalar (TensorProduct v w) ~ Scalar v) => Bilinear (Scalar v) (v ⊗ w) (v ⊗ w) Source #
negateTensor :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (v ⊗ w) Source #
negateTensor :: AdditiveGroup (TensorProduct v w) => (v ⊗ w) -+> (v ⊗ w) Source #
tensorProduct :: (TensorSpace w, Scalar w ~ Scalar v) => Bilinear v w (v ⊗ w) Source #
tensorProducts :: (TensorSpace w, Scalar w ~ Scalar v) => [(v, w)] -> v ⊗ w Source #
transposeTensor :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -+> (w ⊗ v) Source #
fmapTensor :: (TensorSpace w, TensorSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w -+> x) (v ⊗ w) (v ⊗ x) Source #
fzipTensorWith :: (TensorSpace u, TensorSpace w, TensorSpace x, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear ((w, x) -+> u) (v ⊗ w, v ⊗ x) (v ⊗ u) Source #
coerceFmapTensorProduct :: Functor p => p v -> Coercion a b -> Coercion (TensorProduct v a) (TensorProduct v b) Source #
wellDefinedVector :: v -> Maybe v Source #
“Sanity-check” a vector. This typically amounts to detecting any NaN components,
which should trigger a Nothing
result. Otherwise, the result should be Just
the input, but may also be optimised / memoised if applicable (i.e. for
function spaces).
wellDefinedVector :: Eq v => v -> Maybe v Source #
wellDefinedTensor :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ w) -> Maybe (v ⊗ w) Source #
Instances
class (TensorSpace v, Num (Scalar v)) => LinearSpace v where Source #
The class of vector spaces v for which LinearMap s v w is well-implemented.
Minimal complete definition
dualSpaceWitness, linearId, applyDualVector, applyLinear, tensorId, applyTensorFunctional, applyTensorLinMap
Associated Types
type DualVector v :: * Source #
Suitable representation of a linear map from the space v to its field.

For the usual euclidean spaces, you can just define DualVector v = v. (In this case, a dual vector will be just a “row vector” if you consider v-vectors as “column vectors”; LinearMap will then effectively have a matrix layout.)
Methods
dualSpaceWitness :: DualSpaceWitness v Source #
linearId :: v +> v Source #

idTensor :: v ⊗ DualVector v Source #
sampleLinearFunction :: (TensorSpace w, Scalar v ~ Scalar w) => (v -+> w) -+> (v +> w) Source #
toLinearForm :: DualVector v -+> (v +> Scalar v) Source #
fromLinearForm :: (v +> Scalar v) -+> DualVector v Source #
coerceDoubleDual :: Coercion v (DualVector (DualVector v)) Source #
trace :: (v +> v) -+> Scalar v Source #
contractTensorMap :: (TensorSpace w, Scalar w ~ Scalar v) => (v +> (v ⊗ w)) -+> w Source #
contractMapTensor :: (TensorSpace w, Scalar w ~ Scalar v) => (v ⊗ (v +> w)) -+> w Source #
contractTensorFn :: forall w. (TensorSpace w, Scalar w ~ Scalar v) => (v -+> (v ⊗ w)) -+> w Source #
contractLinearMapAgainst :: (LinearSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) (w -+> v) (Scalar v) Source #
applyDualVector :: LinearSpace v => Bilinear (DualVector v) v (Scalar v) Source #
applyLinear :: (TensorSpace w, Scalar w ~ Scalar v) => Bilinear (v +> w) v w Source #
composeLinear :: (LinearSpace w, TensorSpace x, Scalar w ~ Scalar v, Scalar x ~ Scalar v) => Bilinear (w +> x) (v +> w) (v +> x) Source #
tensorId :: (LinearSpace w, Scalar w ~ Scalar v) => (v ⊗ w) +> (v ⊗ w) Source #
applyTensorFunctional :: (LinearSpace u, Scalar u ~ Scalar v) => Bilinear (DualVector (v ⊗ u)) (v ⊗ u) (Scalar v) Source #
applyTensorLinMap :: (LinearSpace u, TensorSpace w, Scalar u ~ Scalar v, Scalar w ~ Scalar v) => Bilinear ((v ⊗ u) +> w) (v ⊗ u) w Source #
Instances
Orthonormal systems
class LinearSpace v => SemiInner v where Source #
SemiInner is the class of vector spaces with finite subspaces in which you can define a basis that can be used to project from the whole space into the subspace. The usual application is for using a kind of Galerkin method to give an approximate solution (see \$) to a linear equation in a possibly infinite-dimensional space.

Of course, this also works for spaces which are already finite-dimensional themselves.
Minimal complete definition
dualBasisCandidates, tensorDualBasisCandidates, symTensorDualBasisCandidates
Methods
dualBasisCandidates :: [(Int, v)] -> Forest (Int, DualVector v) Source #
Lazily enumerate choices of a basis of functionals that can be made dual to the given vectors, in order of preference (which roughly means: large in the normal direction). I.e., if the vector 𝑣 is assigned early to the dual vector 𝑣', then (𝑣' $ 𝑣) should be large and all the other products comparably small.
The purpose is that we should be able to make this basis orthonormal with a ~Gaussian-elimination approach, in a way that stays numerically stable. This is otherwise known as the choice of a pivot element.
For simple finite-dimensional array-vectors, you can easily define this method using cartesianDualBasisCandidates.
tensorDualBasisCandidates :: (SemiInner w, Scalar w ~ Scalar v) => [(Int, v ⊗ w)] -> Forest (Int, DualVector (v ⊗ w)) Source #
symTensorDualBasisCandidates :: [(Int, SymmetricTensor (Scalar v) v)] -> Forest (Int, SymmetricTensor (Scalar v) (DualVector v)) Source #
symTensorTensorDualBasisCandidates :: forall w. (SemiInner w, Scalar w ~ Scalar v) => [(Int, SymmetricTensor (Scalar v) v ⊗ w)] -> Forest (Int, SymmetricTensor (Scalar v) v +> DualVector w) Source #
Instances
cartesianDualBasisCandidates Source #
Arguments
:: [DualVector v] | Set of canonical basis functionals. |
-> (v -> [ℝ]) | Decompose a vector into absolute-value components. The list indices should correspond to those in the functional list. |
-> [(Int, v)] -> Forest (Int, DualVector v) | Suitable definition of dualBasisCandidates. |
embedFreeSubspace :: forall v t r. (HasCallStack, SemiInner v, RealFrac' (Scalar v), Traversable t) => t v -> Maybe (ReifiedLens' v (t (Scalar v))) Source #
Finite bases
class LSpace v => FiniteDimensional v where Source #
Minimal complete definition
entireBasis, enumerateSubBasis, decomposeLinMap, decomposeLinMapWithin, recomposeSB, recomposeSBTensor, recomposeLinMap, recomposeContraLinMap, recomposeContraLinMapTensor, uncanonicallyFromDual, uncanonicallyToDual
Associated Types

data SubBasis v :: * Source #

Whereas Basis-values refer to a single basis vector, a single SubBasis value represents a collection of such basis vectors, which can be used to associate a vector with a list of coefficients.

For spaces with a canonical finite basis, SubBasis does not actually need to contain any information; it can simply have the full finite basis as its only value. Even for large sparse spaces, it should only have a very coarse structure that can be shared by many vectors.
Methods
entireBasis :: SubBasis v Source #
enumerateSubBasis :: SubBasis v -> [v] Source #
subbasisDimension :: SubBasis v -> Int Source #
decomposeLinMap :: (LSpace w, Scalar w ~ Scalar v) => (v +> w) -> (SubBasis v, DList w) Source #
Split up a linear map in “column vectors” WRT some suitable basis.
decomposeLinMapWithin :: (LSpace w, Scalar w ~ Scalar v) => SubBasis v -> (v +> w) -> Either (SubBasis v, DList w) (DList w) Source #
Expand in the given basis, if possible. Else yield a superbasis of the given one, in which this is possible, and the decomposition therein.
recomposeSB :: SubBasis v -> [Scalar v] -> (v, [Scalar v]) Source #
Assemble a vector from coefficients in some basis. Return any excess coefficients.
recomposeSBTensor :: (FiniteDimensional w, Scalar w ~ Scalar v) => SubBasis v -> SubBasis w -> [Scalar v] -> (v ⊗ w, [Scalar v]) Source #
recomposeLinMap :: (LSpace w, Scalar w ~ Scalar v) => SubBasis v -> [w] -> (v +> w, [w]) Source #
recomposeContraLinMap :: (LinearSpace w, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (DualVector v) -> v +> w Source #
Given a function that interprets a coefficient-container as a vector representation, build a linear function mapping to that space.
recomposeContraLinMapTensor :: (FiniteDimensional u, LinearSpace w, Scalar u ~ Scalar v, Scalar w ~ Scalar v, Functor f) => (f (Scalar w) -> w) -> f (v +> DualVector u) -> (v ⊗ u) +> w Source #
uncanonicallyFromDual :: DualVector v -+> v Source #
The existence of a finite basis gives us an isomorphism between a space and its dual space. Note that this isomorphism is not natural (i.e. it depends on the actual choice of basis, unlike everything else in this library).
uncanonicallyToDual :: v -+> DualVector v Source #
Instances
Utility
Linear primitives
addV :: AdditiveGroup w => LinearFunction s (w, w) w Source #

scale :: VectorSpace v => Bilinear (Scalar v) v v Source #

inner :: InnerSpace v => Bilinear v v (Scalar v) Source #

flipBilin :: Bilinear v w y -> Bilinear w v y Source #

bilinearFunction :: (v -> w -> y) -> Bilinear v w y Source #
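A sketch of defining a bilinear form from its function representation (linearity in each argument is, as with lfun, not checked):

dot2 :: Bilinear (ℝ, ℝ) (ℝ, ℝ) ℝ
dot2 = bilinearFunction $ \(a, b) (c, d) -> a*c + b*d

-- Apply by unwrapping the nested LinearFunctions:
-- getLinearFunction (getLinearFunction dot2 (1, 2)) (3, 4)  ≡  11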
Tensors with basis decomposition
(.⊗) :: (TensorSpace v, HasBasis v, TensorSpace w, Num' (Scalar v), Scalar v ~ Scalar w) => Basis v -> w -> v ⊗ w infixr 7 Source #
Hilbert space operations
(·) :: TensorQuot v w => (v ⨸ w) -> v -> w infixl 7 Source #
Generalised multiplication operation. This subsumes <.>^ and *^. For scalars therefore also *, and for InnerSpace, <.>.
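A sketch in a RealSpace such as (ℝ, ℝ), where (v ⨸ ℝ) ~ DualVector v and (v ⨸ v) ~ ℝ:

-- Scaling:      (2 :: ℝ) · (3, 4)                       ≡  (6, 8)
-- Dot product:  ((1, 2) :: DualVector (ℝ, ℝ)) · (3, 4)  ≡  11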
riesz :: forall v. (FiniteDimensional v, InnerSpace v) => DualVector v -+> v Source #
The Riesz representation theorem provides an isomorphism between a Hilbert space and its (continuous) dual space.
coRiesz :: forall v. (LSpace v, InnerSpace v) => v -+> DualVector v Source #
showsPrecAsRiesz :: forall v. (FiniteDimensional v, InnerSpace v, Show v, HasBasis (Scalar v), Basis (Scalar v) ~ ()) => Int -> DualSpace v -> ShowS Source #
Functions are generally a pain to display, but since linear functionals
in a Hilbert space can be represented by vectors in that space,
this can be used for implementing a Show
instance.
(.<) :: (FiniteDimensional v, Num' (Scalar v), InnerSpace v, LSpace w, HasBasis w, Scalar v ~ Scalar w) => Basis w -> v -> v +> w infixl 7 Source #
Outer product of a general v
-vector and a basis element from w
.
Note that this operation is in general pretty inefficient; it is
provided mostly to lay out matrix definitions neatly.
Constraint synonyms
type HilbertSpace v = (LSpace v, InnerSpace v, DualVector v ~ v) Source #
type SimpleSpace v = (FiniteDimensional v, FiniteDimensional (DualVector v), SemiInner v, SemiInner (DualVector v), RealFrac' (Scalar v)) Source #
type RealSpace v = (LinearSpace v, Scalar v ~ ℝ, TensorQuot v ℝ, (v ⨸ ℝ) ~ DualVector v, TensorQuot v v, (v ⨸ v) ~ ℝ) Source #
A space in which you can use ·
both for scaling with a real number,
and as dot-product for obtaining such a number.
class (Num s, LinearSpace s, FreeVectorSpace s) => Num' s where Source #
Minimal complete definition
Nothing
Methods
closedScalarWitness :: ClosedScalarWitness s Source #
closedScalarWitness :: (Scalar s ~ s, DualVector s ~ s) => ClosedScalarWitness s Source #
trivialTensorWitness :: TrivialTensorWitness s w Source #
trivialTensorWitness :: w ~ TensorProduct s w => TrivialTensorWitness s w Source #
type Fractional' s = (Num' s, Fractional s, Eq s, VectorSpace s) Source #
type RealFrac' s = (Fractional' s, IEEE s, InnerSpace s) Source #
type RealFloat' s = (RealFrac' s, Floating s) Source #
type LinearShowable v = (Show v, RieszDecomposable v) Source #
Double-dual, scalar-scalar etc. identity
data ClosedScalarWitness s where Source #
Constructors
ClosedScalarWitness :: (Scalar s ~ s, DualVector s ~ s) => ClosedScalarWitness s |
data TrivialTensorWitness s w where Source #
Constructors
TrivialTensorWitness :: w ~ TensorProduct s w => TrivialTensorWitness s w |
data ScalarSpaceWitness v where Source #
Constructors
ScalarSpaceWitness :: (Num' (Scalar v), Scalar (Scalar v) ~ Scalar v) => ScalarSpaceWitness v |
data DualSpaceWitness v where Source #
Constructors
DualSpaceWitness :: (LinearSpace (Scalar v), DualVector (Scalar v) ~ Scalar v, LinearSpace (DualVector v), Scalar (DualVector v) ~ Scalar v, DualVector (DualVector v) ~ v) => DualSpaceWitness v |
data LinearManifoldWitness v where Source #
Constructors
LinearManifoldWitness :: (Needle v ~ v, AffineSpace v, Diff v ~ v) => BoundarylessWitness v -> LinearManifoldWitness v |
Misc
relaxNorm :: forall v. SimpleSpace v => Norm v -> [v] -> Norm v Source #
Modify a norm in such a way that the given vectors lie within its unit ball. (Not optimally – the unit ball may be bigger than necessary.)
transformNorm :: forall v w. (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Norm w -> Norm v Source #
transformVariance :: forall v w. (LSpace v, LSpace w, Scalar v ~ Scalar w) => (v +> w) -> Variance v -> Variance w Source #
findNormalLength :: forall s. RealFrac' s => Norm s -> Maybe s Source #
The unique positive number whose norm is 1 (if the norm is not constant zero).
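A sketch on the scalar space itself (the expected values follow from the spanNorm definition above):

-- findNormalLength (spanNorm [2 :: ℝ])     ≡  Just 0.5   -- since |2 · 0.5| ≡ 1
-- findNormalLength (spanNorm ([] :: [ℝ]))  ≡  Nothing    -- the constant-zero seminorm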
normalLength :: forall s. RealFrac' s => Norm s -> s Source #
Unsafe version of findNormalLength; only works reliably if the norm is actually positive definite.
summandSpaceNorms :: forall u v. (SimpleSpace u, SimpleSpace v, Scalar u ~ Scalar v) => Norm (u, v) -> (Norm u, Norm v) Source #
sumSubspaceNorms :: forall u v. (LSpace u, LSpace v, Scalar u ~ Scalar v) => Norm u -> Norm v -> Norm (u, v) Source #
sharedNormSpanningSystem :: SimpleSpace v => Norm v -> Seminorm v -> [(DualVector v, Scalar v)] Source #
For any two norms, one can find a system of co-vectors that, with suitable coefficients, spans either of them: if shSys = sharedNormSpanningSystem n₀ n₁, then

n₀ = spanNorm $ fst <$> shSys

and

n₁ = spanNorm [dv ^* η | (dv, η) <- shSys]

A rather crude approximation (roughEigenSystem) is used in this function, so do not expect the above equations to hold with great accuracy.
sharedSeminormSpanningSystem :: forall v. SimpleSpace v => Seminorm v -> Seminorm v -> [(DualVector v, Maybe (Scalar v))] Source #
Like sharedNormSpanningSystem n₀ n₁, but allows either of the norms to be singular.

n₀ = spanNorm [dv | (dv, Just _) <- shSys]

and

n₁ = spanNorm $ [dv ^* η | (dv, Just η) <- shSys]
             ++ [dv | (dv, Nothing) <- shSys]

You may also interpret a Nothing here as an “infinite eigenvalue”, i.e. it is so small as a spanning vector of n₀ that you would need to scale it by ∞ to use it for spanning n₁.
sharedSeminormSpanningSystem' :: forall v. SimpleSpace v => Seminorm v -> Seminorm v -> [v] Source #
A system of vectors which are orthogonal with respect to both of the given seminorms. (In general they are not orthonormal to either of them.)
convexPolytopeHull :: forall v. SimpleSpace v => [v] -> [DualVector v] Source #
symmetricPolytopeOuterVertices :: forall v. SimpleSpace v => [DualVector v] -> [v] Source #