diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..3c3cf45 --- /dev/null +++ b/.gitignore @@ -0,0 +1,42 @@ +# binaries +*.o +*.a +*.so +*.dylib +examples/ex_test +sandbox/* + +# Apple stuffs +*.xcodeproj +.DS_Store + +# VIM +*.swp + +# debug +*.dSYM + +# autotools +*.la +*.lo +*.Plo +autom4te.cache/* +config.h* +Makefile.in +Makefile +configure +libtool +.buildutils/m4/* +.buildutils/* +latan/.libs/* +latan/.deps/* +examples/.deps/* +examples/.libs/* +utils/.deps/* +utils/.libs/* +aclocal.m4 +config.log +config.status +stamp-h1 +myconfig.sh + diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..e69de29 diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..94a9ed0 --- /dev/null +++ b/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/ChangeLog b/ChangeLog new file mode 100644 index 0000000..e69de29 diff --git a/INSTALL b/INSTALL new file mode 100644 index 0000000..6e90e07 --- /dev/null +++ b/INSTALL @@ -0,0 +1,370 @@ +Installation Instructions +************************* + +Copyright (C) 1994-1996, 1999-2002, 2004-2012 Free Software Foundation, +Inc. + + Copying and distribution of this file, with or without modification, +are permitted in any medium without royalty provided the copyright +notice and this notice are preserved. This file is offered as-is, +without warranty of any kind. + +Basic Installation +================== + + Briefly, the shell commands `./configure; make; make install' should +configure, build, and install this package. The following +more-detailed instructions are generic; see the `README' file for +instructions specific to this package. Some packages provide this +`INSTALL' file but do not implement all of the features documented +below. The lack of an optional feature in a given package is not +necessarily a bug.
More recommendations for GNU packages can be found +in *note Makefile Conventions: (standards)Makefile Conventions. + + The `configure' shell script attempts to guess correct values for +various system-dependent variables used during compilation. It uses +those values to create a `Makefile' in each directory of the package. +It may also create one or more `.h' files containing system-dependent +definitions. Finally, it creates a shell script `config.status' that +you can run in the future to recreate the current configuration, and a +file `config.log' containing compiler output (useful mainly for +debugging `configure'). + + It can also use an optional file (typically called `config.cache' +and enabled with `--cache-file=config.cache' or simply `-C') that saves +the results of its tests to speed up reconfiguring. Caching is +disabled by default to prevent problems with accidental use of stale +cache files. + + If you need to do unusual things to compile the package, please try +to figure out how `configure' could check whether to do them, and mail +diffs or instructions to the address given in the `README' so they can +be considered for the next release. If you are using the cache, and at +some point `config.cache' contains results you don't want to keep, you +may remove or edit it. + + The file `configure.ac' (or `configure.in') is used to create +`configure' by a program called `autoconf'. You need `configure.ac' if +you want to change it or regenerate `configure' using a newer version +of `autoconf'. + + The simplest way to compile this package is: + + 1. `cd' to the directory containing the package's source code and type + `./configure' to configure the package for your system. + + Running `configure' might take a while. While running, it prints + some messages telling which features it is checking for. + + 2. Type `make' to compile the package. + + 3. Optionally, type `make check' to run any self-tests that come with + the package, generally using the just-built uninstalled binaries. + + 4. Type `make install' to install the programs and any data files and + documentation. When installing into a prefix owned by root, it is + recommended that the package be configured and built as a regular + user, and only the `make install' phase executed with root + privileges. + + 5. Optionally, type `make installcheck' to repeat any self-tests, but + this time using the binaries in their final installed location. + This target does not install anything. Running this target as a + regular user, particularly if the prior `make install' required + root privileges, verifies that the installation completed + correctly. + + 6. You can remove the program binaries and object files from the + source code directory by typing `make clean'. To also remove the + files that `configure' created (so you can compile the package for + a different kind of computer), type `make distclean'. There is + also a `make maintainer-clean' target, but that is intended mainly + for the package's developers. If you use it, you may have to get + all sorts of other programs in order to regenerate files that came + with the distribution. + + 7. Often, you can also type `make uninstall' to remove the installed + files again. In practice, not all packages have tested that + uninstallation works correctly, even though it is required by the + GNU Coding Standards. + + 8. 
Some packages, particularly those that use Automake, provide `make + distcheck', which can by used by developers to test that all other + targets like `make install' and `make uninstall' work correctly. + This target is generally not run by end users. + +Compilers and Options +===================== + + Some systems require unusual options for compilation or linking that +the `configure' script does not know about. Run `./configure --help' +for details on some of the pertinent environment variables. + + You can give `configure' initial values for configuration parameters +by setting variables in the command line or in the environment. Here +is an example: + + ./configure CC=c99 CFLAGS=-g LIBS=-lposix + + *Note Defining Variables::, for more details. + +Compiling For Multiple Architectures +==================================== + + You can compile the package for more than one kind of computer at the +same time, by placing the object files for each architecture in their +own directory. To do this, you can use GNU `make'. `cd' to the +directory where you want the object files and executables to go and run +the `configure' script. `configure' automatically checks for the +source code in the directory that `configure' is in and in `..'. This +is known as a "VPATH" build. + + With a non-GNU `make', it is safer to compile the package for one +architecture at a time in the source code directory. After you have +installed the package for one architecture, use `make distclean' before +reconfiguring for another architecture. + + On MacOS X 10.5 and later systems, you can create libraries and +executables that work on multiple system types--known as "fat" or +"universal" binaries--by specifying multiple `-arch' options to the +compiler but only a single `-arch' option to the preprocessor. Like +this: + + ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ + CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ + CPP="gcc -E" CXXCPP="g++ -E" + + This is not guaranteed to produce working output in all cases, you +may have to build one architecture at a time and combine the results +using the `lipo' tool if you have problems. + +Installation Names +================== + + By default, `make install' installs the package's commands under +`/usr/local/bin', include files under `/usr/local/include', etc. You +can specify an installation prefix other than `/usr/local' by giving +`configure' the option `--prefix=PREFIX', where PREFIX must be an +absolute file name. + + You can specify separate installation prefixes for +architecture-specific files and architecture-independent files. If you +pass the option `--exec-prefix=PREFIX' to `configure', the package uses +PREFIX as the prefix for installing programs and libraries. +Documentation and other data files still use the regular prefix. + + In addition, if you use an unusual directory layout you can give +options like `--bindir=DIR' to specify different values for particular +kinds of files. Run `configure --help' for a list of the directories +you can set and what kinds of files go in them. In general, the +default for these options is expressed in terms of `${prefix}', so that +specifying just `--prefix' will affect all of the other directory +specifications that were not explicitly provided. 
+ + The most portable way to affect installation locations is to pass the +correct locations to `configure'; however, many packages provide one or +both of the following shortcuts of passing variable assignments to the +`make install' command line to change installation locations without +having to reconfigure or recompile. + + The first method involves providing an override variable for each +affected directory. For example, `make install +prefix=/alternate/directory' will choose an alternate location for all +directory configuration variables that were expressed in terms of +`${prefix}'. Any directories that were specified during `configure', +but not in terms of `${prefix}', must each be overridden at install +time for the entire installation to be relocated. The approach of +makefile variable overrides for each directory variable is required by +the GNU Coding Standards, and ideally causes no recompilation. +However, some platforms have known limitations with the semantics of +shared libraries that end up requiring recompilation when using this +method, particularly noticeable in packages that use GNU Libtool. + + The second method involves providing the `DESTDIR' variable. For +example, `make install DESTDIR=/alternate/directory' will prepend +`/alternate/directory' before all installation names. The approach of +`DESTDIR' overrides is not required by the GNU Coding Standards, and +does not work on platforms that have drive letters. On the other hand, +it does better at avoiding recompilation issues, and works well even +when some directory options were not specified in terms of `${prefix}' +at `configure' time. + +Optional Features +================= + + If the package supports it, you can cause programs to be installed +with an extra prefix or suffix on their names by giving `configure' the +option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. + + Some packages pay attention to `--enable-FEATURE' options to +`configure', where FEATURE indicates an optional part of the package. +They may also pay attention to `--with-PACKAGE' options, where PACKAGE +is something like `gnu-as' or `x' (for the X Window System). The +`README' should mention any `--enable-' and `--with-' options that the +package recognizes. + + For packages that use the X Window System, `configure' can usually +find the X include and library files automatically, but if it doesn't, +you can use the `configure' options `--x-includes=DIR' and +`--x-libraries=DIR' to specify their locations. + + Some packages offer the ability to configure how verbose the +execution of `make' will be. For these packages, running `./configure +--enable-silent-rules' sets the default to minimal output, which can be +overridden with `make V=1'; while running `./configure +--disable-silent-rules' sets the default to verbose, which can be +overridden with `make V=0'. + +Particular systems +================== + + On HP-UX, the default C compiler is not ANSI C compatible. If GNU +CC is not installed, it is recommended to use the following options in +order to use an ANSI C compiler: + + ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" + +and if that doesn't work, install pre-built binaries of GCC for HP-UX. + + HP-UX `make' updates targets which have the same time stamps as +their prerequisites, which makes it generally unusable when shipped +generated files such as `configure' are involved. Use GNU `make' +instead. + + On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot +parse its `' header file. 
The option `-nodtk' can be used as +a workaround. If GNU CC is not installed, it is therefore recommended +to try + + ./configure CC="cc" + +and if that doesn't work, try + + ./configure CC="cc -nodtk" + + On Solaris, don't put `/usr/ucb' early in your `PATH'. This +directory contains several dysfunctional programs; working variants of +these programs are available in `/usr/bin'. So, if you need `/usr/ucb' +in your `PATH', put it _after_ `/usr/bin'. + + On Haiku, software installed for all users goes in `/boot/common', +not `/usr/local'. It is recommended to use the following options: + + ./configure --prefix=/boot/common + +Specifying the System Type +========================== + + There may be some features `configure' cannot figure out +automatically, but needs to determine by the type of machine the package +will run on. Usually, assuming the package is built to be run on the +_same_ architectures, `configure' can figure that out, but if it prints +a message saying it cannot guess the machine type, give it the +`--build=TYPE' option. TYPE can either be a short name for the system +type, such as `sun4', or a canonical name which has the form: + + CPU-COMPANY-SYSTEM + +where SYSTEM can have one of these forms: + + OS + KERNEL-OS + + See the file `config.sub' for the possible values of each field. If +`config.sub' isn't included in this package, then this package doesn't +need to know the machine type. + + If you are _building_ compiler tools for cross-compiling, you should +use the option `--target=TYPE' to select the type of system they will +produce code for. + + If you want to _use_ a cross compiler, that generates code for a +platform different from the build platform, you should specify the +"host" platform (i.e., that on which the generated programs will +eventually be run) with `--host=TYPE'. + +Sharing Defaults +================ + + If you want to set default values for `configure' scripts to share, +you can create a site shell script called `config.site' that gives +default values for variables like `CC', `cache_file', and `prefix'. +`configure' looks for `PREFIX/share/config.site' if it exists, then +`PREFIX/etc/config.site' if it exists. Or, you can set the +`CONFIG_SITE' environment variable to the location of the site script. +A warning: not all `configure' scripts look for a site script. + +Defining Variables +================== + + Variables not defined in a site shell script can be set in the +environment passed to `configure'. However, some packages may run +configure again during the build, and the customized values of these +variables may be lost. In order to avoid this problem, you should set +them in the `configure' command line, using `VAR=value'. For example: + + ./configure CC=/usr/local2/bin/gcc + +causes the specified `gcc' to be used as the C compiler (unless it is +overridden in the site shell script). + +Unfortunately, this technique does not work for `CONFIG_SHELL' due to +an Autoconf limitation. Until the limitation is lifted, you can use +this workaround: + + CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash + +`configure' Invocation +====================== + + `configure' recognizes the following options to control how it +operates. + +`--help' +`-h' + Print a summary of all of the options to `configure', and exit. + +`--help=short' +`--help=recursive' + Print a summary of the options unique to this package's + `configure', and exit. 
The `short' variant lists options used + only in the top level, while the `recursive' variant lists options + also present in any nested packages. + +`--version' +`-V' + Print the version of Autoconf used to generate the `configure' + script, and exit. + +`--cache-file=FILE' + Enable the cache: use and save the results of the tests in FILE, + traditionally `config.cache'. FILE defaults to `/dev/null' to + disable caching. + +`--config-cache' +`-C' + Alias for `--cache-file=config.cache'. + +`--quiet' +`--silent' +`-q' + Do not print messages saying which checks are being made. To + suppress all normal output, redirect it to `/dev/null' (any error + messages will still be shown). + +`--srcdir=DIR' + Look for the package's source code in directory DIR. Usually + `configure' can determine that directory automatically. + +`--prefix=DIR' + Use DIR as the installation prefix. *note Installation Names:: + for more details, including other options available for fine-tuning + the installation locations. + +`--no-create' +`-n' + Run the configure checks, but stop before creating any output + files. + +`configure' also accepts some other, not widely useful, options. Run +`configure --help' for more details. diff --git a/LatAnalyze3.xcodeproj/project.pbxproj b/LatAnalyze3.xcodeproj/project.pbxproj deleted file mode 100644 index 0fe69aa..0000000 --- a/LatAnalyze3.xcodeproj/project.pbxproj +++ /dev/null @@ -1,67 +0,0 @@ -// !$*UTF8*$! -{ - archiveVersion = 1; - classes = { - }; - objectVersion = 46; - objects = { - -/* Begin PBXGroup section */ - 69C93A9E172FFBC00041B3C8 = { - isa = PBXGroup; - children = ( - ); - sourceTree = ""; - }; -/* End PBXGroup section */ - -/* Begin PBXProject section */ - 69C93A9F172FFBC00041B3C8 /* Project object */ = { - isa = PBXProject; - attributes = { - LastUpgradeCheck = 0460; - }; - buildConfigurationList = 69C93AA2172FFBC00041B3C8 /* Build configuration list for PBXProject "LatAnalyze3" */; - compatibilityVersion = "Xcode 3.2"; - developmentRegion = English; - hasScannedForEncodings = 0; - knownRegions = ( - en, - ); - mainGroup = 69C93A9E172FFBC00041B3C8; - projectDirPath = ""; - projectRoot = ""; - targets = ( - ); - }; -/* End PBXProject section */ - -/* Begin XCBuildConfiguration section */ - 69C93AA3172FFBC00041B3C8 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - }; - name = Debug; - }; - 69C93AA4172FFBC00041B3C8 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - }; - name = Release; - }; -/* End XCBuildConfiguration section */ - -/* Begin XCConfigurationList section */ - 69C93AA2172FFBC00041B3C8 /* Build configuration list for PBXProject "LatAnalyze3" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 69C93AA3172FFBC00041B3C8 /* Debug */, - 69C93AA4172FFBC00041B3C8 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; -/* End XCConfigurationList section */ - }; - rootObject = 69C93A9F172FFBC00041B3C8 /* Project object */; -} diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 0000000..498367c --- /dev/null +++ b/Makefile.am @@ -0,0 +1,3 @@ +SUBDIRS = latan examples + +ACLOCAL_AMFLAGS = -I .buildutils/m4 diff --git a/NEWS b/NEWS new file mode 100644 index 0000000..e69de29 diff --git a/README b/README new file mode 100644 index 0000000..e69de29 diff --git a/acinclude.m4 b/acinclude.m4 new file mode 100644 index 0000000..cf791ad --- /dev/null +++ b/acinclude.m4 @@ -0,0 +1,94 @@ +AC_DEFUN([AX_GCC_OPTION], [ +AC_REQUIRE([AC_PROG_CC]) + +AC_MSG_CHECKING([if 
gcc accepts $1 option]) + +AS_IF([ test "x$GCC" = "xyes" ],[ +AS_IF([ test -z "$3" ],[ +ax_gcc_option_test="int main() +{ +return 0; +}" +],[ +ax_gcc_option_test="$3" +]) + +# Dump the test program to file +cat <<EOF > conftest.c +$ax_gcc_option_test +EOF + +# Dump back the file to the log, useful for debugging purposes +AC_TRY_COMMAND(cat conftest.c 1>&AS_MESSAGE_LOG_FD) + +AS_IF([ AC_TRY_COMMAND($CC $2 $1 -c conftest.c 1>&AS_MESSAGE_LOG_FD) ],[ +AC_MSG_RESULT([yes]) +$4 +],[ +AC_MSG_RESULT([no]) +$5 +]) +],[ +AC_MSG_RESULT([no gcc available]) +]) +]) + +AC_DEFUN([AX_GCC_VERSION], [ +GCC_VERSION="" +AX_GCC_OPTION([-dumpversion],[],[],[ +ax_gcc_version_option=yes +],[ +ax_gcc_version_option=no +]) +AS_IF([test "x$GCC" = "xyes"],[ +AS_IF([test "x$ax_gcc_version_option" != "xno"],[ +AC_CACHE_CHECK([gcc version],[ax_cv_gcc_version],[ +ax_cv_gcc_version="`$CC -dumpversion`" +AS_IF([test "x$ax_cv_gcc_version" = "x"],[ +ax_cv_gcc_version="" +]) +]) +GCC_VERSION=$ax_cv_gcc_version +]) +]) +AC_SUBST([GCC_VERSION]) +]) + +AC_DEFUN([AX_GXX_VERSION], [ +GXX_VERSION="" +AX_GCC_OPTION([-dumpversion],[],[],[ +ax_gcc_version_option=yes +],[ +ax_gcc_version_option=no +]) +AS_IF([test "x$GXX" = "xyes"],[ +AS_IF([test "x$ax_gxx_version_option" != "no"],[ +AC_CACHE_CHECK([gxx version],[ax_cv_gxx_version],[ +ax_cv_gxx_version="`$CXX -dumpversion`" +AS_IF([test "x$ax_cv_gxx_version" = "x"],[ +ax_cv_gxx_version="" +]) +]) +GXX_VERSION=$ax_cv_gxx_version +]) +]) +AC_SUBST([GXX_VERSION]) +]) + +AC_DEFUN([AX_COMPILER_VENDOR], +[ +AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, +[ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=unknown +# note: don't check for gcc first since some other compilers define __GNUC__ +for ventest in intel:__ICC,__ECC,__INTEL_COMPILER ibm:__xlc__,__xlC__,__IBMC__,__IBMCPP__ pathscale:__PATHCC__,__PATHSCALE__ gnu:__GNUC__ sun:__SUNPRO_C,__SUNPRO_CC hp:__HP_cc,__HP_aCC dec:__DECC,__DECCXX,__DECC_VER,__DECCXX_VER borland:__BORLANDC__,__TURBOC__ comeau:__COMO__ cray:_CRAYC kai:__KCC lcc:__LCC__ metrowerks:__MWERKS__ sgi:__sgi,sgi microsoft:_MSC_VER watcom:__WATCOMC__ portland:__PGI; do +vencpp="defined("`echo $ventest | cut -d: -f2 | sed 's/,/) || defined(/g'`")" +AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,[ +#if !($vencpp) +thisisanerror; +#endif +])], [ax_cv_]_AC_LANG_ABBREV[_compiler_vendor=`echo $ventest | cut -d: -f1`; break]) +done +]) +]) + + diff --git a/bootstrap.sh b/bootstrap.sh new file mode 100755 index 0000000..2d8caa3 --- /dev/null +++ b/bootstrap.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +rm -rf .buildutils +mkdir -p .buildutils/m4 +autoreconf -fvi diff --git a/build.sh b/build.sh new file mode 100755 index 0000000..112c721 --- /dev/null +++ b/build.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +PREFIX=`cat Makefile | grep '^prefix =' | awk '{print $3}'` +case $1 in + '') + echo '-- building...' + make -j3 + echo '-- installing...' + make uninstall 1>/dev/null + make install 1>/dev/null + if [[ `basename \`pwd\`` == "latan" ]] + then + echo '-- creating debug symbols...' + dsymutil .libs/liblatan.0.dylib -o ${PREFIX}/lib/liblatan.0.dylib.dSYM + fi;; + 'clean') + echo '-- cleaning...' + make -j3 clean;; + *) + echo 'error: unknown action' 1>&2 + exit 1;; +esac diff --git a/configure.ac b/configure.ac new file mode 100644 index 0000000..d1ee0b6 --- /dev/null +++ b/configure.ac @@ -0,0 +1,59 @@ +# Process this file with autoconf to produce a configure script.
+ +# Initialization +AC_PREREQ([2.64]) +AC_INIT([LatAnalyze],[3.0alpha1],[antonin.portelli@me.com],[latan]) +AC_CONFIG_AUX_DIR([.buildutils]) +AC_CONFIG_SRCDIR([latan/Global.cpp]) +AC_CONFIG_SRCDIR([examples/ex_test.cpp]) +AC_CONFIG_MACRO_DIR([.buildutils/m4]) +AM_INIT_AUTOMAKE([-Wall -Werror]) +AM_SILENT_RULES([yes]) +AC_CONFIG_HEADERS([config.h]) + +# Checks for programs. +AC_PROG_CXX +AC_PROG_AWK +AC_PROG_CC +AC_PROG_CPP +AC_PROG_INSTALL +AC_PROG_LN_S +AC_PROG_MAKE_SET +m4_ifdef([AM_PROG_AR],[AM_PROG_AR]) + +LT_INIT + +# Get compilers informations +AX_COMPILER_VENDOR +AC_DEFINE_UNQUOTED([C_COMP_VENDOR],["$ax_cv_c_compiler_vendor"], + [vendor of C compiler that will compile the code]) +AM_CONDITIONAL([CC_GNU],[test $ax_cv_c_compiler_vendor = "gnu"]) +AM_CONDITIONAL([CC_INTEL],[test $ax_cv_c_compiler_vendor = "intel"]) +AC_LANG([C++]) +AX_COMPILER_VENDOR +AC_DEFINE_UNQUOTED([CXX_COMP_VENDOR],["$ax_cv_cxx_compiler_vendor"], + [vendor of C++ compiler that will compile the code]) +AM_CONDITIONAL([CXX_GNU],[test $ax_cv_cxx_compiler_vendor = "gnu"]) +AM_CONDITIONAL([CXX_INTEL],[test $ax_cv_cxx_compiler_vendor = "intel"]) +AC_LANG([C]) +AX_GCC_VERSION +AC_DEFINE_UNQUOTED([GCC_VERSION],["$GCC_VERSION"], + [version of gcc that will compile the code]) +AX_GXX_VERSION +AC_DEFINE_UNQUOTED([GXX_VERSION],["$GXX_VERSION"], + [version of g++ that will compile the code]) + +# Checks for libraries. +AC_LANG([C++]) +AC_CHECK_LIB([m],[cos],[],[AC_MSG_ERROR([libm library not found])]) +AC_CHECK_LIB([stdc++],[main],[LIBS="-lstdc++ $LIBS"],[AC_MSG_ERROR([libstdc++ library not found])]) + +# Checks for header files. +AC_HEADER_STDC + +AC_SUBST([LIBS]) +AC_SUBST([AM_CFLAGS]) +AC_SUBST([AM_LDFLAGS]) + +AC_CONFIG_FILES([Makefile latan/Makefile examples/Makefile]) +AC_OUTPUT diff --git a/examples/Makefile.am b/examples/Makefile.am new file mode 100644 index 0000000..69c350d --- /dev/null +++ b/examples/Makefile.am @@ -0,0 +1,24 @@ +if CC_GNU + COM_CFLAGS = -Wall -W -pedantic +else +if CC_INTEL + COM_CFLAGS = -Wall +endif +endif + +if CXX_GNU + COM_CXXFLAGS = -Wall -W -pedantic +else +if CXX_INTEL + COM_CXXFLAGS = -Wall +endif +endif + +noinst_PROGRAMS = \ + ex_test + +ex_test_SOURCES = ex_test.cpp +ex_test_CFLAGS = -g -O2 +ex_test_LDFLAGS = -L../latan/.libs -llatan + +ACLOCAL_AMFLAGS = -I .buildutils/m4 diff --git a/examples/ex_test.cpp b/examples/ex_test.cpp new file mode 100644 index 0000000..51dc2fd --- /dev/null +++ b/examples/ex_test.cpp @@ -0,0 +1,20 @@ +#include +#include + +using namespace std; +using namespace Latan; + +int main(void) +{ + DMat m(2,2); + + m(0,6) = 3; + m(1,0) = 2.5; + m(0,1) = -1; + m(1,1) = m(1,0) + m(0,1); + cout << "Here is the matrix m:\n" << m << endl; + DVec v(2); + v(0) = 4; + v(1) = v(0) - 1; + cout << "Here is the vector v:\n" << v << endl; +} diff --git a/examples/latan b/examples/latan new file mode 120000 index 0000000..03578ae --- /dev/null +++ b/examples/latan @@ -0,0 +1 @@ +../latan \ No newline at end of file diff --git a/latan/Eigen/Array b/latan/Eigen/Array new file mode 100644 index 0000000..3d004fb --- /dev/null +++ b/latan/Eigen/Array @@ -0,0 +1,11 @@ +#ifndef EIGEN_ARRAY_MODULE_H +#define EIGEN_ARRAY_MODULE_H + +// include Core first to handle Eigen2 support macros +#include "Core" + +#ifndef EIGEN2_SUPPORT + #error The Eigen/Array header does no longer exist in Eigen3. All that functionality has moved to Eigen/Core. 
+#endif + +#endif // EIGEN_ARRAY_MODULE_H diff --git a/latan/Eigen/Cholesky b/latan/Eigen/Cholesky new file mode 100644 index 0000000..f727f5d --- /dev/null +++ b/latan/Eigen/Cholesky @@ -0,0 +1,32 @@ +#ifndef EIGEN_CHOLESKY_MODULE_H +#define EIGEN_CHOLESKY_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Cholesky_Module Cholesky module + * + * + * + * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices. + * Those decompositions are accessible via the following MatrixBase methods: + * - MatrixBase::llt(), + * - MatrixBase::ldlt() + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Solve.h" +#include "src/Cholesky/LLT.h" +#include "src/Cholesky/LDLT.h" +#ifdef EIGEN_USE_LAPACKE +#include "src/Cholesky/LLT_MKL.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_CHOLESKY_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/latan/Eigen/CholmodSupport b/latan/Eigen/CholmodSupport new file mode 100644 index 0000000..745b884 --- /dev/null +++ b/latan/Eigen/CholmodSupport @@ -0,0 +1,45 @@ +#ifndef EIGEN_CHOLMODSUPPORT_MODULE_H +#define EIGEN_CHOLMODSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { + #include +} + +/** \ingroup Support_modules + * \defgroup CholmodSupport_Module CholmodSupport module + * + * This module provides an interface to the Cholmod library which is part of the suitesparse package. + * It provides the two following main factorization classes: + * - class CholmodSupernodalLLT: a supernodal LLT Cholesky factorization. + * - class CholmodDecomposiiton: a general L(D)LT Cholesky factorization with automatic or explicit runtime selection of the underlying factorization method (supernodal or simplicial). + * + * For the sake of completeness, this module also propose the two following classes: + * - class CholmodSimplicialLLT + * - class CholmodSimplicialLDLT + * Note that these classes does not bring any particular advantage compared to the built-in + * SimplicialLLT and SimplicialLDLT factorization classes. + * + * \code + * #include + * \endcode + * + * In order to use this module, the cholmod headers must be accessible from the include paths, and your binary must be linked to the cholmod library and its dependencies. + * The dependencies depend on how cholmod has been compiled. + * For a cmake based project, you can use our FindCholmod.cmake module to help you in this task. + * + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/CholmodSupport/CholmodSupport.h" + + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_CHOLMODSUPPORT_MODULE_H + diff --git a/latan/Eigen/Core b/latan/Eigen/Core new file mode 100644 index 0000000..34a6bce --- /dev/null +++ b/latan/Eigen/Core @@ -0,0 +1,366 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2007-2011 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CORE_H +#define EIGEN_CORE_H + +// first thing Eigen does: stop the compiler from committing suicide +#include "src/Core/util/DisableStupidWarnings.h" + +// then include this file where all our macros are defined. 
It's really important to do it first because +// it's where we do all the alignment settings (platform detection and honoring the user's will if he +// defined e.g. EIGEN_DONT_ALIGN) so it needs to be done before we do anything with vectorization. +#include "src/Core/util/Macros.h" + +#include + +// this include file manages BLAS and MKL related macros +// and inclusion of their respective header files +#include "src/Core/util/MKL_support.h" + +// if alignment is disabled, then disable vectorization. Note: EIGEN_ALIGN is the proper check, it takes into +// account both the user's will (EIGEN_DONT_ALIGN) and our own platform checks +#if !EIGEN_ALIGN + #ifndef EIGEN_DONT_VECTORIZE + #define EIGEN_DONT_VECTORIZE + #endif +#endif + +#ifdef _MSC_VER + #include // for _aligned_malloc -- need it regardless of whether vectorization is enabled + #if (_MSC_VER >= 1500) // 2008 or later + // Remember that usage of defined() in a #define is undefined by the standard. + // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP. + #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || defined(_M_X64) + #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER + #endif + #endif +#else + // Remember that usage of defined() in a #define is undefined by the standard + #if (defined __SSE2__) && ( (!defined __GNUC__) || (defined __INTEL_COMPILER) || EIGEN_GNUC_AT_LEAST(4,2) ) + #define EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC + #endif +#endif + +#ifndef EIGEN_DONT_VECTORIZE + + #if defined (EIGEN_SSE2_ON_NON_MSVC_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER) + + // Defines symbols for compile-time detection of which instructions are + // used. + // EIGEN_VECTORIZE_YY is defined if and only if the instruction set YY is used + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_SSE + #define EIGEN_VECTORIZE_SSE2 + + // Detect sse3/ssse3/sse4: + // gcc and icc defines __SSE3__, ... + // there is no way to know about this on msvc. You can define EIGEN_VECTORIZE_SSE* if you + // want to force the use of those instructions with msvc. + #ifdef __SSE3__ + #define EIGEN_VECTORIZE_SSE3 + #endif + #ifdef __SSSE3__ + #define EIGEN_VECTORIZE_SSSE3 + #endif + #ifdef __SSE4_1__ + #define EIGEN_VECTORIZE_SSE4_1 + #endif + #ifdef __SSE4_2__ + #define EIGEN_VECTORIZE_SSE4_2 + #endif + + // include files + + // This extern "C" works around a MINGW-w64 compilation issue + // https://sourceforge.net/tracker/index.php?func=detail&aid=3018394&group_id=202880&atid=983354 + // In essence, intrin.h is included by windows.h and also declares intrinsics (just as emmintrin.h etc. below do). + // However, intrin.h uses an extern "C" declaration, and g++ thus complains of duplicate declarations + // with conflicting linkage. The linkage for intrinsics doesn't matter, but at that stage the compiler doesn't know; + // so, to avoid compile errors when windows.h is included after Eigen/Core, ensure intrinsics are extern "C" here too. + // notice that since these are C headers, the extern "C" is theoretically needed anyways. 
+ extern "C" { + #include + #include + #ifdef EIGEN_VECTORIZE_SSE3 + #include + #endif + #ifdef EIGEN_VECTORIZE_SSSE3 + #include + #endif + #ifdef EIGEN_VECTORIZE_SSE4_1 + #include + #endif + #ifdef EIGEN_VECTORIZE_SSE4_2 + #include + #endif + } // end extern "C" + #elif defined __ALTIVEC__ + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_ALTIVEC + #include + // We need to #undef all these ugly tokens defined in + // => use __vector instead of vector + #undef bool + #undef vector + #undef pixel + #elif defined __ARM_NEON__ + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_NEON + #include + #endif +#endif + +#if (defined _OPENMP) && (!defined EIGEN_DONT_PARALLELIZE) + #define EIGEN_HAS_OPENMP +#endif + +#ifdef EIGEN_HAS_OPENMP +#include +#endif + +// MSVC for windows mobile does not have the errno.h file +#if !(defined(_MSC_VER) && defined(_WIN32_WCE)) && !defined(__ARMCC_VERSION) +#define EIGEN_HAS_ERRNO +#endif + +#ifdef EIGEN_HAS_ERRNO +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // for CHAR_BIT +// for min/max: +#include + +// for outputting debug info +#ifdef EIGEN_DEBUG_ASSIGN +#include +#endif + +// required for __cpuid, needs to be included after cmath +#if defined(_MSC_VER) && (defined(_M_IX86)||defined(_M_X64)) + #include +#endif + +#if defined(_CPPUNWIND) || defined(__EXCEPTIONS) + #define EIGEN_EXCEPTIONS +#endif + +#ifdef EIGEN_EXCEPTIONS + #include +#endif + +/** \brief Namespace containing all symbols from the %Eigen library. */ +namespace Eigen { + +inline static const char *SimdInstructionSetsInUse(void) { +#if defined(EIGEN_VECTORIZE_SSE4_2) + return "SSE, SSE2, SSE3, SSSE3, SSE4.1, SSE4.2"; +#elif defined(EIGEN_VECTORIZE_SSE4_1) + return "SSE, SSE2, SSE3, SSSE3, SSE4.1"; +#elif defined(EIGEN_VECTORIZE_SSSE3) + return "SSE, SSE2, SSE3, SSSE3"; +#elif defined(EIGEN_VECTORIZE_SSE3) + return "SSE, SSE2, SSE3"; +#elif defined(EIGEN_VECTORIZE_SSE2) + return "SSE, SSE2"; +#elif defined(EIGEN_VECTORIZE_ALTIVEC) + return "AltiVec"; +#elif defined(EIGEN_VECTORIZE_NEON) + return "ARM NEON"; +#else + return "None"; +#endif +} + +} // end namespace Eigen + +#define STAGE10_FULL_EIGEN2_API 10 +#define STAGE20_RESOLVE_API_CONFLICTS 20 +#define STAGE30_FULL_EIGEN3_API 30 +#define STAGE40_FULL_EIGEN3_STRICTNESS 40 +#define STAGE99_NO_EIGEN2_SUPPORT 99 + +#if defined EIGEN2_SUPPORT_STAGE40_FULL_EIGEN3_STRICTNESS + #define EIGEN2_SUPPORT + #define EIGEN2_SUPPORT_STAGE STAGE40_FULL_EIGEN3_STRICTNESS +#elif defined EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API + #define EIGEN2_SUPPORT + #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API +#elif defined EIGEN2_SUPPORT_STAGE20_RESOLVE_API_CONFLICTS + #define EIGEN2_SUPPORT + #define EIGEN2_SUPPORT_STAGE STAGE20_RESOLVE_API_CONFLICTS +#elif defined EIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API + #define EIGEN2_SUPPORT + #define EIGEN2_SUPPORT_STAGE STAGE10_FULL_EIGEN2_API +#elif defined EIGEN2_SUPPORT + // default to stage 3, that's what it's always meant + #define EIGEN2_SUPPORT_STAGE30_FULL_EIGEN3_API + #define EIGEN2_SUPPORT_STAGE STAGE30_FULL_EIGEN3_API +#else + #define EIGEN2_SUPPORT_STAGE STAGE99_NO_EIGEN2_SUPPORT +#endif + +#ifdef EIGEN2_SUPPORT +#undef minor +#endif + +// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to +// ensure QNX/QCC support +using std::size_t; +// gcc 4.6.0 wants std:: for ptrdiff_t +using std::ptrdiff_t; + +/** \defgroup Core_Module Core module + * This is the main module of Eigen providing dense matrix and 
vector support + * (both fixed and dynamic size) with all the features corresponding to a BLAS library + * and much more... + * + * \code + * #include + * \endcode + */ + +/** \defgroup Support_modules Support modules [category] + * Category of modules which add support for external libraries. + */ + +#include "src/Core/util/Constants.h" +#include "src/Core/util/ForwardDeclarations.h" +#include "src/Core/util/Meta.h" +#include "src/Core/util/XprHelper.h" +#include "src/Core/util/StaticAssert.h" +#include "src/Core/util/Memory.h" + +#include "src/Core/NumTraits.h" +#include "src/Core/MathFunctions.h" +#include "src/Core/GenericPacketMath.h" + +#if defined EIGEN_VECTORIZE_SSE + #include "src/Core/arch/SSE/PacketMath.h" + #include "src/Core/arch/SSE/MathFunctions.h" + #include "src/Core/arch/SSE/Complex.h" +#elif defined EIGEN_VECTORIZE_ALTIVEC + #include "src/Core/arch/AltiVec/PacketMath.h" + #include "src/Core/arch/AltiVec/Complex.h" +#elif defined EIGEN_VECTORIZE_NEON + #include "src/Core/arch/NEON/PacketMath.h" + #include "src/Core/arch/NEON/Complex.h" +#endif + +#include "src/Core/arch/Default/Settings.h" + +#include "src/Core/Functors.h" +#include "src/Core/DenseCoeffsBase.h" +#include "src/Core/DenseBase.h" +#include "src/Core/MatrixBase.h" +#include "src/Core/EigenBase.h" + +#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874 + // at least confirmed with Doxygen 1.5.5 and 1.5.6 + #include "src/Core/Assign.h" +#endif + +#include "src/Core/util/BlasUtil.h" +#include "src/Core/DenseStorage.h" +#include "src/Core/NestByValue.h" +#include "src/Core/ForceAlignedAccess.h" +#include "src/Core/ReturnByValue.h" +#include "src/Core/NoAlias.h" +#include "src/Core/PlainObjectBase.h" +#include "src/Core/Matrix.h" +#include "src/Core/Array.h" +#include "src/Core/CwiseBinaryOp.h" +#include "src/Core/CwiseUnaryOp.h" +#include "src/Core/CwiseNullaryOp.h" +#include "src/Core/CwiseUnaryView.h" +#include "src/Core/SelfCwiseBinaryOp.h" +#include "src/Core/Dot.h" +#include "src/Core/StableNorm.h" +#include "src/Core/MapBase.h" +#include "src/Core/Stride.h" +#include "src/Core/Map.h" +#include "src/Core/Block.h" +#include "src/Core/VectorBlock.h" +#include "src/Core/Transpose.h" +#include "src/Core/DiagonalMatrix.h" +#include "src/Core/Diagonal.h" +#include "src/Core/DiagonalProduct.h" +#include "src/Core/PermutationMatrix.h" +#include "src/Core/Transpositions.h" +#include "src/Core/Redux.h" +#include "src/Core/Visitor.h" +#include "src/Core/Fuzzy.h" +#include "src/Core/IO.h" +#include "src/Core/Swap.h" +#include "src/Core/CommaInitializer.h" +#include "src/Core/Flagged.h" +#include "src/Core/ProductBase.h" +#include "src/Core/GeneralProduct.h" +#include "src/Core/TriangularMatrix.h" +#include "src/Core/SelfAdjointView.h" +#include "src/Core/products/GeneralBlockPanelKernel.h" +#include "src/Core/products/Parallelizer.h" +#include "src/Core/products/CoeffBasedProduct.h" +#include "src/Core/products/GeneralMatrixVector.h" +#include "src/Core/products/GeneralMatrixMatrix.h" +#include "src/Core/SolveTriangular.h" +#include "src/Core/products/GeneralMatrixMatrixTriangular.h" +#include "src/Core/products/SelfadjointMatrixVector.h" +#include "src/Core/products/SelfadjointMatrixMatrix.h" +#include "src/Core/products/SelfadjointProduct.h" +#include "src/Core/products/SelfadjointRank2Update.h" +#include "src/Core/products/TriangularMatrixVector.h" +#include "src/Core/products/TriangularMatrixMatrix.h" +#include "src/Core/products/TriangularSolverMatrix.h" +#include 
"src/Core/products/TriangularSolverVector.h" +#include "src/Core/BandMatrix.h" + +#include "src/Core/BooleanRedux.h" +#include "src/Core/Select.h" +#include "src/Core/VectorwiseOp.h" +#include "src/Core/Random.h" +#include "src/Core/Replicate.h" +#include "src/Core/Reverse.h" +#include "src/Core/ArrayBase.h" +#include "src/Core/ArrayWrapper.h" + +#ifdef EIGEN_USE_BLAS +#include "src/Core/products/GeneralMatrixMatrix_MKL.h" +#include "src/Core/products/GeneralMatrixVector_MKL.h" +#include "src/Core/products/GeneralMatrixMatrixTriangular_MKL.h" +#include "src/Core/products/SelfadjointMatrixMatrix_MKL.h" +#include "src/Core/products/SelfadjointMatrixVector_MKL.h" +#include "src/Core/products/TriangularMatrixMatrix_MKL.h" +#include "src/Core/products/TriangularMatrixVector_MKL.h" +#include "src/Core/products/TriangularSolverMatrix_MKL.h" +#endif // EIGEN_USE_BLAS + +#ifdef EIGEN_USE_MKL_VML +#include "src/Core/Assign_MKL.h" +#endif + +#include "src/Core/GlobalFunctions.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#ifdef EIGEN2_SUPPORT +#include "Eigen2Support" +#endif + +#endif // EIGEN_CORE_H diff --git a/latan/Eigen/Dense b/latan/Eigen/Dense new file mode 100644 index 0000000..5768910 --- /dev/null +++ b/latan/Eigen/Dense @@ -0,0 +1,7 @@ +#include "Core" +#include "LU" +#include "Cholesky" +#include "QR" +#include "SVD" +#include "Geometry" +#include "Eigenvalues" diff --git a/latan/Eigen/Eigen b/latan/Eigen/Eigen new file mode 100644 index 0000000..19b40ea --- /dev/null +++ b/latan/Eigen/Eigen @@ -0,0 +1,2 @@ +#include "Dense" +//#include "Sparse" diff --git a/latan/Eigen/Eigen2Support b/latan/Eigen/Eigen2Support new file mode 100644 index 0000000..36156d2 --- /dev/null +++ b/latan/Eigen/Eigen2Support @@ -0,0 +1,82 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN2SUPPORT_H +#define EIGEN2SUPPORT_H + +#if (!defined(EIGEN2_SUPPORT)) || (!defined(EIGEN_CORE_H)) +#error Eigen2 support must be enabled by defining EIGEN2_SUPPORT before including any Eigen header +#endif + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \ingroup Support_modules + * \defgroup Eigen2Support_Module Eigen2 support module + * This module provides a couple of deprecated functions improving the compatibility with Eigen2. 
+ * + * To use it, define EIGEN2_SUPPORT before including any Eigen header + * \code + * #define EIGEN2_SUPPORT + * \endcode + * + */ + +#include "src/Eigen2Support/Macros.h" +#include "src/Eigen2Support/Memory.h" +#include "src/Eigen2Support/Meta.h" +#include "src/Eigen2Support/Lazy.h" +#include "src/Eigen2Support/Cwise.h" +#include "src/Eigen2Support/CwiseOperators.h" +#include "src/Eigen2Support/TriangularSolver.h" +#include "src/Eigen2Support/Block.h" +#include "src/Eigen2Support/VectorBlock.h" +#include "src/Eigen2Support/Minor.h" +#include "src/Eigen2Support/MathFunctions.h" + + +#include "src/Core/util/ReenableStupidWarnings.h" + +// Eigen2 used to include iostream +#include + +#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ +using Eigen::Matrix##SizeSuffix##TypeSuffix; \ +using Eigen::Vector##SizeSuffix##TypeSuffix; \ +using Eigen::RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(TypeSuffix) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ + +#define EIGEN_USING_MATRIX_TYPEDEFS \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(i) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(f) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(d) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cf) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cd) + +#define USING_PART_OF_NAMESPACE_EIGEN \ +EIGEN_USING_MATRIX_TYPEDEFS \ +using Eigen::Matrix; \ +using Eigen::MatrixBase; \ +using Eigen::ei_random; \ +using Eigen::ei_real; \ +using Eigen::ei_imag; \ +using Eigen::ei_conj; \ +using Eigen::ei_abs; \ +using Eigen::ei_abs2; \ +using Eigen::ei_sqrt; \ +using Eigen::ei_exp; \ +using Eigen::ei_log; \ +using Eigen::ei_sin; \ +using Eigen::ei_cos; + +#endif // EIGEN2SUPPORT_H diff --git a/latan/Eigen/Eigenvalues b/latan/Eigen/Eigenvalues new file mode 100644 index 0000000..af99ccd --- /dev/null +++ b/latan/Eigen/Eigenvalues @@ -0,0 +1,46 @@ +#ifndef EIGEN_EIGENVALUES_MODULE_H +#define EIGEN_EIGENVALUES_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include "Cholesky" +#include "Jacobi" +#include "Householder" +#include "LU" +#include "Geometry" + +/** \defgroup Eigenvalues_Module Eigenvalues module + * + * + * + * This module mainly provides various eigenvalue solvers. 
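+ * A minimal usage sketch (illustrative only, not from the upstream Eigen
+ * documentation; the matrix values below are arbitrary placeholders):
+ * \code
+ * #include <Eigen/Eigenvalues>
+ * Eigen::Matrix3d A;
+ * A << 2, 1, 0,
+ *      1, 2, 1,
+ *      0, 1, 2;                                        // selfadjoint input
+ * Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);
+ * Eigen::Vector3d lambda = es.eigenvalues();           // eigenvalues in ascending order
+ * Eigen::Matrix3d V      = es.eigenvectors();          // columns are the eigenvectors
+ * \endcode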
+ * This module also provides some MatrixBase methods, including: + * - MatrixBase::eigenvalues(), + * - MatrixBase::operatorNorm() + * + * \code + * #include + * \endcode + */ + +#include "src/Eigenvalues/Tridiagonalization.h" +#include "src/Eigenvalues/RealSchur.h" +#include "src/Eigenvalues/EigenSolver.h" +#include "src/Eigenvalues/SelfAdjointEigenSolver.h" +#include "src/Eigenvalues/GeneralizedSelfAdjointEigenSolver.h" +#include "src/Eigenvalues/HessenbergDecomposition.h" +#include "src/Eigenvalues/ComplexSchur.h" +#include "src/Eigenvalues/ComplexEigenSolver.h" +#include "src/Eigenvalues/MatrixBaseEigenvalues.h" +#ifdef EIGEN_USE_LAPACKE +#include "src/Eigenvalues/RealSchur_MKL.h" +#include "src/Eigenvalues/ComplexSchur_MKL.h" +#include "src/Eigenvalues/SelfAdjointEigenSolver_MKL.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_EIGENVALUES_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/latan/Eigen/Geometry b/latan/Eigen/Geometry new file mode 100644 index 0000000..efd9d45 --- /dev/null +++ b/latan/Eigen/Geometry @@ -0,0 +1,63 @@ +#ifndef EIGEN_GEOMETRY_MODULE_H +#define EIGEN_GEOMETRY_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include "SVD" +#include "LU" +#include + +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +/** \defgroup Geometry_Module Geometry module + * + * + * + * This module provides support for: + * - fixed-size homogeneous transformations + * - translation, scaling, 2D and 3D rotations + * - quaternions + * - \ref MatrixBase::cross() "cross product" + * - \ref MatrixBase::unitOrthogonal() "orthognal vector generation" + * - some linear components: parametrized-lines and hyperplanes + * + * \code + * #include + * \endcode + */ + +#include "src/Geometry/OrthoMethods.h" +#include "src/Geometry/EulerAngles.h" + +#if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS + #include "src/Geometry/Homogeneous.h" + #include "src/Geometry/RotationBase.h" + #include "src/Geometry/Rotation2D.h" + #include "src/Geometry/Quaternion.h" + #include "src/Geometry/AngleAxis.h" + #include "src/Geometry/Transform.h" + #include "src/Geometry/Translation.h" + #include "src/Geometry/Scaling.h" + #include "src/Geometry/Hyperplane.h" + #include "src/Geometry/ParametrizedLine.h" + #include "src/Geometry/AlignedBox.h" + #include "src/Geometry/Umeyama.h" + + #if defined EIGEN_VECTORIZE_SSE + #include "src/Geometry/arch/Geometry_SSE.h" + #endif +#endif + +#ifdef EIGEN2_SUPPORT +#include "src/Eigen2Support/Geometry/All.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_GEOMETRY_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ + diff --git a/latan/Eigen/Householder b/latan/Eigen/Householder new file mode 100644 index 0000000..6e348db --- /dev/null +++ b/latan/Eigen/Householder @@ -0,0 +1,23 @@ +#ifndef EIGEN_HOUSEHOLDER_MODULE_H +#define EIGEN_HOUSEHOLDER_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Householder_Module Householder module + * This module provides Householder transformations. 
+ * + * \code + * #include + * \endcode + */ + +#include "src/Householder/Householder.h" +#include "src/Householder/HouseholderSequence.h" +#include "src/Householder/BlockHouseholder.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_HOUSEHOLDER_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/latan/Eigen/IterativeLinearSolvers b/latan/Eigen/IterativeLinearSolvers new file mode 100644 index 0000000..315c2dd --- /dev/null +++ b/latan/Eigen/IterativeLinearSolvers @@ -0,0 +1,40 @@ +#ifndef EIGEN_ITERATIVELINEARSOLVERS_MODULE_H +#define EIGEN_ITERATIVELINEARSOLVERS_MODULE_H + +#include "SparseCore" +#include "OrderingMethods" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \ingroup Sparse_modules + * \defgroup IterativeLinearSolvers_Module IterativeLinearSolvers module + * + * This module currently provides iterative methods to solve problems of the form \c A \c x = \c b, where \c A is a squared matrix, usually very large and sparse. + * Those solvers are accessible via the following classes: + * - ConjugateGradient for selfadjoint (hermitian) matrices, + * - BiCGSTAB for general square matrices. + * + * These iterative solvers are associated with some preconditioners: + * - IdentityPreconditioner - not really useful + * - DiagonalPreconditioner - also called JAcobi preconditioner, work very well on diagonal dominant matrices. + * - IncompleteILUT - incomplete LU factorization with dual thresholding + * + * Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport. + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/IterativeLinearSolvers/IterativeSolverBase.h" +#include "src/IterativeLinearSolvers/BasicPreconditioners.h" +#include "src/IterativeLinearSolvers/ConjugateGradient.h" +#include "src/IterativeLinearSolvers/BiCGSTAB.h" +#include "src/IterativeLinearSolvers/IncompleteLUT.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ITERATIVELINEARSOLVERS_MODULE_H diff --git a/latan/Eigen/Jacobi b/latan/Eigen/Jacobi new file mode 100644 index 0000000..ba8a4dc --- /dev/null +++ b/latan/Eigen/Jacobi @@ -0,0 +1,26 @@ +#ifndef EIGEN_JACOBI_MODULE_H +#define EIGEN_JACOBI_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup Jacobi_Module Jacobi module + * This module provides Jacobi and Givens rotations. + * + * \code + * #include + * \endcode + * + * In addition to listed classes, it defines the two following MatrixBase methods to apply a Jacobi or Givens rotation: + * - MatrixBase::applyOnTheLeft() + * - MatrixBase::applyOnTheRight(). + */ + +#include "src/Jacobi/Jacobi.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_JACOBI_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ + diff --git a/latan/Eigen/LU b/latan/Eigen/LU new file mode 100644 index 0000000..db57955 --- /dev/null +++ b/latan/Eigen/LU @@ -0,0 +1,41 @@ +#ifndef EIGEN_LU_MODULE_H +#define EIGEN_LU_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \defgroup LU_Module LU module + * This module includes %LU decomposition and related notions such as matrix inversion and determinant. 
+ * This module defines the following MatrixBase methods: + * - MatrixBase::inverse() + * - MatrixBase::determinant() + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Solve.h" +#include "src/misc/Kernel.h" +#include "src/misc/Image.h" +#include "src/LU/FullPivLU.h" +#include "src/LU/PartialPivLU.h" +#ifdef EIGEN_USE_LAPACKE +#include "src/LU/PartialPivLU_MKL.h" +#endif +#include "src/LU/Determinant.h" +#include "src/LU/Inverse.h" + +#if defined EIGEN_VECTORIZE_SSE + #include "src/LU/arch/Inverse_SSE.h" +#endif + +#ifdef EIGEN2_SUPPORT + #include "src/Eigen2Support/LU.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_LU_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/latan/Eigen/LeastSquares b/latan/Eigen/LeastSquares new file mode 100644 index 0000000..35137c2 --- /dev/null +++ b/latan/Eigen/LeastSquares @@ -0,0 +1,32 @@ +#ifndef EIGEN_REGRESSION_MODULE_H +#define EIGEN_REGRESSION_MODULE_H + +#ifndef EIGEN2_SUPPORT +#error LeastSquares is only available in Eigen2 support mode (define EIGEN2_SUPPORT) +#endif + +// exclude from normal eigen3-only documentation +#ifdef EIGEN2_SUPPORT + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include "Eigenvalues" +#include "Geometry" + +/** \defgroup LeastSquares_Module LeastSquares module + * This module provides linear regression and related features. + * + * \code + * #include + * \endcode + */ + +#include "src/Eigen2Support/LeastSquares.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN2_SUPPORT + +#endif // EIGEN_REGRESSION_MODULE_H diff --git a/latan/Eigen/OrderingMethods b/latan/Eigen/OrderingMethods new file mode 100644 index 0000000..1e2d874 --- /dev/null +++ b/latan/Eigen/OrderingMethods @@ -0,0 +1,23 @@ +#ifndef EIGEN_ORDERINGMETHODS_MODULE_H +#define EIGEN_ORDERINGMETHODS_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \ingroup Sparse_modules + * \defgroup OrderingMethods_Module OrderingMethods module + * + * This module is currently for internal use only. + * + * + * \code + * #include + * \endcode + */ + +#include "src/OrderingMethods/Amd.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_ORDERINGMETHODS_MODULE_H diff --git a/latan/Eigen/PaStiXSupport b/latan/Eigen/PaStiXSupport new file mode 100644 index 0000000..7c616ee --- /dev/null +++ b/latan/Eigen/PaStiXSupport @@ -0,0 +1,46 @@ +#ifndef EIGEN_PASTIXSUPPORT_MODULE_H +#define EIGEN_PASTIXSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include +extern "C" { +#include +#include +} + +#ifdef complex +#undef complex +#endif + +/** \ingroup Support_modules + * \defgroup PaStiXSupport_Module PaStiXSupport module + * + * This module provides an interface to the PaSTiX library. + * PaSTiX is a general \b supernodal, \b parallel and \b opensource sparse solver. + * It provides the two following main factorization classes: + * - class PastixLLT : a supernodal, parallel LLt Cholesky factorization. + * - class PastixLDLT: a supernodal, parallel LDLt Cholesky factorization. + * - class PastixLU : a supernodal, parallel LU factorization (optimized for a symmetric pattern). + * + * \code + * #include + * \endcode + * + * In order to use this module, the PaSTiX headers must be accessible from the include paths, and your binary must be linked to the PaSTiX library and its dependencies. + * The dependencies depend on how PaSTiX has been compiled. 
+ * For a cmake based project, you can use our FindPaSTiX.cmake module to help you in this task. + * + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/PaStiXSupport/PaStiXSupport.h" + + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_PASTIXSUPPORT_MODULE_H diff --git a/latan/Eigen/PardisoSupport b/latan/Eigen/PardisoSupport new file mode 100644 index 0000000..99330ce --- /dev/null +++ b/latan/Eigen/PardisoSupport @@ -0,0 +1,30 @@ +#ifndef EIGEN_PARDISOSUPPORT_MODULE_H +#define EIGEN_PARDISOSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include + +#include + +/** \ingroup Support_modules + * \defgroup PardisoSupport_Module PardisoSupport module + * + * This module brings support for the Intel(R) MKL PARDISO direct sparse solvers. + * + * \code + * #include + * \endcode + * + * In order to use this module, the MKL headers must be accessible from the include paths, and your binary must be linked to the MKL library and its dependencies. + * See this \ref TopicUsingIntelMKL "page" for more information on MKL-Eigen integration. + * + */ + +#include "src/PardisoSupport/PardisoSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_PARDISOSUPPORT_MODULE_H diff --git a/latan/Eigen/QR b/latan/Eigen/QR new file mode 100644 index 0000000..ac5b026 --- /dev/null +++ b/latan/Eigen/QR @@ -0,0 +1,45 @@ +#ifndef EIGEN_QR_MODULE_H +#define EIGEN_QR_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include "Cholesky" +#include "Jacobi" +#include "Householder" + +/** \defgroup QR_Module QR module + * + * + * + * This module provides various QR decompositions + * This module also provides some MatrixBase methods, including: + * - MatrixBase::qr(), + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Solve.h" +#include "src/QR/HouseholderQR.h" +#include "src/QR/FullPivHouseholderQR.h" +#include "src/QR/ColPivHouseholderQR.h" +#ifdef EIGEN_USE_LAPACKE +#include "src/QR/HouseholderQR_MKL.h" +#include "src/QR/ColPivHouseholderQR_MKL.h" +#endif + +#ifdef EIGEN2_SUPPORT +#include "src/Eigen2Support/QR.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#ifdef EIGEN2_SUPPORT +#include "Eigenvalues" +#endif + +#endif // EIGEN_QR_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/latan/Eigen/QtAlignedMalloc b/latan/Eigen/QtAlignedMalloc new file mode 100644 index 0000000..46f7d83 --- /dev/null +++ b/latan/Eigen/QtAlignedMalloc @@ -0,0 +1,34 @@ + +#ifndef EIGEN_QTMALLOC_MODULE_H +#define EIGEN_QTMALLOC_MODULE_H + +#include "Core" + +#if (!EIGEN_MALLOC_ALREADY_ALIGNED) + +#include "src/Core/util/DisableStupidWarnings.h" + +void *qMalloc(size_t size) +{ + return Eigen::internal::aligned_malloc(size); +} + +void qFree(void *ptr) +{ + Eigen::internal::aligned_free(ptr); +} + +void *qRealloc(void *ptr, size_t size) +{ + void* newPtr = Eigen::internal::aligned_malloc(size); + memcpy(newPtr, ptr, size); + Eigen::internal::aligned_free(ptr); + return newPtr; +} + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif + +#endif // EIGEN_QTMALLOC_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/latan/Eigen/SVD b/latan/Eigen/SVD new file mode 100644 index 0000000..fd31001 --- /dev/null +++ b/latan/Eigen/SVD @@ -0,0 +1,37 @@ +#ifndef EIGEN_SVD_MODULE_H +#define EIGEN_SVD_MODULE_H + +#include "QR" +#include "Householder" +#include "Jacobi" + +#include "src/Core/util/DisableStupidWarnings.h" + 
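+// Illustrative sketch (not part of the upstream Eigen sources): the QR and SVD
+// decompositions declared in these modules are commonly used to solve
+// least-squares problems; the sizes below are arbitrary placeholders.
+//
+//   #include <Eigen/Dense>
+//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 3);
+//   Eigen::VectorXd b = Eigen::VectorXd::Random(6);
+//   Eigen::VectorXd x1 = A.colPivHouseholderQr().solve(b);           // QR module
+//   Eigen::VectorXd x2 = A.jacobiSvd(Eigen::ComputeThinU |
+//                                    Eigen::ComputeThinV).solve(b);  // SVD module
+//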
+/** \defgroup SVD_Module SVD module + * + * + * + * This module provides SVD decomposition for matrices (both real and complex). + * This decomposition is accessible via the following MatrixBase method: + * - MatrixBase::jacobiSvd() + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Solve.h" +#include "src/SVD/JacobiSVD.h" +#if defined(EIGEN_USE_LAPACKE) && !defined(EIGEN_USE_LAPACKE_STRICT) +#include "src/SVD/JacobiSVD_MKL.h" +#endif +#include "src/SVD/UpperBidiagonalization.h" + +#ifdef EIGEN2_SUPPORT +#include "src/Eigen2Support/SVD.h" +#endif + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SVD_MODULE_H +/* vim: set filetype=cpp et sw=2 ts=2 ai: */ diff --git a/latan/Eigen/Sparse b/latan/Eigen/Sparse new file mode 100644 index 0000000..2d17571 --- /dev/null +++ b/latan/Eigen/Sparse @@ -0,0 +1,23 @@ +#ifndef EIGEN_SPARSE_MODULE_H +#define EIGEN_SPARSE_MODULE_H + +/** \defgroup Sparse_modules Sparse modules + * + * Meta-module including all related modules: + * - SparseCore + * - OrderingMethods + * - SparseCholesky + * - IterativeLinearSolvers + * + * \code + * #include + * \endcode + */ + +#include "SparseCore" +#include "OrderingMethods" +#include "SparseCholesky" +#include "IterativeLinearSolvers" + +#endif // EIGEN_SPARSE_MODULE_H + diff --git a/latan/Eigen/SparseCholesky b/latan/Eigen/SparseCholesky new file mode 100644 index 0000000..5f82742 --- /dev/null +++ b/latan/Eigen/SparseCholesky @@ -0,0 +1,30 @@ +#ifndef EIGEN_SPARSECHOLESKY_MODULE_H +#define EIGEN_SPARSECHOLESKY_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +/** \ingroup Sparse_modules + * \defgroup SparseCholesky_Module SparseCholesky module + * + * This module currently provides two variants of the direct sparse Cholesky decomposition for selfadjoint (hermitian) matrices. + * Those decompositions are accessible via the following classes: + * - SimplicialLLt, + * - SimplicialLDLt + * + * Such problems can also be solved using the ConjugateGradient solver from the IterativeLinearSolvers module. + * + * \code + * #include + * \endcode + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/SparseCholesky/SimplicialCholesky.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSECHOLESKY_MODULE_H diff --git a/latan/Eigen/SparseCore b/latan/Eigen/SparseCore new file mode 100644 index 0000000..41d28c9 --- /dev/null +++ b/latan/Eigen/SparseCore @@ -0,0 +1,66 @@ +#ifndef EIGEN_SPARSECORE_MODULE_H +#define EIGEN_SPARSECORE_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableStupidWarnings.h" + +#include +#include +#include +#include +#include + +/** \ingroup Sparse_modules + * \defgroup SparseCore_Module SparseCore module + * + * This module provides a sparse matrix representation, and basic associatd matrix manipulations + * and operations. + * + * See the \ref TutorialSparse "Sparse tutorial" + * + * \code + * #include + * \endcode + * + * This module depends on: Core. + */ + +namespace Eigen { + +/** The type used to identify a general sparse storage. 
*/ +struct Sparse {}; + +} + +#include "src/SparseCore/SparseUtil.h" +#include "src/SparseCore/SparseMatrixBase.h" +#include "src/SparseCore/CompressedStorage.h" +#include "src/SparseCore/AmbiVector.h" +#include "src/SparseCore/SparseMatrix.h" +#include "src/SparseCore/MappedSparseMatrix.h" +#include "src/SparseCore/SparseVector.h" +#include "src/SparseCore/CoreIterators.h" +#include "src/SparseCore/SparseBlock.h" +#include "src/SparseCore/SparseTranspose.h" +#include "src/SparseCore/SparseCwiseUnaryOp.h" +#include "src/SparseCore/SparseCwiseBinaryOp.h" +#include "src/SparseCore/SparseDot.h" +#include "src/SparseCore/SparsePermutation.h" +#include "src/SparseCore/SparseAssign.h" +#include "src/SparseCore/SparseRedux.h" +#include "src/SparseCore/SparseFuzzy.h" +#include "src/SparseCore/ConservativeSparseSparseProduct.h" +#include "src/SparseCore/SparseSparseProductWithPruning.h" +#include "src/SparseCore/SparseProduct.h" +#include "src/SparseCore/SparseDenseProduct.h" +#include "src/SparseCore/SparseDiagonalProduct.h" +#include "src/SparseCore/SparseTriangularView.h" +#include "src/SparseCore/SparseSelfAdjointView.h" +#include "src/SparseCore/TriangularSolver.h" +#include "src/SparseCore/SparseView.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SPARSECORE_MODULE_H + diff --git a/latan/Eigen/StdDeque b/latan/Eigen/StdDeque new file mode 100644 index 0000000..f272347 --- /dev/null +++ b/latan/Eigen/StdDeque @@ -0,0 +1,27 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STDDEQUE_MODULE_H +#define EIGEN_STDDEQUE_MODULE_H + +#include "Core" +#include + +#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */ + +#define EIGEN_DEFINE_STL_DEQUE_SPECIALIZATION(...) + +#else + +#include "src/StlSupport/StdDeque.h" + +#endif + +#endif // EIGEN_STDDEQUE_MODULE_H diff --git a/latan/Eigen/StdList b/latan/Eigen/StdList new file mode 100644 index 0000000..225c1e1 --- /dev/null +++ b/latan/Eigen/StdList @@ -0,0 +1,26 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_STDLIST_MODULE_H +#define EIGEN_STDLIST_MODULE_H + +#include "Core" +#include + +#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */ + +#define EIGEN_DEFINE_STL_LIST_SPECIALIZATION(...) + +#else + +#include "src/StlSupport/StdList.h" + +#endif + +#endif // EIGEN_STDLIST_MODULE_H diff --git a/latan/Eigen/StdVector b/latan/Eigen/StdVector new file mode 100644 index 0000000..6b22627 --- /dev/null +++ b/latan/Eigen/StdVector @@ -0,0 +1,27 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
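+// Illustrative sketch (not part of the upstream Eigen sources): this header
+// lets fixed-size vectorizable Eigen types be stored safely in std::vector,
+// e.g. with the aligned allocator shown below.
+//
+//   #include <Eigen/StdVector>
+//   std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > vecs;
+//   vecs.push_back(Eigen::Vector4f::Zero());
+//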
+ +#ifndef EIGEN_STDVECTOR_MODULE_H +#define EIGEN_STDVECTOR_MODULE_H + +#include "Core" +#include + +#if (defined(_MSC_VER) && defined(_WIN64)) /* MSVC auto aligns in 64 bit builds */ + +#define EIGEN_DEFINE_STL_VECTOR_SPECIALIZATION(...) + +#else + +#include "src/StlSupport/StdVector.h" + +#endif + +#endif // EIGEN_STDVECTOR_MODULE_H diff --git a/latan/Eigen/SuperLUSupport b/latan/Eigen/SuperLUSupport new file mode 100644 index 0000000..575e14f --- /dev/null +++ b/latan/Eigen/SuperLUSupport @@ -0,0 +1,59 @@ +#ifndef EIGEN_SUPERLUSUPPORT_MODULE_H +#define EIGEN_SUPERLUSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +#ifdef EMPTY +#define EIGEN_EMPTY_WAS_ALREADY_DEFINED +#endif + +typedef int int_t; +#include +#include +#include + +// slu_util.h defines a preprocessor token named EMPTY which is really polluting, +// so we remove it in favor of a SUPERLU_EMPTY token. +// If EMPTY was already defined then we don't undef it. + +#if defined(EIGEN_EMPTY_WAS_ALREADY_DEFINED) +# undef EIGEN_EMPTY_WAS_ALREADY_DEFINED +#elif defined(EMPTY) +# undef EMPTY +#endif + +#define SUPERLU_EMPTY (-1) + +namespace Eigen { struct SluMatrix; } + +/** \ingroup Support_modules + * \defgroup SuperLUSupport_Module SuperLUSupport module + * + * This module provides an interface to the SuperLU library. + * It provides the following factorization class: + * - class SuperLU: a supernodal sequential LU factorization. + * - class SuperILU: a supernodal sequential incomplete LU factorization (to be used as a preconditioner for iterative methods). + * + * \warning When including this module, you have to use SUPERLU_EMPTY instead of EMPTY which is no longer defined because it is too polluting. + * + * \code + * #include + * \endcode + * + * In order to use this module, the superlu headers must be accessible from the include paths, and your binary must be linked to the superlu library and its dependencies. + * The dependencies depend on how superlu has been compiled. + * For a cmake based project, you can use our FindSuperLU.cmake module to help you in this task. + * + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/SuperLUSupport/SuperLUSupport.h" + + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_SUPERLUSUPPORT_MODULE_H diff --git a/latan/Eigen/UmfPackSupport b/latan/Eigen/UmfPackSupport new file mode 100644 index 0000000..984f64a --- /dev/null +++ b/latan/Eigen/UmfPackSupport @@ -0,0 +1,36 @@ +#ifndef EIGEN_UMFPACKSUPPORT_MODULE_H +#define EIGEN_UMFPACKSUPPORT_MODULE_H + +#include "SparseCore" + +#include "src/Core/util/DisableStupidWarnings.h" + +extern "C" { +#include +} + +/** \ingroup Support_modules + * \defgroup UmfPackSupport_Module UmfPackSupport module + * + * This module provides an interface to the UmfPack library which is part of the suitesparse package. + * It provides the following factorization class: + * - class UmfPackLU: a multifrontal sequential LU factorization. + * + * \code + * #include + * \endcode + * + * In order to use this module, the umfpack headers must be accessible from the include paths, and your binary must be linked to the umfpack library and its dependencies. + * The dependencies depend on how umfpack has been compiled. + * For a cmake based project, you can use our FindUmfPack.cmake module to help you in this task. 
+ * + */ + +#include "src/misc/Solve.h" +#include "src/misc/SparseSolve.h" + +#include "src/UmfPackSupport/UmfPackSupport.h" + +#include "src/Core/util/ReenableStupidWarnings.h" + +#endif // EIGEN_UMFPACKSUPPORT_MODULE_H diff --git a/latan/Eigen/src/Cholesky/LDLT.h b/latan/Eigen/src/Cholesky/LDLT.h new file mode 100644 index 0000000..5edc9b4 --- /dev/null +++ b/latan/Eigen/src/Cholesky/LDLT.h @@ -0,0 +1,591 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// Copyright (C) 2009 Keir Mierle +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2011 Timothy E. Holy +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LDLT_H +#define EIGEN_LDLT_H + +namespace Eigen { + +namespace internal { +template struct LDLT_Traits; +} + +/** \ingroup Cholesky_Module + * + * \class LDLT + * + * \brief Robust Cholesky decomposition of a matrix with pivoting + * + * \param MatrixType the type of the matrix of which to compute the LDL^T Cholesky decomposition + * \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. + * The other triangular part won't be read. + * + * Perform a robust Cholesky decomposition of a positive semidefinite or negative semidefinite + * matrix \f$ A \f$ such that \f$ A = P^TLDL^*P \f$, where P is a permutation matrix, L + * is lower triangular with a unit diagonal and D is a diagonal matrix. + * + * The decomposition uses pivoting to ensure stability, so that L will have + * zeros in the bottom right rank(A) - n submatrix. Avoiding the square root + * on D also stabilizes the computation. + * + * Remember that Cholesky decompositions are not rank-revealing. Also, do not use a Cholesky + * decomposition to determine whether a system of equations has a solution. + * + * \sa MatrixBase::ldlt(), class LLT + */ +template class LDLT +{ + public: + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + Options = MatrixType::Options & ~RowMajorBit, // these are the options for the TmpMatrixType, we need a ColMajor matrix here! + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + UpLo = _UpLo + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; + typedef Matrix TmpMatrixType; + + typedef Transpositions TranspositionType; + typedef PermutationMatrix PermutationType; + + typedef internal::LDLT_Traits Traits; + + /** \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via LDLT::compute(const MatrixType&). + */ + LDLT() : m_matrix(), m_transpositions(), m_isInitialized(false) {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa LDLT() + */ + LDLT(Index size) + : m_matrix(size, size), + m_transpositions(size), + m_temporary(size), + m_isInitialized(false) + {} + + /** \brief Constructor with decomposition + * + * This calculates the decomposition for the input \a matrix. 
+ * \sa LDLT(Index size) + */ + LDLT(const MatrixType& matrix) + : m_matrix(matrix.rows(), matrix.cols()), + m_transpositions(matrix.rows()), + m_temporary(matrix.rows()), + m_isInitialized(false) + { + compute(matrix); + } + + /** Clear any existing decomposition + * \sa rankUpdate(w,sigma) + */ + void setZero() + { + m_isInitialized = false; + } + + /** \returns a view of the upper triangular matrix U */ + inline typename Traits::MatrixU matrixU() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return Traits::getU(m_matrix); + } + + /** \returns a view of the lower triangular matrix L */ + inline typename Traits::MatrixL matrixL() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return Traits::getL(m_matrix); + } + + /** \returns the permutation matrix P as a transposition sequence. + */ + inline const TranspositionType& transpositionsP() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_transpositions; + } + + /** \returns the coefficients of the diagonal matrix D */ + inline Diagonal vectorD() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_matrix.diagonal(); + } + + /** \returns true if the matrix is positive (semidefinite) */ + inline bool isPositive() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_sign == 1; + } + + #ifdef EIGEN2_SUPPORT + inline bool isPositiveDefinite() const + { + return isPositive(); + } + #endif + + /** \returns true if the matrix is negative (semidefinite) */ + inline bool isNegative(void) const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_sign == -1; + } + + /** \returns a solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * This function also supports in-place solves using the syntax x = decompositionObject.solve(x) . + * + * \note_about_checking_solutions + * + * More precisely, this method solves \f$ A x = b \f$ using the decomposition \f$ A = P^T L D L^* P \f$ + * by solving the systems \f$ P^T y_1 = b \f$, \f$ L y_2 = y_1 \f$, \f$ D y_3 = y_2 \f$, + * \f$ L^* y_4 = y_3 \f$ and \f$ P x = y_4 \f$ in succession. If the matrix \f$ A \f$ is singular, then + * \f$ D \f$ will also be singular (all the other matrices are invertible). In that case, the + * least-square solution of \f$ D y_3 = y_2 \f$ is computed. This does not mean that this function + * computes the least-square solution of \f$ A x = b \f$ is \f$ A \f$ is singular. 
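+ * A minimal usage sketch (illustrative only, not from the upstream Eigen
+ * documentation; the random data below is a placeholder):
+ * \code
+ * #include <Eigen/Dense>
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
+ * Eigen::MatrixXd S = A * A.transpose();               // symmetric positive semidefinite
+ * Eigen::VectorXd b = Eigen::VectorXd::Random(4);
+ * Eigen::VectorXd x = S.ldlt().solve(b);               // solves P^T L D L^* P x = b
+ * \endcode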
+ * + * \sa MatrixBase::ldlt() + */ + template + inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_matrix.rows()==b.rows() + && "LDLT::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + #ifdef EIGEN2_SUPPORT + template + bool solve(const MatrixBase& b, ResultType *result) const + { + *result = this->solve(b); + return true; + } + #endif + + template + bool solveInPlace(MatrixBase &bAndX) const; + + LDLT& compute(const MatrixType& matrix); + + template + LDLT& rankUpdate(const MatrixBase& w,RealScalar alpha=1); + + /** \returns the internal LDLT decomposition matrix + * + * TODO: document the storage layout + */ + inline const MatrixType& matrixLDLT() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return m_matrix; + } + + MatrixType reconstructedMatrix() const; + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "LDLT is not initialized."); + return Success; + } + + protected: + + /** \internal + * Used to compute and store the Cholesky decomposition A = L D L^* = U^* D U. + * The strict upper part is used during the decomposition, the strict lower + * part correspond to the coefficients of L (its diagonal is equal to 1 and + * is not stored), and the diagonal entries correspond to D. + */ + MatrixType m_matrix; + TranspositionType m_transpositions; + TmpMatrixType m_temporary; + int m_sign; + bool m_isInitialized; +}; + +namespace internal { + +template struct ldlt_inplace; + +template<> struct ldlt_inplace +{ + template + static bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0) + { + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + eigen_assert(mat.rows()==mat.cols()); + const Index size = mat.rows(); + + if (size <= 1) + { + transpositions.setIdentity(); + if(sign) + *sign = real(mat.coeff(0,0))>0 ? 1:-1; + return true; + } + + RealScalar cutoff(0), biggest_in_corner; + + for (Index k = 0; k < size; ++k) + { + // Find largest diagonal element + Index index_of_biggest_in_corner; + biggest_in_corner = mat.diagonal().tail(size-k).cwiseAbs().maxCoeff(&index_of_biggest_in_corner); + index_of_biggest_in_corner += k; + + if(k == 0) + { + // The biggest overall is the point of reference to which further diagonals + // are compared; if any diagonal is negligible compared + // to the largest overall, the algorithm bails. + cutoff = abs(NumTraits::epsilon() * biggest_in_corner); + + if(sign) + *sign = real(mat.diagonal().coeff(index_of_biggest_in_corner)) > 0 ? 1 : -1; + } + + // Finish early if the matrix is not full rank. 
+ if(biggest_in_corner < cutoff) + { + for(Index i = k; i < size; i++) transpositions.coeffRef(i) = i; + break; + } + + transpositions.coeffRef(k) = index_of_biggest_in_corner; + if(k != index_of_biggest_in_corner) + { + // apply the transposition while taking care to consider only + // the lower triangular part + Index s = size-index_of_biggest_in_corner-1; // trailing size after the biggest element + mat.row(k).head(k).swap(mat.row(index_of_biggest_in_corner).head(k)); + mat.col(k).tail(s).swap(mat.col(index_of_biggest_in_corner).tail(s)); + std::swap(mat.coeffRef(k,k),mat.coeffRef(index_of_biggest_in_corner,index_of_biggest_in_corner)); + for(int i=k+1;i::IsComplex) + mat.coeffRef(index_of_biggest_in_corner,k) = conj(mat.coeff(index_of_biggest_in_corner,k)); + } + + // partition the matrix: + // A00 | - | - + // lu = A10 | A11 | - + // A20 | A21 | A22 + Index rs = size - k - 1; + Block A21(mat,k+1,k,rs,1); + Block A10(mat,k,0,1,k); + Block A20(mat,k+1,0,rs,k); + + if(k>0) + { + temp.head(k) = mat.diagonal().head(k).asDiagonal() * A10.adjoint(); + mat.coeffRef(k,k) -= (A10 * temp.head(k)).value(); + if(rs>0) + A21.noalias() -= A20 * temp.head(k); + } + if((rs>0) && (abs(mat.coeffRef(k,k)) > cutoff)) + A21 /= mat.coeffRef(k,k); + } + + return true; + } + + // Reference for the algorithm: Davis and Hager, "Multiple Rank + // Modifications of a Sparse Cholesky Factorization" (Algorithm 1) + // Trivial rearrangements of their computations (Timothy E. Holy) + // allow their algorithm to work for rank-1 updates even if the + // original matrix is not of full rank. + // Here only rank-1 updates are implemented, to reduce the + // requirement for intermediate storage and improve accuracy + template + static bool updateInPlace(MatrixType& mat, MatrixBase& w, typename MatrixType::RealScalar sigma=1) + { + using internal::isfinite; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + + const Index size = mat.rows(); + eigen_assert(mat.cols() == size && w.size()==size); + + RealScalar alpha = 1; + + // Apply the update + for (Index j = 0; j < size; j++) + { + // Check for termination due to an original decomposition of low-rank + if (!(isfinite)(alpha)) + break; + + // Update the diagonal terms + RealScalar dj = real(mat.coeff(j,j)); + Scalar wj = w.coeff(j); + RealScalar swj2 = sigma*abs2(wj); + RealScalar gamma = dj*alpha + swj2; + + mat.coeffRef(j,j) += swj2/alpha; + alpha += swj2/dj; + + + // Update the terms of L + Index rs = size-j-1; + w.tail(rs) -= wj * mat.col(j).tail(rs); + if(gamma != 0) + mat.col(j).tail(rs) += (sigma*conj(wj)/gamma)*w.tail(rs); + } + return true; + } + + template + static bool update(MatrixType& mat, const TranspositionType& transpositions, Workspace& tmp, const WType& w, typename MatrixType::RealScalar sigma=1) + { + // Apply the permutation to the input w + tmp = transpositions * w; + + return ldlt_inplace::updateInPlace(mat,tmp,sigma); + } +}; + +template<> struct ldlt_inplace +{ + template + static EIGEN_STRONG_INLINE bool unblocked(MatrixType& mat, TranspositionType& transpositions, Workspace& temp, int* sign=0) + { + Transpose matt(mat); + return ldlt_inplace::unblocked(matt, transpositions, temp, sign); + } + + template + static EIGEN_STRONG_INLINE bool update(MatrixType& mat, TranspositionType& transpositions, Workspace& tmp, WType& w, typename MatrixType::RealScalar sigma=1) + { + Transpose matt(mat); + return ldlt_inplace::update(matt, transpositions, tmp, 
w.conjugate(), sigma); + } +}; + +template struct LDLT_Traits +{ + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m; } + static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } +}; + +template struct LDLT_Traits +{ + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); } + static inline MatrixU getU(const MatrixType& m) { return m; } +}; + +} // end namespace internal + +/** Compute / recompute the LDLT decomposition A = L D L^* = U^* D U of \a matrix + */ +template +LDLT& LDLT::compute(const MatrixType& a) +{ + eigen_assert(a.rows()==a.cols()); + const Index size = a.rows(); + + m_matrix = a; + + m_transpositions.resize(size); + m_isInitialized = false; + m_temporary.resize(size); + + internal::ldlt_inplace::unblocked(m_matrix, m_transpositions, m_temporary, &m_sign); + + m_isInitialized = true; + return *this; +} + +/** Update the LDLT decomposition: given A = L D L^T, efficiently compute the decomposition of A + sigma w w^T. + * \param w a vector to be incorporated into the decomposition. + * \param sigma a scalar, +1 for updates and -1 for "downdates," which correspond to removing previously-added column vectors. Optional; default value is +1. + * \sa setZero() + */ +template +template +LDLT& LDLT::rankUpdate(const MatrixBase& w,typename NumTraits::Real sigma) +{ + const Index size = w.rows(); + if (m_isInitialized) + { + eigen_assert(m_matrix.rows()==size); + } + else + { + m_matrix.resize(size,size); + m_matrix.setZero(); + m_transpositions.resize(size); + for (Index i = 0; i < size; i++) + m_transpositions.coeffRef(i) = i; + m_temporary.resize(size); + m_sign = sigma>=0 ? 1 : -1; + m_isInitialized = true; + } + + internal::ldlt_inplace::update(m_matrix, m_transpositions, m_temporary, w, sigma); + + return *this; +} + +namespace internal { +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef LDLT<_MatrixType,_UpLo> LDLTType; + EIGEN_MAKE_SOLVE_HELPERS(LDLTType,Rhs) + + template void evalTo(Dest& dst) const + { + eigen_assert(rhs().rows() == dec().matrixLDLT().rows()); + // dst = P b + dst = dec().transpositionsP() * rhs(); + + // dst = L^-1 (P b) + dec().matrixL().solveInPlace(dst); + + // dst = D^-1 (L^-1 P b) + // more precisely, use pseudo-inverse of D (see bug 241) + using std::abs; + using std::max; + typedef typename LDLTType::MatrixType MatrixType; + typedef typename LDLTType::Scalar Scalar; + typedef typename LDLTType::RealScalar RealScalar; + const Diagonal vectorD = dec().vectorD(); + RealScalar tolerance = (max)(vectorD.array().abs().maxCoeff() * NumTraits::epsilon(), + RealScalar(1) / NumTraits::highest()); // motivated by LAPACK's xGELSS + for (Index i = 0; i < vectorD.size(); ++i) { + if(abs(vectorD(i)) > tolerance) + dst.row(i) /= vectorD(i); + else + dst.row(i).setZero(); + } + + // dst = L^-T (D^-1 L^-1 P b) + dec().matrixU().solveInPlace(dst); + + // dst = P^-1 (L^-T D^-1 L^-1 P b) = A^-1 b + dst = dec().transpositionsP().transpose() * dst; + } +}; +} + +/** \internal use x = ldlt_object.solve(x); + * + * This is the \em in-place version of solve(). + * + * \param bAndX represents both the right-hand side matrix b and result x. + * + * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. 
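+ *
+ * A minimal usage sketch (the matrix \c A and right-hand side \c b below are
+ * illustrative placeholders, not taken from this documentation):
+ * \code
+ * Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4);
+ * A = A * A.adjoint();                  // make A selfadjoint positive semidefinite
+ * Eigen::VectorXd b = Eigen::VectorXd::Random(4);
+ * Eigen::LDLT<Eigen::MatrixXd> ldlt(A);
+ * ldlt.solveInPlace(b);                 // b now holds the solution x of A x = b
+ * \endcode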
+ * + * This version avoids a copy when the right hand side matrix b is not + * needed anymore. + * + * \sa LDLT::solve(), MatrixBase::ldlt() + */ +template +template +bool LDLT::solveInPlace(MatrixBase &bAndX) const +{ + eigen_assert(m_isInitialized && "LDLT is not initialized."); + eigen_assert(m_matrix.rows() == bAndX.rows()); + + bAndX = this->solve(bAndX); + + return true; +} + +/** \returns the matrix represented by the decomposition, + * i.e., it returns the product: P^T L D L^* P. + * This function is provided for debug purpose. */ +template +MatrixType LDLT::reconstructedMatrix() const +{ + eigen_assert(m_isInitialized && "LDLT is not initialized."); + const Index size = m_matrix.rows(); + MatrixType res(size,size); + + // P + res.setIdentity(); + res = transpositionsP() * res; + // L^* P + res = matrixU() * res; + // D(L^*P) + res = vectorD().asDiagonal() * res; + // L(DL^*P) + res = matrixL() * res; + // P^T (LDL^*P) + res = transpositionsP().transpose() * res; + + return res; +} + +/** \cholesky_module + * \returns the Cholesky decomposition with full pivoting without square root of \c *this + */ +template +inline const LDLT::PlainObject, UpLo> +SelfAdjointView::ldlt() const +{ + return LDLT(m_matrix); +} + +/** \cholesky_module + * \returns the Cholesky decomposition with full pivoting without square root of \c *this + */ +template +inline const LDLT::PlainObject> +MatrixBase::ldlt() const +{ + return LDLT(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_LDLT_H diff --git a/latan/Eigen/src/Cholesky/LLT.h b/latan/Eigen/src/Cholesky/LLT.h new file mode 100644 index 0000000..41d14e5 --- /dev/null +++ b/latan/Eigen/src/Cholesky/LLT.h @@ -0,0 +1,488 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_LLT_H +#define EIGEN_LLT_H + +namespace Eigen { + +namespace internal{ +template struct LLT_Traits; +} + +/** \ingroup Cholesky_Module + * + * \class LLT + * + * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition + * \param UpLo the triangular part that will be used for the decompositon: Lower (default) or Upper. + * The other triangular part won't be read. + * + * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite + * matrix A such that A = LL^* = U^*U, where L is lower triangular. + * + * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b, + * for that purpose, we recommend the Cholesky decomposition without square root which is more stable + * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other + * situations like generalised eigen problems with hermitian matrices. + * + * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices, + * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations + * has a solution. 
+ * + * Example: \include LLT_example.cpp + * Output: \verbinclude LLT_example.out + * + * \sa MatrixBase::llt(), class LDLT + */ + /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH) + * Note that during the decomposition, only the upper triangular part of A is considered. Therefore, + * the strict lower part does not have to store correct values. + */ +template class LLT +{ + public: + typedef _MatrixType MatrixType; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + Options = MatrixType::Options, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime + }; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename MatrixType::Index Index; + + enum { + PacketSize = internal::packet_traits::size, + AlignmentMask = int(PacketSize)-1, + UpLo = _UpLo + }; + + typedef internal::LLT_Traits Traits; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via LLT::compute(const MatrixType&). + */ + LLT() : m_matrix(), m_isInitialized(false) {} + + /** \brief Default Constructor with memory preallocation + * + * Like the default constructor but with preallocation of the internal data + * according to the specified problem \a size. + * \sa LLT() + */ + LLT(Index size) : m_matrix(size, size), + m_isInitialized(false) {} + + LLT(const MatrixType& matrix) + : m_matrix(matrix.rows(), matrix.cols()), + m_isInitialized(false) + { + compute(matrix); + } + + /** \returns a view of the upper triangular matrix U */ + inline typename Traits::MatrixU matrixU() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return Traits::getU(m_matrix); + } + + /** \returns a view of the lower triangular matrix L */ + inline typename Traits::MatrixL matrixL() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return Traits::getL(m_matrix); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * Since this LLT class assumes anyway that the matrix A is invertible, the solution + * theoretically exists and is unique regardless of b. + * + * Example: \include LLT_solve.cpp + * Output: \verbinclude LLT_solve.out + * + * \sa solveInPlace(), MatrixBase::llt() + */ + template + inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_matrix.rows()==b.rows() + && "LLT::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + #ifdef EIGEN2_SUPPORT + template + bool solve(const MatrixBase& b, ResultType *result) const + { + *result = this->solve(b); + return true; + } + + bool isPositiveDefinite() const { return true; } + #endif + + template + void solveInPlace(MatrixBase &bAndX) const; + + LLT& compute(const MatrixType& matrix); + + /** \returns the LLT decomposition matrix + * + * TODO: document the storage layout + */ + inline const MatrixType& matrixLLT() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return m_matrix; + } + + MatrixType reconstructedMatrix() const; + + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. 
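+      *
+      * A hedged sketch of the intended check (the matrix \c A is an assumed placeholder):
+      * \code
+      * Eigen::LLT<Eigen::MatrixXd> llt(A);   // A: selfadjoint matrix to factorize (assumed)
+      * if (llt.info() != Eigen::Success)
+      *   std::cout << "A does not appear to be positive definite" << std::endl;
+      * \endcode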
+ */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + return m_info; + } + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + template + LLT rankUpdate(const VectorType& vec, const RealScalar& sigma = 1); + + protected: + /** \internal + * Used to compute and store L + * The strict upper part is not used and even not initialized. + */ + MatrixType m_matrix; + bool m_isInitialized; + ComputationInfo m_info; +}; + +namespace internal { + +template struct llt_inplace; + +template +static typename MatrixType::Index llt_rank_update_lower(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef typename MatrixType::Index Index; + typedef typename MatrixType::ColXpr ColXpr; + typedef typename internal::remove_all::type ColXprCleaned; + typedef typename ColXprCleaned::SegmentReturnType ColXprSegment; + typedef Matrix TempVectorType; + typedef typename TempVectorType::SegmentReturnType TempVecSegment; + + int n = mat.cols(); + eigen_assert(mat.rows()==n && vec.size()==n); + + TempVectorType temp; + + if(sigma>0) + { + // This version is based on Givens rotations. + // It is faster than the other one below, but only works for updates, + // i.e., for sigma > 0 + temp = sqrt(sigma) * vec; + + for(int i=0; i g; + g.makeGivens(mat(i,i), -temp(i), &mat(i,i)); + + int rs = n-i-1; + if(rs>0) + { + ColXprSegment x(mat.col(i).tail(rs)); + TempVecSegment y(temp.tail(rs)); + apply_rotation_in_the_plane(x, y, g); + } + } + } + else + { + temp = vec; + RealScalar beta = 1; + for(int j=0; j struct llt_inplace +{ + typedef typename NumTraits::Real RealScalar; + template + static typename MatrixType::Index unblocked(MatrixType& mat) + { + typedef typename MatrixType::Index Index; + + eigen_assert(mat.rows()==mat.cols()); + const Index size = mat.rows(); + for(Index k = 0; k < size; ++k) + { + Index rs = size-k-1; // remaining size + + Block A21(mat,k+1,k,rs,1); + Block A10(mat,k,0,1,k); + Block A20(mat,k+1,0,rs,k); + + RealScalar x = real(mat.coeff(k,k)); + if (k>0) x -= A10.squaredNorm(); + if (x<=RealScalar(0)) + return k; + mat.coeffRef(k,k) = x = sqrt(x); + if (k>0 && rs>0) A21.noalias() -= A20 * A10.adjoint(); + if (rs>0) A21 *= RealScalar(1)/x; + } + return -1; + } + + template + static typename MatrixType::Index blocked(MatrixType& m) + { + typedef typename MatrixType::Index Index; + eigen_assert(m.rows()==m.cols()); + Index size = m.rows(); + if(size<32) + return unblocked(m); + + Index blockSize = size/8; + blockSize = (blockSize/16)*16; + blockSize = (std::min)((std::max)(blockSize,Index(8)), Index(128)); + + for (Index k=0; k A11(m,k, k, bs,bs); + Block A21(m,k+bs,k, rs,bs); + Block A22(m,k+bs,k+bs,rs,rs); + + Index ret; + if((ret=unblocked(A11))>=0) return k+ret; + if(rs>0) A11.adjoint().template triangularView().template solveInPlace(A21); + if(rs>0) A22.template selfadjointView().rankUpdate(A21,-1); // bottleneck + } + return -1; + } + + template + static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) + { + return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); + } +}; + +template struct llt_inplace +{ + typedef typename NumTraits::Real RealScalar; + + template + static EIGEN_STRONG_INLINE typename MatrixType::Index unblocked(MatrixType& mat) + { + Transpose matt(mat); + return 
llt_inplace::unblocked(matt); + } + template + static EIGEN_STRONG_INLINE typename MatrixType::Index blocked(MatrixType& mat) + { + Transpose matt(mat); + return llt_inplace::blocked(matt); + } + template + static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const RealScalar& sigma) + { + Transpose matt(mat); + return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); + } +}; + +template struct LLT_Traits +{ + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m; } + static inline MatrixU getU(const MatrixType& m) { return m.adjoint(); } + static bool inplace_decomposition(MatrixType& m) + { return llt_inplace::blocked(m)==-1; } +}; + +template struct LLT_Traits +{ + typedef const TriangularView MatrixL; + typedef const TriangularView MatrixU; + static inline MatrixL getL(const MatrixType& m) { return m.adjoint(); } + static inline MatrixU getU(const MatrixType& m) { return m; } + static bool inplace_decomposition(MatrixType& m) + { return llt_inplace::blocked(m)==-1; } +}; + +} // end namespace internal + +/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix + * + * \returns a reference to *this + * + * Example: \include TutorialLinAlgComputeTwice.cpp + * Output: \verbinclude TutorialLinAlgComputeTwice.out + */ +template +LLT& LLT::compute(const MatrixType& a) +{ + eigen_assert(a.rows()==a.cols()); + const Index size = a.rows(); + m_matrix.resize(size, size); + m_matrix = a; + + m_isInitialized = true; + bool ok = Traits::inplace_decomposition(m_matrix); + m_info = ok ? Success : NumericalIssue; + + return *this; +} + +/** Performs a rank one update (or dowdate) of the current decomposition. + * If A = LL^* before the rank one update, + * then after it we have LL^* = A + sigma * v v^* where \a v must be a vector + * of same dimension. + */ +template +template +LLT<_MatrixType,_UpLo> LLT<_MatrixType,_UpLo>::rankUpdate(const VectorType& v, const RealScalar& sigma) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType); + eigen_assert(v.size()==m_matrix.cols()); + eigen_assert(m_isInitialized); + if(internal::llt_inplace::rankUpdate(m_matrix,v,sigma)>=0) + m_info = NumericalIssue; + else + m_info = Success; + + return *this; +} + +namespace internal { +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef LLT<_MatrixType,UpLo> LLTType; + EIGEN_MAKE_SOLVE_HELPERS(LLTType,Rhs) + + template void evalTo(Dest& dst) const + { + dst = rhs(); + dec().solveInPlace(dst); + } +}; +} + +/** \internal use x = llt_object.solve(x); + * + * This is the \em in-place version of solve(). + * + * \param bAndX represents both the right-hand side matrix b and result x. + * + * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. + * + * This version avoids a copy when the right hand side matrix b is not + * needed anymore. + * + * \sa LLT::solve(), MatrixBase::llt() + */ +template +template +void LLT::solveInPlace(MatrixBase &bAndX) const +{ + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(m_matrix.rows()==bAndX.rows()); + matrixL().solveInPlace(bAndX); + matrixU().solveInPlace(bAndX); +} + +/** \returns the matrix represented by the decomposition, + * i.e., it returns the product: L L^*. + * This function is provided for debug purpose. 
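+  *
+  * For instance, a hedged sanity check (the \c llt object and its input matrix
+  * \c A are assumed to exist already):
+  * \code
+  * double relative_error = (llt.reconstructedMatrix() - A).norm() / A.norm();
+  * \endcode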
*/ +template +MatrixType LLT::reconstructedMatrix() const +{ + eigen_assert(m_isInitialized && "LLT is not initialized."); + return matrixL() * matrixL().adjoint().toDenseMatrix(); +} + +/** \cholesky_module + * \returns the LLT decomposition of \c *this + */ +template +inline const LLT::PlainObject> +MatrixBase::llt() const +{ + return LLT(derived()); +} + +/** \cholesky_module + * \returns the LLT decomposition of \c *this + */ +template +inline const LLT::PlainObject, UpLo> +SelfAdjointView::llt() const +{ + return LLT(m_matrix); +} + +} // end namespace Eigen + +#endif // EIGEN_LLT_H diff --git a/latan/Eigen/src/Cholesky/LLT_MKL.h b/latan/Eigen/src/Cholesky/LLT_MKL.h new file mode 100644 index 0000000..64daa44 --- /dev/null +++ b/latan/Eigen/src/Cholesky/LLT_MKL.h @@ -0,0 +1,102 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. + + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * LLt decomposition based on LAPACKE_?potrf function. + ******************************************************************************** +*/ + +#ifndef EIGEN_LLT_MKL_H +#define EIGEN_LLT_MKL_H + +#include "Eigen/src/Core/util/MKL_support.h" +#include + +namespace Eigen { + +namespace internal { + +template struct mkl_llt; + +#define EIGEN_MKL_LLT(EIGTYPE, MKLTYPE, MKLPREFIX) \ +template<> struct mkl_llt \ +{ \ + template \ + static inline typename MatrixType::Index potrf(MatrixType& m, char uplo) \ + { \ + lapack_int matrix_order; \ + lapack_int size, lda, info, StorageOrder; \ + EIGTYPE* a; \ + eigen_assert(m.rows()==m.cols()); \ + /* Set up parameters for ?potrf */ \ + size = m.rows(); \ + StorageOrder = MatrixType::Flags&RowMajorBit?RowMajor:ColMajor; \ + matrix_order = StorageOrder==RowMajor ? LAPACK_ROW_MAJOR : LAPACK_COL_MAJOR; \ + a = &(m.coeffRef(0,0)); \ + lda = m.outerStride(); \ +\ + info = LAPACKE_##MKLPREFIX##potrf( matrix_order, uplo, size, (MKLTYPE*)a, lda ); \ + info = (info==0) ? 
Success : NumericalIssue; \ + return info; \ + } \ +}; \ +template<> struct llt_inplace \ +{ \ + template \ + static typename MatrixType::Index blocked(MatrixType& m) \ + { \ + return mkl_llt::potrf(m, 'L'); \ + } \ + template \ + static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ + { return Eigen::internal::llt_rank_update_lower(mat, vec, sigma); } \ +}; \ +template<> struct llt_inplace \ +{ \ + template \ + static typename MatrixType::Index blocked(MatrixType& m) \ + { \ + return mkl_llt::potrf(m, 'U'); \ + } \ + template \ + static typename MatrixType::Index rankUpdate(MatrixType& mat, const VectorType& vec, const typename MatrixType::RealScalar& sigma) \ + { \ + Transpose matt(mat); \ + return llt_inplace::rankUpdate(matt, vec.conjugate(), sigma); \ + } \ +}; + +EIGEN_MKL_LLT(double, double, d) +EIGEN_MKL_LLT(float, float, s) +EIGEN_MKL_LLT(dcomplex, MKL_Complex16, z) +EIGEN_MKL_LLT(scomplex, MKL_Complex8, c) + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_LLT_MKL_H diff --git a/latan/Eigen/src/CholmodSupport/CholmodSupport.h b/latan/Eigen/src/CholmodSupport/CholmodSupport.h new file mode 100644 index 0000000..37f1421 --- /dev/null +++ b/latan/Eigen/src/CholmodSupport/CholmodSupport.h @@ -0,0 +1,579 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CHOLMODSUPPORT_H +#define EIGEN_CHOLMODSUPPORT_H + +namespace Eigen { + +namespace internal { + +template +void cholmod_configure_matrix(CholmodType& mat) +{ + if (internal::is_same::value) + { + mat.xtype = CHOLMOD_REAL; + mat.dtype = CHOLMOD_SINGLE; + } + else if (internal::is_same::value) + { + mat.xtype = CHOLMOD_REAL; + mat.dtype = CHOLMOD_DOUBLE; + } + else if (internal::is_same >::value) + { + mat.xtype = CHOLMOD_COMPLEX; + mat.dtype = CHOLMOD_SINGLE; + } + else if (internal::is_same >::value) + { + mat.xtype = CHOLMOD_COMPLEX; + mat.dtype = CHOLMOD_DOUBLE; + } + else + { + eigen_assert(false && "Scalar type not supported by CHOLMOD"); + } +} + +} // namespace internal + +/** Wraps the Eigen sparse matrix \a mat into a Cholmod sparse matrix object. + * Note that the data are shared. 
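+ *
+ * A minimal sketch of the wrapping (the sparse matrix \c S is an assumed
+ * placeholder, filled and compressed elsewhere):
+ * \code
+ * Eigen::SparseMatrix<double> S(n,n);        // assumed: n-by-n, already filled and compressed
+ * cholmod_sparse C = Eigen::viewAsCholmod(S); // C shares S's index and value arrays, no copy
+ * \endcode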
+ */ +template +cholmod_sparse viewAsCholmod(SparseMatrix<_Scalar,_Options,_Index>& mat) +{ + typedef SparseMatrix<_Scalar,_Options,_Index> MatrixType; + cholmod_sparse res; + res.nzmax = mat.nonZeros(); + res.nrow = mat.rows();; + res.ncol = mat.cols(); + res.p = mat.outerIndexPtr(); + res.i = mat.innerIndexPtr(); + res.x = mat.valuePtr(); + res.sorted = 1; + if(mat.isCompressed()) + { + res.packed = 1; + } + else + { + res.packed = 0; + res.nz = mat.innerNonZeroPtr(); + } + + res.dtype = 0; + res.stype = -1; + + if (internal::is_same<_Index,int>::value) + { + res.itype = CHOLMOD_INT; + } + else + { + eigen_assert(false && "Index type different than int is not supported yet"); + } + + // setup res.xtype + internal::cholmod_configure_matrix<_Scalar>(res); + + res.stype = 0; + + return res; +} + +template +const cholmod_sparse viewAsCholmod(const SparseMatrix<_Scalar,_Options,_Index>& mat) +{ + cholmod_sparse res = viewAsCholmod(mat.const_cast_derived()); + return res; +} + +/** Returns a view of the Eigen sparse matrix \a mat as Cholmod sparse matrix. + * The data are not copied but shared. */ +template +cholmod_sparse viewAsCholmod(const SparseSelfAdjointView, UpLo>& mat) +{ + cholmod_sparse res = viewAsCholmod(mat.matrix().const_cast_derived()); + + if(UpLo==Upper) res.stype = 1; + if(UpLo==Lower) res.stype = -1; + + return res; +} + +/** Returns a view of the Eigen \b dense matrix \a mat as Cholmod dense matrix. + * The data are not copied but shared. */ +template +cholmod_dense viewAsCholmod(MatrixBase& mat) +{ + EIGEN_STATIC_ASSERT((internal::traits::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + typedef typename Derived::Scalar Scalar; + + cholmod_dense res; + res.nrow = mat.rows(); + res.ncol = mat.cols(); + res.nzmax = res.nrow * res.ncol; + res.d = Derived::IsVectorAtCompileTime ? mat.derived().size() : mat.derived().outerStride(); + res.x = mat.derived().data(); + res.z = 0; + + internal::cholmod_configure_matrix(res); + + return res; +} + +/** Returns a view of the Cholmod sparse matrix \a cm as an Eigen sparse matrix. + * The data are not copied but shared. 
*/ +template +MappedSparseMatrix viewAsEigen(cholmod_sparse& cm) +{ + return MappedSparseMatrix + (cm.nrow, cm.ncol, reinterpret_cast(cm.p)[cm.ncol], + reinterpret_cast(cm.p), reinterpret_cast(cm.i),reinterpret_cast(cm.x) ); +} + +enum CholmodMode { + CholmodAuto, CholmodSimplicialLLt, CholmodSupernodalLLt, CholmodLDLt +}; + + +/** \ingroup CholmodSupport_Module + * \class CholmodBase + * \brief The base class for the direct Cholesky factorization of Cholmod + * \sa class CholmodSupernodalLLT, class CholmodSimplicialLDLT, class CholmodSimplicialLLT + */ +template +class CholmodBase : internal::noncopyable +{ + public: + typedef _MatrixType MatrixType; + enum { UpLo = _UpLo }; + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef MatrixType CholMatrixType; + typedef typename MatrixType::Index Index; + + public: + + CholmodBase() + : m_cholmodFactor(0), m_info(Success), m_isInitialized(false) + { + cholmod_start(&m_cholmod); + } + + CholmodBase(const MatrixType& matrix) + : m_cholmodFactor(0), m_info(Success), m_isInitialized(false) + { + cholmod_start(&m_cholmod); + compute(matrix); + } + + ~CholmodBase() + { + if(m_cholmodFactor) + cholmod_free_factor(&m_cholmodFactor, &m_cholmod); + cholmod_finish(&m_cholmod); + } + + inline Index cols() const { return m_cholmodFactor->n; } + inline Index rows() const { return m_cholmodFactor->n; } + + Derived& derived() { return *static_cast(this); } + const Derived& derived() const { return *static_cast(this); } + + /** \brief Reports whether previous computation was successful. + * + * \returns \c Success if computation was succesful, + * \c NumericalIssue if the matrix.appears to be negative. + */ + ComputationInfo info() const + { + eigen_assert(m_isInitialized && "Decomposition is not initialized."); + return m_info; + } + + /** Computes the sparse Cholesky decomposition of \a matrix */ + Derived& compute(const MatrixType& matrix) + { + analyzePattern(matrix); + factorize(matrix); + return derived(); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::solve_retval + solve(const MatrixBase& b) const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(rows()==b.rows() + && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b"); + return internal::solve_retval(*this, b.derived()); + } + + /** \returns the solution x of \f$ A x = b \f$ using the current decomposition of A. + * + * \sa compute() + */ + template + inline const internal::sparse_solve_retval + solve(const SparseMatrixBase& b) const + { + eigen_assert(m_isInitialized && "LLT is not initialized."); + eigen_assert(rows()==b.rows() + && "CholmodDecomposition::solve(): invalid number of rows of the right hand side matrix b"); + return internal::sparse_solve_retval(*this, b.derived()); + } + + /** Performs a symbolic decomposition on the sparcity of \a matrix. + * + * This function is particularly useful when solving for several problems having the same structure. 
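+      *
+      * For example (a sketch; the solver, matrices and right-hand sides are assumed
+      * placeholders, and \c A2 must have the same sparsity pattern as \c A1):
+      * \code
+      * solver.analyzePattern(A1);   // symbolic step, done once
+      * solver.factorize(A1);        // numeric step
+      * x1 = solver.solve(b1);
+      * solver.factorize(A2);        // reuse the symbolic analysis
+      * x2 = solver.solve(b2);
+      * \endcode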
+ * + * \sa factorize() + */ + void analyzePattern(const MatrixType& matrix) + { + if(m_cholmodFactor) + { + cholmod_free_factor(&m_cholmodFactor, &m_cholmod); + m_cholmodFactor = 0; + } + cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); + m_cholmodFactor = cholmod_analyze(&A, &m_cholmod); + + this->m_isInitialized = true; + this->m_info = Success; + m_analysisIsOk = true; + m_factorizationIsOk = false; + } + + /** Performs a numeric decomposition of \a matrix + * + * The given matrix must has the same sparcity than the matrix on which the symbolic decomposition has been performed. + * + * \sa analyzePattern() + */ + void factorize(const MatrixType& matrix) + { + eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); + cholmod_sparse A = viewAsCholmod(matrix.template selfadjointView()); + cholmod_factorize(&A, m_cholmodFactor, &m_cholmod); + + this->m_info = Success; + m_factorizationIsOk = true; + } + + /** Returns a reference to the Cholmod's configuration structure to get a full control over the performed operations. + * See the Cholmod user guide for details. */ + cholmod_common& cholmod() { return m_cholmod; } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + template + void _solve(const MatrixBase &b, MatrixBase &dest) const + { + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + const Index size = m_cholmodFactor->n; + eigen_assert(size==b.rows()); + + // note: cd stands for Cholmod Dense + cholmod_dense b_cd = viewAsCholmod(b.const_cast_derived()); + cholmod_dense* x_cd = cholmod_solve(CHOLMOD_A, m_cholmodFactor, &b_cd, &m_cholmod); + if(!x_cd) + { + this->m_info = NumericalIssue; + } + // TODO optimize this copy by swapping when possible (be carreful with alignment, etc.) + dest = Matrix::Map(reinterpret_cast(x_cd->x),b.rows(),b.cols()); + cholmod_free_dense(&x_cd, &m_cholmod); + } + + /** \internal */ + template + void _solve(const SparseMatrix &b, SparseMatrix &dest) const + { + eigen_assert(m_factorizationIsOk && "The decomposition is not in a valid state for solving, you must first call either compute() or symbolic()/numeric()"); + const Index size = m_cholmodFactor->n; + eigen_assert(size==b.rows()); + + // note: cs stands for Cholmod Sparse + cholmod_sparse b_cs = viewAsCholmod(b); + cholmod_sparse* x_cs = cholmod_spsolve(CHOLMOD_A, m_cholmodFactor, &b_cs, &m_cholmod); + if(!x_cs) + { + this->m_info = NumericalIssue; + } + // TODO optimize this copy by swapping when possible (be carreful with alignment, etc.) + dest = viewAsEigen(*x_cs); + cholmod_free_sparse(&x_cs, &m_cholmod); + } + #endif // EIGEN_PARSED_BY_DOXYGEN + + template + void dumpMemory(Stream& s) + {} + + protected: + mutable cholmod_common m_cholmod; + cholmod_factor* m_cholmodFactor; + mutable ComputationInfo m_info; + bool m_isInitialized; + int m_factorizationIsOk; + int m_analysisIsOk; +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodSimplicialLLT + * \brief A simplicial direct Cholesky (LLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a simplicial LL^T Cholesky factorization + * using the Cholmod library. + * This simplicial variant is equivalent to Eigen's built-in SimplicialLLT class. Thefore, it has little practical interest. + * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. 
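+ *
+ * A hedged usage sketch (\c A and \c b are assumed to be an already assembled
+ * selfadjoint positive definite SparseMatrix<double> and a dense right-hand side):
+ * \code
+ * Eigen::CholmodSimplicialLLT<Eigen::SparseMatrix<double> > solver(A);
+ * Eigen::VectorXd x = solver.solve(b);
+ * \endcode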
+ * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLLT + */ +template +class CholmodSimplicialLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSimplicialLLT() : Base() { init(); } + + CholmodSimplicialLLT(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~CholmodSimplicialLLT() {} + protected: + void init() + { + m_cholmod.final_asis = 0; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + m_cholmod.final_ll = 1; + } +}; + + +/** \ingroup CholmodSupport_Module + * \class CholmodSimplicialLDLT + * \brief A simplicial direct Cholesky (LDLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a simplicial LDL^T Cholesky factorization + * using the Cholmod library. + * This simplicial variant is equivalent to Eigen's built-in SimplicialLDLT class. Thefore, it has little practical interest. + * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \sa \ref TutorialSparseDirectSolvers, class CholmodSupernodalLLT, class SimplicialLDLT + */ +template +class CholmodSimplicialLDLT : public CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSimplicialLDLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSimplicialLDLT() : Base() { init(); } + + CholmodSimplicialLDLT(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~CholmodSimplicialLDLT() {} + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodSupernodalLLT + * \brief A supernodal Cholesky (LLT) factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a supernodal LL^T Cholesky factorization + * using the Cholmod library. + * This supernodal variant performs best on dense enough problems, e.g., 3D FEM, or very high order 2D FEM. + * The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. 
+ * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class CholmodSupernodalLLT : public CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodSupernodalLLT> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodSupernodalLLT() : Base() { init(); } + + CholmodSupernodalLLT(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~CholmodSupernodalLLT() {} + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SUPERNODAL; + } +}; + +/** \ingroup CholmodSupport_Module + * \class CholmodDecomposition + * \brief A general Cholesky factorization and solver based on Cholmod + * + * This class allows to solve for A.X = B sparse linear problems via a LL^T or LDL^T Cholesky factorization + * using the Cholmod library. The sparse matrix A must be selfajoint and positive definite. The vectors or matrices + * X and B can be either dense or sparse. + * + * This variant permits to change the underlying Cholesky method at runtime. + * On the other hand, it does not provide access to the result of the factorization. + * The default is to let Cholmod automatically choose between a simplicial and supernodal factorization. + * + * \tparam _MatrixType the type of the sparse matrix A, it must be a SparseMatrix<> + * \tparam _UpLo the triangular part that will be used for the computations. It can be Lower + * or Upper. Default is Lower. + * + * This class supports all kind of SparseMatrix<>: row or column major; upper, lower, or both; compressed or non compressed. + * + * \sa \ref TutorialSparseDirectSolvers + */ +template +class CholmodDecomposition : public CholmodBase<_MatrixType, _UpLo, CholmodDecomposition<_MatrixType, _UpLo> > +{ + typedef CholmodBase<_MatrixType, _UpLo, CholmodDecomposition> Base; + using Base::m_cholmod; + + public: + + typedef _MatrixType MatrixType; + + CholmodDecomposition() : Base() { init(); } + + CholmodDecomposition(const MatrixType& matrix) : Base() + { + init(); + compute(matrix); + } + + ~CholmodDecomposition() {} + + void setMode(CholmodMode mode) + { + switch(mode) + { + case CholmodAuto: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_AUTO; + break; + case CholmodSimplicialLLt: + m_cholmod.final_asis = 0; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + m_cholmod.final_ll = 1; + break; + case CholmodSupernodalLLt: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SUPERNODAL; + break; + case CholmodLDLt: + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_SIMPLICIAL; + break; + default: + break; + } + } + protected: + void init() + { + m_cholmod.final_asis = 1; + m_cholmod.supernodal = CHOLMOD_AUTO; + } +}; + +namespace internal { + +template +struct solve_retval, Rhs> + : solve_retval_base, Rhs> +{ + typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec; + EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +template +struct sparse_solve_retval, Rhs> + : sparse_solve_retval_base, Rhs> +{ + typedef CholmodBase<_MatrixType,_UpLo,Derived> Dec; + EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs) + + template void evalTo(Dest& dst) const + { + dec()._solve(rhs(),dst); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_CHOLMODSUPPORT_H diff --git a/latan/Eigen/src/Core/Array.h b/latan/Eigen/src/Core/Array.h new file mode 100644 index 0000000..aaa3899 --- /dev/null +++ 
b/latan/Eigen/src/Core/Array.h @@ -0,0 +1,308 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAY_H +#define EIGEN_ARRAY_H + +namespace Eigen { + +/** \class Array + * \ingroup Core_Module + * + * \brief General-purpose arrays with easy API for coefficient-wise operations + * + * The %Array class is very similar to the Matrix class. It provides + * general-purpose one- and two-dimensional arrays. The difference between the + * %Array and the %Matrix class is primarily in the API: the API for the + * %Array class provides easy access to coefficient-wise operations, while the + * API for the %Matrix class provides easy access to linear-algebra + * operations. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAY_PLUGIN. + * + * \sa \ref TutorialArrayClass, \ref TopicClassHierarchy + */ +namespace internal { +template +struct traits > : traits > +{ + typedef ArrayXpr XprKind; + typedef ArrayBase > XprBase; +}; +} + +template +class Array + : public PlainObjectBase > +{ + public: + + typedef PlainObjectBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Array) + + enum { Options = _Options }; + typedef typename Base::PlainObject PlainObject; + + protected: + template + friend struct internal::conservative_resize_like_impl; + + using Base::m_storage; + + public: + + using Base::base; + using Base::coeff; + using Base::coeffRef; + + /** + * The usage of + * using Base::operator=; + * fails on MSVC. Since the code below is working with GCC and MSVC, we skipped + * the usage of 'using'. This should be done only for operator=. + */ + template + EIGEN_STRONG_INLINE Array& operator=(const EigenBase &other) + { + return Base::operator=(other); + } + + /** Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_STRONG_INLINE Array& operator=(const ArrayBase& other) + { + return Base::_set(other); + } + + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + EIGEN_STRONG_INLINE Array& operator=(const Array& other) + { + return Base::_set(other); + } + + /** Default constructor. + * + * For fixed-size matrices, does nothing. + * + * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix + * is called a null matrix. This constructor is the unique way to create null matrices: resizing + * a matrix to 0 is not supported. + * + * \sa resize(Index,Index) + */ + EIGEN_STRONG_INLINE explicit Array() : Base() + { + Base::_check_template_params(); + EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + // FIXME is it still needed ?? 
+ /** \internal */ + Array(internal::constructor_without_unaligned_array_assert) + : Base(internal::constructor_without_unaligned_array_assert()) + { + Base::_check_template_params(); + EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } +#endif + + /** Constructs a vector or row-vector with given dimension. \only_for_vectors + * + * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, + * it is redundant to pass the dimension here, so it makes more sense to use the default + * constructor Matrix() instead. + */ + EIGEN_STRONG_INLINE explicit Array(Index dim) + : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Array) + eigen_assert(dim >= 0); + eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); + EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_STRONG_INLINE Array(const T0& x, const T1& y) + { + Base::_check_template_params(); + this->template _init2(x, y); + } + #else + /** constructs an uninitialized matrix with \a rows rows and \a cols columns. + * + * This is useful for dynamic-size matrices. For fixed-size matrices, + * it is redundant to pass these parameters, so one should use the default constructor + * Matrix() instead. */ + Array(Index rows, Index cols); + /** constructs an initialized 2D vector with given coefficients */ + Array(const Scalar& x, const Scalar& y); + #endif + + /** constructs an initialized 3D vector with given coefficients */ + EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 3) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + } + /** constructs an initialized 4D vector with given coefficients */ + EIGEN_STRONG_INLINE Array(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Array, 4) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + m_storage.data()[3] = w; + } + + explicit Array(const Scalar *data); + + /** Constructor copying the value of the expression \a other */ + template + EIGEN_STRONG_INLINE Array(const ArrayBase& other) + : Base(other.rows() * other.cols(), other.rows(), other.cols()) + { + Base::_check_template_params(); + Base::_set_noalias(other); + } + /** Copy constructor */ + EIGEN_STRONG_INLINE Array(const Array& other) + : Base(other.rows() * other.cols(), other.rows(), other.cols()) + { + Base::_check_template_params(); + Base::_set_noalias(other); + } + /** Copy constructor with in-place evaluation */ + template + EIGEN_STRONG_INLINE Array(const ReturnByValue& other) + { + Base::_check_template_params(); + Base::resize(other.rows(), other.cols()); + other.evalTo(*this); + } + + /** \sa MatrixBase::operator=(const EigenBase&) */ + template + EIGEN_STRONG_INLINE Array(const EigenBase &other) + : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) + { + Base::_check_template_params(); + Base::resize(other.rows(), other.cols()); + *this = other; + } + + /** Override MatrixBase::swap() since for dynamic-sized matrices of same type it is enough to swap the + * data pointers. 
+ */ + template + void swap(ArrayBase const & other) + { this->_swap(other.derived()); } + + inline Index innerStride() const { return 1; } + inline Index outerStride() const { return this->innerSize(); } + + #ifdef EIGEN_ARRAY_PLUGIN + #include EIGEN_ARRAY_PLUGIN + #endif + + private: + + template + friend struct internal::matrix_swap_impl; +}; + +/** \defgroup arraytypedefs Global array typedefs + * \ingroup Core_Module + * + * Eigen defines several typedef shortcuts for most common 1D and 2D array types. + * + * The general patterns are the following: + * + * \c ArrayRowsColsType where \c Rows and \c Cols can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, + * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd + * for complex double. + * + * For example, \c Array33d is a fixed-size 3x3 array type of doubles, and \c ArrayXXf is a dynamic-size matrix of floats. + * + * There are also \c ArraySizeType which are self-explanatory. For example, \c Array4cf is + * a fixed-size 1D array of 4 complex floats. + * + * \sa class Array + */ + +#define EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ +/** \ingroup arraytypedefs */ \ +typedef Array Array##SizeSuffix##SizeSuffix##TypeSuffix; \ +/** \ingroup arraytypedefs */ \ +typedef Array Array##SizeSuffix##TypeSuffix; + +#define EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ +/** \ingroup arraytypedefs */ \ +typedef Array Array##Size##X##TypeSuffix; \ +/** \ingroup arraytypedefs */ \ +typedef Array Array##X##Size##TypeSuffix; + +#define EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ +EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 2, 2) \ +EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 3, 3) \ +EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, 4, 4) \ +EIGEN_MAKE_ARRAY_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ +EIGEN_MAKE_ARRAY_FIXED_TYPEDEFS(Type, TypeSuffix, 4) + +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(int, i) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(float, f) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(double, d) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cf) +EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES(std::complex, cd) + +#undef EIGEN_MAKE_ARRAY_TYPEDEFS_ALL_SIZES +#undef EIGEN_MAKE_ARRAY_TYPEDEFS + +#undef EIGEN_MAKE_ARRAY_TYPEDEFS_LARGE + +#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ +using Eigen::Matrix##SizeSuffix##TypeSuffix; \ +using Eigen::Vector##SizeSuffix##TypeSuffix; \ +using Eigen::RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(TypeSuffix) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ + +#define EIGEN_USING_ARRAY_TYPEDEFS \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(i) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(f) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(d) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cf) \ +EIGEN_USING_ARRAY_TYPEDEFS_FOR_TYPE(cd) + +} // end namespace Eigen + +#endif // EIGEN_ARRAY_H diff --git a/latan/Eigen/src/Core/ArrayBase.h b/latan/Eigen/src/Core/ArrayBase.h new file mode 100644 index 0000000..004b117 --- /dev/null +++ b/latan/Eigen/src/Core/ArrayBase.h @@ -0,0 +1,228 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear 
algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAYBASE_H +#define EIGEN_ARRAYBASE_H + +namespace Eigen { + +template class MatrixWrapper; + +/** \class ArrayBase + * \ingroup Core_Module + * + * \brief Base class for all 1D and 2D array, and related expressions + * + * An array is similar to a dense vector or matrix. While matrices are mathematical + * objects with well defined linear algebra operators, an array is just a collection + * of scalar values arranged in a one or two dimensionnal fashion. As the main consequence, + * all operations applied to an array are performed coefficient wise. Furthermore, + * arrays support scalar math functions of the c++ standard library (e.g., std::sin(x)), and convenient + * constructors allowing to easily write generic code working for both scalar values + * and arrays. + * + * This class is the base that is inherited by all array expression types. + * + * \tparam Derived is the derived type, e.g., an array or an expression type. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_ARRAYBASE_PLUGIN. + * + * \sa class MatrixBase, \ref TopicClassHierarchy + */ +template class ArrayBase + : public DenseBase +{ + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** The base class for a given storage type. */ + typedef ArrayBase StorageBaseType; + + typedef ArrayBase Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl; + + using internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real>::operator*; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + typedef DenseBase Base; + using Base::RowsAtCompileTime; + using Base::ColsAtCompileTime; + using Base::SizeAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::IsVectorAtCompileTime; + using Base::Flags; + using Base::CoeffReadCost; + + using Base::derived; + using Base::const_cast_derived; + using Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + using Base::lazyAssign; + using Base::operator=; + using Base::operator+=; + using Base::operator-=; + using Base::operator*=; + using Base::operator/=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal the plain matrix type corresponding to this expression. Note that is not necessarily + * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const + * reference to a matrix, not a matrix! It is however guaranteed that the return type of eval() is either + * PlainObject or const PlainObject&. + */ + typedef Array::Scalar, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime, + AutoAlign | (internal::traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), + internal::traits::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime + > PlainObject; + + + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp,Derived> ConstantReturnType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::ArrayBase +# include "../plugins/CommonCwiseUnaryOps.h" +# include "../plugins/MatrixCwiseUnaryOps.h" +# include "../plugins/ArrayCwiseUnaryOps.h" +# include "../plugins/CommonCwiseBinaryOps.h" +# include "../plugins/MatrixCwiseBinaryOps.h" +# include "../plugins/ArrayCwiseBinaryOps.h" +# ifdef EIGEN_ARRAYBASE_PLUGIN +# include EIGEN_ARRAYBASE_PLUGIN +# endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + Derived& operator=(const ArrayBase& other) + { + return internal::assign_selector::run(derived(), other.derived()); + } + + Derived& operator+=(const Scalar& scalar) + { return *this = derived() + scalar; } + Derived& operator-=(const Scalar& scalar) + { return *this = derived() - scalar; } + + template + Derived& operator+=(const ArrayBase& other); + template + Derived& operator-=(const ArrayBase& other); + + template + Derived& operator*=(const ArrayBase& other); + + template + Derived& operator/=(const ArrayBase& other); + + public: + ArrayBase& array() { return *this; } + const ArrayBase& array() const { return *this; } + + /** \returns an \link MatrixBase Matrix \endlink expression of this array + * \sa MatrixBase::array() */ + MatrixWrapper matrix() { return derived(); } + const MatrixWrapper matrix() const { return derived(); } + +// template +// inline void evalTo(Dest& dst) const { dst = matrix(); } + + protected: + ArrayBase() : Base() {} + + private: + explicit ArrayBase(Index); + ArrayBase(Index,Index); + template explicit ArrayBase(const ArrayBase&); + protected: + // mixing arrays and matrices is not legal + template Derived& operator+=(const MatrixBase& ) + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} + // mixing arrays and matrices is not legal + template Derived& operator-=(const MatrixBase& ) + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} +}; + +/** replaces \c *this by \c *this - \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_STRONG_INLINE Derived & +ArrayBase::operator-=(const ArrayBase &other) +{ + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + tmp = other.derived(); + return derived(); +} + +/** replaces \c *this by \c *this + \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_STRONG_INLINE Derived & +ArrayBase::operator+=(const ArrayBase& other) +{ + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + tmp = other.derived(); + return derived(); +} + +/** replaces \c *this by \c *this * \a other coefficient wise. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_STRONG_INLINE Derived & +ArrayBase::operator*=(const ArrayBase& other) +{ + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + tmp = other.derived(); + return derived(); +} + +/** replaces \c *this by \c *this / \a other coefficient wise. 
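+ *
+ * A short illustrative sketch (the array contents are assumed for the example):
+ * \code
+ * Eigen::ArrayXd a = Eigen::ArrayXd::LinSpaced(4, 1.0, 4.0);
+ * Eigen::ArrayXd b = Eigen::ArrayXd::Constant(4, 2.0);
+ * a /= b;   // each coefficient a(i) is divided by b(i)
+ * \endcode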
+ * + * \returns a reference to \c *this + */ +template +template +EIGEN_STRONG_INLINE Derived & +ArrayBase::operator/=(const ArrayBase& other) +{ + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + tmp = other.derived(); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_ARRAYBASE_H diff --git a/latan/Eigen/src/Core/ArrayWrapper.h b/latan/Eigen/src/Core/ArrayWrapper.h new file mode 100644 index 0000000..65ffd64 --- /dev/null +++ b/latan/Eigen/src/Core/ArrayWrapper.h @@ -0,0 +1,254 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ARRAYWRAPPER_H +#define EIGEN_ARRAYWRAPPER_H + +namespace Eigen { + +/** \class ArrayWrapper + * \ingroup Core_Module + * + * \brief Expression of a mathematical vector or matrix as an array object + * + * This class is the return type of MatrixBase::array(), and most of the time + * this is the only way it is use. + * + * \sa MatrixBase::array(), class MatrixWrapper + */ + +namespace internal { +template +struct traits > + : public traits::type > +{ + typedef ArrayXpr XprKind; +}; +} + +template +class ArrayWrapper : public ArrayBase > +{ + public: + typedef ArrayBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ArrayWrapper) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(ArrayWrapper) + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + typedef typename internal::nested::type NestedExpressionType; + + inline ArrayWrapper(ExpressionType& matrix) : m_expression(matrix) {} + + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } + + inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } + inline const Scalar* data() const { return m_expression.data(); } + + inline CoeffReturnType coeff(Index row, Index col) const + { + return m_expression.coeff(row, col); + } + + inline Scalar& coeffRef(Index row, Index col) + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + inline const Scalar& coeffRef(Index row, Index col) const + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + inline CoeffReturnType coeff(Index index) const + { + return m_expression.coeff(index); + } + + inline Scalar& coeffRef(Index index) + { + return m_expression.const_cast_derived().coeffRef(index); + } + + inline const Scalar& coeffRef(Index index) const + { + return m_expression.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(Index row, Index col) const + { + return m_expression.template packet(row, col); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(Index index) const + { + return m_expression.template packet(index); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(index, x); + } + + template + inline void evalTo(Dest& dst) 
const { dst = m_expression; } + + const typename internal::remove_all::type& + nestedExpression() const + { + return m_expression; + } + + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index) */ + void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index,Index)*/ + void resize(Index nbRows, Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); } + + protected: + NestedExpressionType m_expression; +}; + +/** \class MatrixWrapper + * \ingroup Core_Module + * + * \brief Expression of an array as a mathematical vector or matrix + * + * This class is the return type of ArrayBase::matrix(), and most of the time + * this is the only way it is use. + * + * \sa MatrixBase::matrix(), class ArrayWrapper + */ + +namespace internal { +template +struct traits > + : public traits::type > +{ + typedef MatrixXpr XprKind; +}; +} + +template +class MatrixWrapper : public MatrixBase > +{ + public: + typedef MatrixBase > Base; + EIGEN_DENSE_PUBLIC_INTERFACE(MatrixWrapper) + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(MatrixWrapper) + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + typedef typename internal::nested::type NestedExpressionType; + + inline MatrixWrapper(ExpressionType& matrix) : m_expression(matrix) {} + + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } + + inline ScalarWithConstIfNotLvalue* data() { return m_expression.data(); } + inline const Scalar* data() const { return m_expression.data(); } + + inline CoeffReturnType coeff(Index row, Index col) const + { + return m_expression.coeff(row, col); + } + + inline Scalar& coeffRef(Index row, Index col) + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + inline const Scalar& coeffRef(Index row, Index col) const + { + return m_expression.derived().coeffRef(row, col); + } + + inline CoeffReturnType coeff(Index index) const + { + return m_expression.coeff(index); + } + + inline Scalar& coeffRef(Index index) + { + return m_expression.const_cast_derived().coeffRef(index); + } + + inline const Scalar& coeffRef(Index index) const + { + return m_expression.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(Index row, Index col) const + { + return m_expression.template packet(row, col); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(Index index) const + { + return m_expression.template packet(index); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(index, x); + } + + const typename internal::remove_all::type& + nestedExpression() const + { + return m_expression; + } + + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index) */ + void resize(Index newSize) { m_expression.const_cast_derived().resize(newSize); } + /** Forwards the resizing request to the nested expression + * \sa DenseBase::resize(Index,Index)*/ + void resize(Index nbRows, 
Index nbCols) { m_expression.const_cast_derived().resize(nbRows,nbCols); } + + protected: + NestedExpressionType m_expression; +}; + +} // end namespace Eigen + +#endif // EIGEN_ARRAYWRAPPER_H diff --git a/latan/Eigen/src/Core/Assign.h b/latan/Eigen/src/Core/Assign.h new file mode 100644 index 0000000..cd29a88 --- /dev/null +++ b/latan/Eigen/src/Core/Assign.h @@ -0,0 +1,583 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007 Michael Olbrich +// Copyright (C) 2006-2010 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ASSIGN_H +#define EIGEN_ASSIGN_H + +namespace Eigen { + +namespace internal { + +/*************************************************************************** +* Part 1 : the logic deciding a strategy for traversal and unrolling * +***************************************************************************/ + +template +struct assign_traits +{ +public: + enum { + DstIsAligned = Derived::Flags & AlignedBit, + DstHasDirectAccess = Derived::Flags & DirectAccessBit, + SrcIsAligned = OtherDerived::Flags & AlignedBit, + JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned + }; + +private: + enum { + InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime) + : int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime) + : int(Derived::RowsAtCompileTime), + InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime) + : int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime) + : int(Derived::MaxRowsAtCompileTime), + MaxSizeAtCompileTime = Derived::SizeAtCompileTime, + PacketSize = packet_traits::size + }; + + enum { + StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)), + MightVectorize = StorageOrdersAgree + && (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit), + MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0 + && int(DstIsAligned) && int(SrcIsAligned), + MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit), + MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess + && (DstIsAligned || MaxSizeAtCompileTime == Dynamic), + /* If the destination isn't aligned, we have to do runtime checks and we don't unroll, + so it's only good for large enough sizes. */ + MaySliceVectorize = MightVectorize && DstHasDirectAccess + && (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize) + /* slice vectorization can be slow, so we only want it if the slices are big, which is + indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block + in a fixed-size matrix */ + }; + +public: + enum { + Traversal = int(MayInnerVectorize) ? int(InnerVectorizedTraversal) + : int(MayLinearVectorize) ? int(LinearVectorizedTraversal) + : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) + : int(MayLinearize) ? int(LinearTraversal) + : int(DefaultTraversal), + Vectorized = int(Traversal) == InnerVectorizedTraversal + || int(Traversal) == LinearVectorizedTraversal + || int(Traversal) == SliceVectorizedTraversal + }; + +private: + enum { + UnrollingLimit = EIGEN_UNROLLING_LIMIT * (Vectorized ? 
int(PacketSize) : 1), + MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic + && int(OtherDerived::CoeffReadCost) != Dynamic + && int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit), + MayUnrollInner = int(InnerSize) != Dynamic + && int(OtherDerived::CoeffReadCost) != Dynamic + && int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit) + }; + +public: + enum { + Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal)) + ? ( + int(MayUnrollCompletely) ? int(CompleteUnrolling) + : int(MayUnrollInner) ? int(InnerUnrolling) + : int(NoUnrolling) + ) + : int(Traversal) == int(LinearVectorizedTraversal) + ? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) ) + : int(Traversal) == int(LinearTraversal) + ? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling) : int(NoUnrolling) ) + : int(NoUnrolling) + }; + +#ifdef EIGEN_DEBUG_ASSIGN + static void debug() + { + EIGEN_DEBUG_VAR(DstIsAligned) + EIGEN_DEBUG_VAR(SrcIsAligned) + EIGEN_DEBUG_VAR(JointAlignment) + EIGEN_DEBUG_VAR(InnerSize) + EIGEN_DEBUG_VAR(InnerMaxSize) + EIGEN_DEBUG_VAR(PacketSize) + EIGEN_DEBUG_VAR(StorageOrdersAgree) + EIGEN_DEBUG_VAR(MightVectorize) + EIGEN_DEBUG_VAR(MayLinearize) + EIGEN_DEBUG_VAR(MayInnerVectorize) + EIGEN_DEBUG_VAR(MayLinearVectorize) + EIGEN_DEBUG_VAR(MaySliceVectorize) + EIGEN_DEBUG_VAR(Traversal) + EIGEN_DEBUG_VAR(UnrollingLimit) + EIGEN_DEBUG_VAR(MayUnrollCompletely) + EIGEN_DEBUG_VAR(MayUnrollInner) + EIGEN_DEBUG_VAR(Unrolling) + } +#endif +}; + +/*************************************************************************** +* Part 2 : meta-unrollers +***************************************************************************/ + +/************************ +*** Default traversal *** +************************/ + +template +struct assign_DefaultTraversal_CompleteUnrolling +{ + enum { + outer = Index / Derived1::InnerSizeAtCompileTime, + inner = Index % Derived1::InnerSizeAtCompileTime + }; + + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + dst.copyCoeffByOuterInner(outer, inner, src); + assign_DefaultTraversal_CompleteUnrolling::run(dst, src); + } +}; + +template +struct assign_DefaultTraversal_CompleteUnrolling +{ + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} +}; + +template +struct assign_DefaultTraversal_InnerUnrolling +{ + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer) + { + dst.copyCoeffByOuterInner(outer, Index, src); + assign_DefaultTraversal_InnerUnrolling::run(dst, src, outer); + } +}; + +template +struct assign_DefaultTraversal_InnerUnrolling +{ + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {} +}; + +/*********************** +*** Linear traversal *** +***********************/ + +template +struct assign_LinearTraversal_CompleteUnrolling +{ + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + dst.copyCoeff(Index, src); + assign_LinearTraversal_CompleteUnrolling::run(dst, src); + } +}; + +template +struct assign_LinearTraversal_CompleteUnrolling +{ + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct assign_innervec_CompleteUnrolling +{ + enum { + outer = Index / Derived1::InnerSizeAtCompileTime, + inner = Index % Derived1::InnerSizeAtCompileTime, + JointAlignment = 
assign_traits::JointAlignment + }; + + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + dst.template copyPacketByOuterInner(outer, inner, src); + assign_innervec_CompleteUnrolling::size, Stop>::run(dst, src); + } +}; + +template +struct assign_innervec_CompleteUnrolling +{ + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &) {} +}; + +template +struct assign_innervec_InnerUnrolling +{ + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src, int outer) + { + dst.template copyPacketByOuterInner(outer, Index, src); + assign_innervec_InnerUnrolling::size, Stop>::run(dst, src, outer); + } +}; + +template +struct assign_innervec_InnerUnrolling +{ + static EIGEN_STRONG_INLINE void run(Derived1 &, const Derived2 &, int) {} +}; + +/*************************************************************************** +* Part 3 : implementation of all cases +***************************************************************************/ + +template::Traversal, + int Unrolling = assign_traits::Unrolling, + int Version = Specialized> +struct assign_impl; + +/************************ +*** Default traversal *** +************************/ + +template +struct assign_impl +{ + static inline void run(Derived1 &, const Derived2 &) { } +}; + +template +struct assign_impl +{ + typedef typename Derived1::Index Index; + static inline void run(Derived1 &dst, const Derived2 &src) + { + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) + for(Index inner = 0; inner < innerSize; ++inner) + dst.copyCoeffByOuterInner(outer, inner, src); + } +}; + +template +struct assign_impl +{ + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + assign_DefaultTraversal_CompleteUnrolling + ::run(dst, src); + } +}; + +template +struct assign_impl +{ + typedef typename Derived1::Index Index; + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) + assign_DefaultTraversal_InnerUnrolling + ::run(dst, src, outer); + } +}; + +/*********************** +*** Linear traversal *** +***********************/ + +template +struct assign_impl +{ + typedef typename Derived1::Index Index; + static inline void run(Derived1 &dst, const Derived2 &src) + { + const Index size = dst.size(); + for(Index i = 0; i < size; ++i) + dst.copyCoeff(i, src); + } +}; + +template +struct assign_impl +{ + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + assign_LinearTraversal_CompleteUnrolling + ::run(dst, src); + } +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct assign_impl +{ + typedef typename Derived1::Index Index; + static inline void run(Derived1 &dst, const Derived2 &src) + { + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + const Index packetSize = packet_traits::size; + for(Index outer = 0; outer < outerSize; ++outer) + for(Index inner = 0; inner < innerSize; inner+=packetSize) + dst.template copyPacketByOuterInner(outer, inner, src); + } +}; + +template +struct assign_impl +{ + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + assign_innervec_CompleteUnrolling + ::run(dst, src); + } +}; + +template +struct assign_impl +{ + typedef typename Derived1::Index Index; + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 
&src) + { + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) + assign_innervec_InnerUnrolling + ::run(dst, src, outer); + } +}; + +/*************************** +*** Linear vectorization *** +***************************/ + +template +struct unaligned_assign_impl +{ + template + static EIGEN_STRONG_INLINE void run(const Derived&, OtherDerived&, typename Derived::Index, typename Derived::Index) {} +}; + +template <> +struct unaligned_assign_impl +{ + // MSVC must not inline this functions. If it does, it fails to optimize the + // packet access path. +#ifdef _MSC_VER + template + static EIGEN_DONT_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end) +#else + template + static EIGEN_STRONG_INLINE void run(const Derived& src, OtherDerived& dst, typename Derived::Index start, typename Derived::Index end) +#endif + { + for (typename Derived::Index index = start; index < end; ++index) + dst.copyCoeff(index, src); + } +}; + +template +struct assign_impl +{ + typedef typename Derived1::Index Index; + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + const Index size = dst.size(); + typedef packet_traits PacketTraits; + enum { + packetSize = PacketTraits::size, + dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : int(assign_traits::DstIsAligned) , + srcAlignment = assign_traits::JointAlignment + }; + const Index alignedStart = assign_traits::DstIsAligned ? 0 + : internal::first_aligned(&dst.coeffRef(0), size); + const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; + + unaligned_assign_impl::DstIsAligned!=0>::run(src,dst,0,alignedStart); + + for(Index index = alignedStart; index < alignedEnd; index += packetSize) + { + dst.template copyPacket(index, src); + } + + unaligned_assign_impl<>::run(src,dst,alignedEnd,size); + } +}; + +template +struct assign_impl +{ + typedef typename Derived1::Index Index; + static EIGEN_STRONG_INLINE void run(Derived1 &dst, const Derived2 &src) + { + enum { size = Derived1::SizeAtCompileTime, + packetSize = packet_traits::size, + alignedSize = (size/packetSize)*packetSize }; + + assign_innervec_CompleteUnrolling::run(dst, src); + assign_DefaultTraversal_CompleteUnrolling::run(dst, src); + } +}; + +/************************** +*** Slice vectorization *** +***************************/ + +template +struct assign_impl +{ + typedef typename Derived1::Index Index; + static inline void run(Derived1 &dst, const Derived2 &src) + { + typedef packet_traits PacketTraits; + enum { + packetSize = PacketTraits::size, + alignable = PacketTraits::AlignedOnScalar, + dstAlignment = alignable ? Aligned : int(assign_traits::DstIsAligned) , + srcAlignment = assign_traits::JointAlignment + }; + const Index packetAlignedMask = packetSize - 1; + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0; + Index alignedStart = ((!alignable) || assign_traits::DstIsAligned) ? 
0 + : internal::first_aligned(&dst.coeffRef(0,0), innerSize); + + for(Index outer = 0; outer < outerSize; ++outer) + { + const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); + // do the non-vectorizable part of the assignment + for(Index inner = 0; inner(outer, inner, src); + + // do the non-vectorizable part of the assignment + for(Index inner = alignedEnd; inner((alignedStart+alignedStep)%packetSize, innerSize); + } + } +}; + +} // end namespace internal + +/*************************************************************************** +* Part 4 : implementation of DenseBase methods +***************************************************************************/ + +template +template +EIGEN_STRONG_INLINE Derived& DenseBase + ::lazyAssign(const DenseBase& other) +{ + enum{ + SameType = internal::is_same::value + }; + + EIGEN_STATIC_ASSERT_LVALUE(Derived) + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT(SameType,YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + +#ifdef EIGEN_DEBUG_ASSIGN + internal::assign_traits::debug(); +#endif + eigen_assert(rows() == other.rows() && cols() == other.cols()); + internal::assign_impl::Traversal) + : int(InvalidTraversal)>::run(derived(),other.derived()); +#ifndef EIGEN_NO_DEBUG + checkTransposeAliasing(other.derived()); +#endif + return derived(); +} + +namespace internal { + +template +struct assign_selector; + +template +struct assign_selector { + static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); } +}; +template +struct assign_selector { + static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); } +}; +template +struct assign_selector { + static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); } +}; +template +struct assign_selector { + static EIGEN_STRONG_INLINE Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); } +}; + +} // end namespace internal + +template +template +EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) +{ + return internal::assign_selector::run(derived(), other.derived()); +} + +template +EIGEN_STRONG_INLINE Derived& DenseBase::operator=(const DenseBase& other) +{ + return internal::assign_selector::run(derived(), other.derived()); +} + +template +EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const MatrixBase& other) +{ + return internal::assign_selector::run(derived(), other.derived()); +} + +template +template +EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const DenseBase& other) +{ + return internal::assign_selector::run(derived(), other.derived()); +} + +template +template +EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const EigenBase& other) +{ + other.derived().evalTo(derived()); + return derived(); +} + +template +template +EIGEN_STRONG_INLINE Derived& MatrixBase::operator=(const ReturnByValue& other) +{ + other.evalTo(derived()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_H diff --git a/latan/Eigen/src/Core/Assign_MKL.h b/latan/Eigen/src/Core/Assign_MKL.h new file mode 100644 index 0000000..428c636 --- /dev/null +++ b/latan/Eigen/src/Core/Assign_MKL.h @@ -0,0 +1,224 @@ +/* + Copyright (c) 2011, Intel Corporation. All rights reserved. 
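// ---------------------------------------------------------------------------
// [editor's aside - illustrative sketch, not part of the files added by this
// commit] The assign_traits/assign_impl machinery above chooses a traversal
// and an unrolling strategy at compile time for every dense "dst = src". As
// the #ifdef EIGEN_DEBUG_ASSIGN branches in assign_traits::debug() and in
// DenseBase::lazyAssign() suggest, that choice can be printed by defining
// EIGEN_DEBUG_ASSIGN before including Eigen. The include path is an
// assumption; adjust it to however this vendored copy under latan/Eigen is
// exposed to client code.
#define EIGEN_DEBUG_ASSIGN
#include <iostream>
#include <Eigen/Dense>

int main()
{
    Eigen::MatrixXf a(64, 64), b(64, 64);
    b.setRandom();
    a = 2.0f * b; // prints DstIsAligned, Traversal, Unrolling, ... to stderr
    std::cout << a(0, 0) << std::endl;
    return 0;
}
// ---------------------------------------------------------------------------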
+ + Redistribution and use in source and binary forms, with or without modification, + are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR + ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + ******************************************************************************** + * Content : Eigen bindings to Intel(R) MKL + * MKL VML support for coefficient-wise unary Eigen expressions like a=b.sin() + ******************************************************************************** +*/ + +#ifndef EIGEN_ASSIGN_VML_H +#define EIGEN_ASSIGN_VML_H + +namespace Eigen { + +namespace internal { + +template struct vml_call +{ enum { IsSupported = 0 }; }; + +template +class vml_assign_traits +{ + private: + enum { + DstHasDirectAccess = Dst::Flags & DirectAccessBit, + SrcHasDirectAccess = Src::Flags & DirectAccessBit, + + StorageOrdersAgree = (int(Dst::IsRowMajor) == int(Src::IsRowMajor)), + InnerSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::SizeAtCompileTime) + : int(Dst::Flags)&RowMajorBit ? int(Dst::ColsAtCompileTime) + : int(Dst::RowsAtCompileTime), + InnerMaxSize = int(Dst::IsVectorAtCompileTime) ? int(Dst::MaxSizeAtCompileTime) + : int(Dst::Flags)&RowMajorBit ? int(Dst::MaxColsAtCompileTime) + : int(Dst::MaxRowsAtCompileTime), + MaxSizeAtCompileTime = Dst::SizeAtCompileTime, + + MightEnableVml = vml_call::IsSupported && StorageOrdersAgree && DstHasDirectAccess && SrcHasDirectAccess + && Src::InnerStrideAtCompileTime==1 && Dst::InnerStrideAtCompileTime==1, + MightLinearize = MightEnableVml && (int(Dst::Flags) & int(Src::Flags) & LinearAccessBit), + VmlSize = MightLinearize ? MaxSizeAtCompileTime : InnerMaxSize, + LargeEnough = VmlSize==Dynamic || VmlSize>=EIGEN_MKL_VML_THRESHOLD, + MayEnableVml = MightEnableVml && LargeEnough, + MayLinearize = MayEnableVml && MightLinearize + }; + public: + enum { + Traversal = MayLinearize ? LinearVectorizedTraversal + : MayEnableVml ? 
InnerVectorizedTraversal + : DefaultTraversal + }; +}; + +template::Traversal > +struct vml_assign_impl + : assign_impl,Traversal,Unrolling,BuiltIn> +{ +}; + +template +struct vml_assign_impl +{ + typedef typename Derived1::Scalar Scalar; + typedef typename Derived1::Index Index; + static inline void run(Derived1& dst, const CwiseUnaryOp& src) + { + // in case we want to (or have to) skip VML at runtime we can call: + // assign_impl,Traversal,Unrolling,BuiltIn>::run(dst,src); + const Index innerSize = dst.innerSize(); + const Index outerSize = dst.outerSize(); + for(Index outer = 0; outer < outerSize; ++outer) { + const Scalar *src_ptr = src.IsRowMajor ? &(src.nestedExpression().coeffRef(outer,0)) : + &(src.nestedExpression().coeffRef(0, outer)); + Scalar *dst_ptr = dst.IsRowMajor ? &(dst.coeffRef(outer,0)) : &(dst.coeffRef(0, outer)); + vml_call::run(src.functor(), innerSize, src_ptr, dst_ptr ); + } + } +}; + +template +struct vml_assign_impl +{ + static inline void run(Derived1& dst, const CwiseUnaryOp& src) + { + // in case we want to (or have to) skip VML at runtime we can call: + // assign_impl,Traversal,Unrolling,BuiltIn>::run(dst,src); + vml_call::run(src.functor(), dst.size(), src.nestedExpression().data(), dst.data() ); + } +}; + +// Macroses + +#define EIGEN_MKL_VML_SPECIALIZE_ASSIGN(TRAVERSAL,UNROLLING) \ + template \ + struct assign_impl, TRAVERSAL, UNROLLING, Specialized> { \ + static inline void run(Derived1 &dst, const Eigen::CwiseUnaryOp &src) { \ + vml_assign_impl::run(dst, src); \ + } \ + }; + +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,NoUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,CompleteUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(DefaultTraversal,InnerUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,NoUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearTraversal,CompleteUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,NoUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,CompleteUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(InnerVectorizedTraversal,InnerUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,CompleteUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(LinearVectorizedTraversal,NoUnrolling) +EIGEN_MKL_VML_SPECIALIZE_ASSIGN(SliceVectorizedTraversal,NoUnrolling) + + +#if !defined (EIGEN_FAST_MATH) || (EIGEN_FAST_MATH != 1) +#define EIGEN_MKL_VML_MODE VML_HA +#else +#define EIGEN_MKL_VML_MODE VML_LA +#endif + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \ + template<> struct vml_call< scalar_##EIGENOP##_op > { \ + enum { IsSupported = 1 }; \ + static inline void run( const scalar_##EIGENOP##_op& /*func*/, \ + int size, const EIGENTYPE* src, EIGENTYPE* dst) { \ + VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst); \ + } \ + }; + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \ + template<> struct vml_call< scalar_##EIGENOP##_op > { \ + enum { IsSupported = 1 }; \ + static inline void run( const scalar_##EIGENOP##_op& /*func*/, \ + int size, const EIGENTYPE* src, EIGENTYPE* dst) { \ + MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE; \ + VMLOP(size, (const VMLTYPE*)src, (VMLTYPE*)dst, vmlMode); \ + } \ + }; + +#define EIGEN_MKL_VML_DECLARE_POW_CALL(EIGENOP, VMLOP, EIGENTYPE, VMLTYPE) \ + template<> struct vml_call< scalar_##EIGENOP##_op > { \ + enum { IsSupported = 1 }; \ + static inline void run( const scalar_##EIGENOP##_op& func, \ + int size, const EIGENTYPE* src, EIGENTYPE* dst) { \ + EIGENTYPE exponent = func.m_exponent; \ + 
MKL_INT64 vmlMode = EIGEN_MKL_VML_MODE; \ + VMLOP(&size, (const VMLTYPE*)src, (const VMLTYPE*)&exponent, \ + (VMLTYPE*)dst, &vmlMode); \ + } \ + }; + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vs##VMLOP, float, float) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vd##VMLOP, double, double) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vc##VMLOP, scomplex, MKL_Complex8) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL(EIGENOP, vz##VMLOP, dcomplex, MKL_Complex16) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX(EIGENOP, VMLOP) + + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vms##VMLOP, float, float) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmd##VMLOP, double, double) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmc##VMLOP, scomplex, MKL_Complex8) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALL_LA(EIGENOP, vmz##VMLOP, dcomplex, MKL_Complex16) + +#define EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL_LA(EIGENOP, VMLOP) \ + EIGEN_MKL_VML_DECLARE_UNARY_CALLS_COMPLEX_LA(EIGENOP, VMLOP) + + +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sin, Sin) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(asin, Asin) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(cos, Cos) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(acos, Acos) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(tan, Tan) +//EIGEN_MKL_VML_DECLARE_UNARY_CALLS(abs, Abs) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(exp, Exp) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(log, Ln) +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_LA(sqrt, Sqrt) + +EIGEN_MKL_VML_DECLARE_UNARY_CALLS_REAL(square, Sqr) + +// The vm*powx functions are not avaibale in the windows version of MKL. +#ifdef _WIN32 +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmspowx_, float, float) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmdpowx_, double, double) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmcpowx_, scomplex, MKL_Complex8) +EIGEN_MKL_VML_DECLARE_POW_CALL(pow, vmzpowx_, dcomplex, MKL_Complex16) +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_ASSIGN_VML_H diff --git a/latan/Eigen/src/Core/BandMatrix.h b/latan/Eigen/src/Core/BandMatrix.h new file mode 100644 index 0000000..ffd7fe8 --- /dev/null +++ b/latan/Eigen/src/Core/BandMatrix.h @@ -0,0 +1,334 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
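// ---------------------------------------------------------------------------
// [editor's aside - illustrative sketch, not part of the files added by this
// commit] The Assign_MKL.h bindings above reroute large coefficient-wise
// unary expressions (sin, cos, exp, log, sqrt, pow, ...) to Intel MKL's VML
// when MKL support is enabled. A minimal sketch, assuming MKL is installed
// and linked; without EIGEN_USE_MKL_ALL the same code simply runs through
// Eigen's built-in vectorized assignment path.
#define EIGEN_USE_MKL_ALL   // assumption: MKL headers and libraries available
#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Large enough that vml_assign_traits considers the VML call worthwhile
    // (compare EIGEN_MKL_VML_THRESHOLD above).
    Eigen::ArrayXd x = Eigen::ArrayXd::LinSpaced(100000, 0.0, 3.14);
    Eigen::ArrayXd y = x.sin() * x.exp();
    std::cout << y.head(3).transpose() << std::endl;
    return 0;
}
// ---------------------------------------------------------------------------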
+ +#ifndef EIGEN_BANDMATRIX_H +#define EIGEN_BANDMATRIX_H + +namespace Eigen { + +namespace internal { + +template +class BandMatrixBase : public EigenBase +{ + public: + + enum { + Flags = internal::traits::Flags, + CoeffReadCost = internal::traits::CoeffReadCost, + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + Supers = internal::traits::Supers, + Subs = internal::traits::Subs, + Options = internal::traits::Options + }; + typedef typename internal::traits::Scalar Scalar; + typedef Matrix DenseMatrixType; + typedef typename DenseMatrixType::Index Index; + typedef typename internal::traits::CoefficientsType CoefficientsType; + typedef EigenBase Base; + + protected: + enum { + DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) + ? 1 + Supers + Subs + : Dynamic, + SizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime) + }; + + public: + + using Base::derived; + using Base::rows; + using Base::cols; + + /** \returns the number of super diagonals */ + inline Index supers() const { return derived().supers(); } + + /** \returns the number of sub diagonals */ + inline Index subs() const { return derived().subs(); } + + /** \returns an expression of the underlying coefficient matrix */ + inline const CoefficientsType& coeffs() const { return derived().coeffs(); } + + /** \returns an expression of the underlying coefficient matrix */ + inline CoefficientsType& coeffs() { return derived().coeffs(); } + + /** \returns a vector expression of the \a i -th column, + * only the meaningful part is returned. + * \warning the internal storage must be column major. */ + inline Block col(Index i) + { + EIGEN_STATIC_ASSERT((Options&RowMajor)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + Index start = 0; + Index len = coeffs().rows(); + if (i<=supers()) + { + start = supers()-i; + len = (std::min)(rows(),std::max(0,coeffs().rows() - (supers()-i))); + } + else if (i>=rows()-subs()) + len = std::max(0,coeffs().rows() - (i + 1 - rows() + subs())); + return Block(coeffs(), start, i, len, 1); + } + + /** \returns a vector expression of the main diagonal */ + inline Block diagonal() + { return Block(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } + + /** \returns a vector expression of the main diagonal (const version) */ + inline const Block diagonal() const + { return Block(coeffs(),supers(),0,1,(std::min)(rows(),cols())); } + + template struct DiagonalIntReturnType { + enum { + ReturnOpposite = (Options&SelfAdjoint) && (((Index)>0 && Supers==0) || ((Index)<0 && Subs==0)), + Conjugate = ReturnOpposite && NumTraits::IsComplex, + ActualIndex = ReturnOpposite ? -Index : Index, + DiagonalSize = (RowsAtCompileTime==Dynamic || ColsAtCompileTime==Dynamic) + ? Dynamic + : (ActualIndex<0 + ? 
EIGEN_SIZE_MIN_PREFER_DYNAMIC(ColsAtCompileTime, RowsAtCompileTime + ActualIndex) + : EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime - ActualIndex)) + }; + typedef Block BuildType; + typedef typename internal::conditional,BuildType >, + BuildType>::type Type; + }; + + /** \returns a vector expression of the \a N -th sub or super diagonal */ + template inline typename DiagonalIntReturnType::Type diagonal() + { + return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); + } + + /** \returns a vector expression of the \a N -th sub or super diagonal */ + template inline const typename DiagonalIntReturnType::Type diagonal() const + { + return typename DiagonalIntReturnType::BuildType(coeffs(), supers()-N, (std::max)(0,N), 1, diagonalLength(N)); + } + + /** \returns a vector expression of the \a i -th sub or super diagonal */ + inline Block diagonal(Index i) + { + eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); + return Block(coeffs(), supers()-i, std::max(0,i), 1, diagonalLength(i)); + } + + /** \returns a vector expression of the \a i -th sub or super diagonal */ + inline const Block diagonal(Index i) const + { + eigen_assert((i<0 && -i<=subs()) || (i>=0 && i<=supers())); + return Block(coeffs(), supers()-i, std::max(0,i), 1, diagonalLength(i)); + } + + template inline void evalTo(Dest& dst) const + { + dst.resize(rows(),cols()); + dst.setZero(); + dst.diagonal() = diagonal(); + for (Index i=1; i<=supers();++i) + dst.diagonal(i) = diagonal(i); + for (Index i=1; i<=subs();++i) + dst.diagonal(-i) = diagonal(-i); + } + + DenseMatrixType toDenseMatrix() const + { + DenseMatrixType res(rows(),cols()); + evalTo(res); + return res; + } + + protected: + + inline Index diagonalLength(Index i) const + { return i<0 ? (std::min)(cols(),rows()+i) : (std::min)(rows(),cols()-i); } +}; + +/** + * \class BandMatrix + * \ingroup Core_Module + * + * \brief Represents a rectangular matrix with a banded storage + * + * \param _Scalar Numeric type, i.e. float, double, int + * \param Rows Number of rows, or \b Dynamic + * \param Cols Number of columns, or \b Dynamic + * \param Supers Number of super diagonal + * \param Subs Number of sub diagonal + * \param _Options A combination of either \b #RowMajor or \b #ColMajor, and of \b #SelfAdjoint + * The former controls \ref TopicStorageOrders "storage order", and defaults to + * column-major. The latter controls whether the matrix represents a selfadjoint + * matrix in which case either Supers of Subs have to be null. + * + * \sa class TridiagonalMatrix + */ + +template +struct traits > +{ + typedef _Scalar Scalar; + typedef Dense StorageKind; + typedef DenseIndex Index; + enum { + CoeffReadCost = NumTraits::ReadCost, + RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _Rows, + MaxColsAtCompileTime = _Cols, + Flags = LvalueBit, + Supers = _Supers, + Subs = _Subs, + Options = _Options, + DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 
1 + Supers + Subs : Dynamic + }; + typedef Matrix CoefficientsType; +}; + +template +class BandMatrix : public BandMatrixBase > +{ + public: + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::CoefficientsType CoefficientsType; + + inline BandMatrix(Index rows=Rows, Index cols=Cols, Index supers=Supers, Index subs=Subs) + : m_coeffs(1+supers+subs,cols), + m_rows(rows), m_supers(supers), m_subs(subs) + { + } + + /** \returns the number of columns */ + inline Index rows() const { return m_rows.value(); } + + /** \returns the number of rows */ + inline Index cols() const { return m_coeffs.cols(); } + + /** \returns the number of super diagonals */ + inline Index supers() const { return m_supers.value(); } + + /** \returns the number of sub diagonals */ + inline Index subs() const { return m_subs.value(); } + + inline const CoefficientsType& coeffs() const { return m_coeffs; } + inline CoefficientsType& coeffs() { return m_coeffs; } + + protected: + + CoefficientsType m_coeffs; + internal::variable_if_dynamic m_rows; + internal::variable_if_dynamic m_supers; + internal::variable_if_dynamic m_subs; +}; + +template +class BandMatrixWrapper; + +template +struct traits > +{ + typedef typename _CoefficientsType::Scalar Scalar; + typedef typename _CoefficientsType::StorageKind StorageKind; + typedef typename _CoefficientsType::Index Index; + enum { + CoeffReadCost = internal::traits<_CoefficientsType>::CoeffReadCost, + RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _Rows, + MaxColsAtCompileTime = _Cols, + Flags = LvalueBit, + Supers = _Supers, + Subs = _Subs, + Options = _Options, + DataRowsAtCompileTime = ((Supers!=Dynamic) && (Subs!=Dynamic)) ? 1 + Supers + Subs : Dynamic + }; + typedef _CoefficientsType CoefficientsType; +}; + +template +class BandMatrixWrapper : public BandMatrixBase > +{ + public: + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::traits::CoefficientsType CoefficientsType; + typedef typename internal::traits::Index Index; + + inline BandMatrixWrapper(const CoefficientsType& coeffs, Index rows=_Rows, Index cols=_Cols, Index supers=_Supers, Index subs=_Subs) + : m_coeffs(coeffs), + m_rows(rows), m_supers(supers), m_subs(subs) + { + EIGEN_UNUSED_VARIABLE(cols); + //internal::assert(coeffs.cols()==cols() && (supers()+subs()+1)==coeffs.rows()); + } + + /** \returns the number of columns */ + inline Index rows() const { return m_rows.value(); } + + /** \returns the number of rows */ + inline Index cols() const { return m_coeffs.cols(); } + + /** \returns the number of super diagonals */ + inline Index supers() const { return m_supers.value(); } + + /** \returns the number of sub diagonals */ + inline Index subs() const { return m_subs.value(); } + + inline const CoefficientsType& coeffs() const { return m_coeffs; } + + protected: + + const CoefficientsType& m_coeffs; + internal::variable_if_dynamic m_rows; + internal::variable_if_dynamic m_supers; + internal::variable_if_dynamic m_subs; +}; + +/** + * \class TridiagonalMatrix + * \ingroup Core_Module + * + * \brief Represents a tridiagonal matrix with a compact banded storage + * + * \param _Scalar Numeric type, i.e. 
float, double, int + * \param Size Number of rows and cols, or \b Dynamic + * \param _Options Can be 0 or \b SelfAdjoint + * + * \sa class BandMatrix + */ +template +class TridiagonalMatrix : public BandMatrix +{ + typedef BandMatrix Base; + typedef typename Base::Index Index; + public: + TridiagonalMatrix(Index size = Size) : Base(size,size,Options&SelfAdjoint?0:1,1) {} + + inline typename Base::template DiagonalIntReturnType<1>::Type super() + { return Base::template diagonal<1>(); } + inline const typename Base::template DiagonalIntReturnType<1>::Type super() const + { return Base::template diagonal<1>(); } + inline typename Base::template DiagonalIntReturnType<-1>::Type sub() + { return Base::template diagonal<-1>(); } + inline const typename Base::template DiagonalIntReturnType<-1>::Type sub() const + { return Base::template diagonal<-1>(); } + protected: +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_BANDMATRIX_H diff --git a/latan/Eigen/src/Core/Block.h b/latan/Eigen/src/Core/Block.h new file mode 100644 index 0000000..5f29cb3 --- /dev/null +++ b/latan/Eigen/src/Core/Block.h @@ -0,0 +1,357 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_BLOCK_H +#define EIGEN_BLOCK_H + +namespace Eigen { + +/** \class Block + * \ingroup Core_Module + * + * \brief Expression of a fixed-size or dynamic-size block + * + * \param XprType the type of the expression in which we are taking a block + * \param BlockRows the number of rows of the block we are taking at compile time (optional) + * \param BlockCols the number of columns of the block we are taking at compile time (optional) + * \param _DirectAccessStatus \internal used for partial specialization + * + * This class represents an expression of either a fixed-size or dynamic-size block. It is the return + * type of DenseBase::block(Index,Index,Index,Index) and DenseBase::block(Index,Index) and + * most of the time this is the only way it is used. + * + * However, if you want to directly maniputate block expressions, + * for instance if you want to write a function returning such an expression, you + * will need to use this class. + * + * Here is an example illustrating the dynamic case: + * \include class_Block.cpp + * Output: \verbinclude class_Block.out + * + * \note Even though this expression has dynamic size, in the case where \a XprType + * has fixed size, this expression inherits a fixed maximal size which means that evaluating + * it does not cause a dynamic memory allocation. + * + * Here is an example illustrating the fixed-size case: + * \include class_FixedBlock.cpp + * Output: \verbinclude class_FixedBlock.out + * + * \sa DenseBase::block(Index,Index,Index,Index), DenseBase::block(Index,Index), class VectorBlock + */ + +namespace internal { +template +struct traits > : traits +{ + typedef typename traits::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename nested::type XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + enum{ + MatrixRows = traits::RowsAtCompileTime, + MatrixCols = traits::ColsAtCompileTime, + RowsAtCompileTime = MatrixRows == 0 ? 
0 : BlockRows, + ColsAtCompileTime = MatrixCols == 0 ? 0 : BlockCols, + MaxRowsAtCompileTime = BlockRows==0 ? 0 + : RowsAtCompileTime != Dynamic ? int(RowsAtCompileTime) + : int(traits::MaxRowsAtCompileTime), + MaxColsAtCompileTime = BlockCols==0 ? 0 + : ColsAtCompileTime != Dynamic ? int(ColsAtCompileTime) + : int(traits::MaxColsAtCompileTime), + XprTypeIsRowMajor = (int(traits::Flags)&RowMajorBit) != 0, + IsRowMajor = (MaxRowsAtCompileTime==1&&MaxColsAtCompileTime!=1) ? 1 + : (MaxColsAtCompileTime==1&&MaxRowsAtCompileTime!=1) ? 0 + : XprTypeIsRowMajor, + HasSameStorageOrderAsXprType = (IsRowMajor == XprTypeIsRowMajor), + InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + InnerStrideAtCompileTime = HasSameStorageOrderAsXprType + ? int(inner_stride_at_compile_time::ret) + : int(outer_stride_at_compile_time::ret), + OuterStrideAtCompileTime = HasSameStorageOrderAsXprType + ? int(outer_stride_at_compile_time::ret) + : int(inner_stride_at_compile_time::ret), + MaskPacketAccessBit = (InnerSize == Dynamic || (InnerSize % packet_traits::size) == 0) + && (InnerStrideAtCompileTime == 1) + ? PacketAccessBit : 0, + MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % 16) == 0)) ? AlignedBit : 0, + FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0, + FlagsLvalueBit = is_lvalue::value ? LvalueBit : 0, + FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0, + Flags0 = traits::Flags & ( (HereditaryBits & ~RowMajorBit) | + DirectAccessBit | + MaskPacketAccessBit | + MaskAlignedBit), + Flags = Flags0 | FlagsLinearAccessBit | FlagsLvalueBit | FlagsRowMajorBit + }; +}; +} + +template class Block + : public internal::dense_xpr_base >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Block) + + class InnerIterator; + + /** Column or Row constructor + */ + inline Block(XprType& xpr, Index i) + : m_xpr(xpr), + // It is a row if and only if BlockRows==1 and BlockCols==XprType::ColsAtCompileTime, + // and it is a column if and only if BlockRows==XprType::RowsAtCompileTime and BlockCols==1, + // all other cases are invalid. + // The case a 1x1 matrix seems ambiguous, but the result is the same anyway. + m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0), + m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), + m_blockRows(BlockRows==1 ? 1 : xpr.rows()), + m_blockCols(BlockCols==1 ? 
1 : xpr.cols()) + { + eigen_assert( (i>=0) && ( + ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() + && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols()); + } + + /** Dynamic-size constructor + */ + inline Block(XprType& xpr, + Index startRow, Index startCol, + Index blockRows, Index blockCols) + : m_xpr(xpr), m_startRow(startRow), m_startCol(startCol), + m_blockRows(blockRows), m_blockCols(blockCols) + { + eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) + && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); + eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows() + && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols()); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) + + inline Index rows() const { return m_blockRows.value(); } + inline Index cols() const { return m_blockCols.value(); } + + inline Scalar& coeffRef(Index row, Index col) + { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + return m_xpr.const_cast_derived() + .coeffRef(row + m_startRow.value(), col + m_startCol.value()); + } + + inline const Scalar& coeffRef(Index row, Index col) const + { + return m_xpr.derived() + .coeffRef(row + m_startRow.value(), col + m_startCol.value()); + } + + EIGEN_STRONG_INLINE const CoeffReturnType coeff(Index row, Index col) const + { + return m_xpr.coeff(row + m_startRow.value(), col + m_startCol.value()); + } + + inline Scalar& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_LVALUE(XprType) + return m_xpr.const_cast_derived() + .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + inline const Scalar& coeffRef(Index index) const + { + return m_xpr.const_cast_derived() + .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + inline const CoeffReturnType coeff(Index index) const + { + return m_xpr + .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + template + inline PacketScalar packet(Index row, Index col) const + { + return m_xpr.template packet + (row + m_startRow.value(), col + m_startCol.value()); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + m_xpr.const_cast_derived().template writePacket + (row + m_startRow.value(), col + m_startCol.value(), x); + } + + template + inline PacketScalar packet(Index index) const + { + return m_xpr.template packet + (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + m_xpr.const_cast_derived().template writePacket + (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0), x); + } + + #ifdef EIGEN_PARSED_BY_DOXYGEN + /** \sa MapBase::data() */ + inline const Scalar* data() const; + inline Index innerStride() const; + inline Index outerStride() const; + #endif + + const typename internal::remove_all::type& nestedExpression() const + { + return m_xpr; + } + + Index startRow() const + { + return m_startRow.value(); + } + + Index startCol() const + { + return m_startCol.value(); + } + + protected: + + const typename XprType::Nested m_xpr; + const internal::variable_if_dynamic m_startRow; + const internal::variable_if_dynamic m_startCol; + const internal::variable_if_dynamic m_blockRows; + const internal::variable_if_dynamic m_blockCols; +}; + +/** \internal */ +template +class Block + : public MapBase > +{ + public: + + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Block) + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) + + /** Column or Row constructor + */ + inline Block(XprType& xpr, Index i) + : Base(internal::const_cast_ptr(&xpr.coeffRef( + (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0, + (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0)), + BlockRows==1 ? 1 : xpr.rows(), + BlockCols==1 ? 1 : xpr.cols()), + m_xpr(xpr) + { + eigen_assert( (i>=0) && ( + ((BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) && i= 0 && BlockRows >= 1 && startRow + BlockRows <= xpr.rows() + && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= xpr.cols()); + init(); + } + + /** Dynamic-size constructor + */ + inline Block(XprType& xpr, + Index startRow, Index startCol, + Index blockRows, Index blockCols) + : Base(internal::const_cast_ptr(&xpr.coeffRef(startRow,startCol)), blockRows, blockCols), + m_xpr(xpr) + { + eigen_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) + && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); + eigen_assert(startRow >= 0 && blockRows >= 0 && startRow + blockRows <= xpr.rows() + && startCol >= 0 && blockCols >= 0 && startCol + blockCols <= xpr.cols()); + init(); + } + + const typename internal::remove_all::type& nestedExpression() const + { + return m_xpr; + } + + /** \sa MapBase::innerStride() */ + inline Index innerStride() const + { + return internal::traits::HasSameStorageOrderAsXprType + ? m_xpr.innerStride() + : m_xpr.outerStride(); + } + + /** \sa MapBase::outerStride() */ + inline Index outerStride() const + { + return m_outerStride; + } + + #ifndef __SUNPRO_CC + // FIXME sunstudio is not friendly with the above friend... + // META-FIXME there is no 'friend' keyword around here. Is this obsolete? + protected: + #endif + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal used by allowAligned() */ + inline Block(XprType& xpr, const Scalar* data, Index blockRows, Index blockCols) + : Base(data, blockRows, blockCols), m_xpr(xpr) + { + init(); + } + #endif + + protected: + void init() + { + m_outerStride = internal::traits::HasSameStorageOrderAsXprType + ? m_xpr.outerStride() + : m_xpr.innerStride(); + } + + typename XprType::Nested m_xpr; + Index m_outerStride; +}; + +} // end namespace Eigen + +#endif // EIGEN_BLOCK_H diff --git a/latan/Eigen/src/Core/BooleanRedux.h b/latan/Eigen/src/Core/BooleanRedux.h new file mode 100644 index 0000000..57efd8e --- /dev/null +++ b/latan/Eigen/src/Core/BooleanRedux.h @@ -0,0 +1,138 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_ALLANDANY_H +#define EIGEN_ALLANDANY_H + +namespace Eigen { + +namespace internal { + +template +struct all_unroller +{ + enum { + col = (UnrollCount-1) / Derived::RowsAtCompileTime, + row = (UnrollCount-1) % Derived::RowsAtCompileTime + }; + + static inline bool run(const Derived &mat) + { + return all_unroller::run(mat) && mat.coeff(row, col); + } +}; + +template +struct all_unroller +{ + static inline bool run(const Derived &mat) { return mat.coeff(0, 0); } +}; + +template +struct all_unroller +{ + static inline bool run(const Derived &) { return false; } +}; + +template +struct any_unroller +{ + enum { + col = (UnrollCount-1) / Derived::RowsAtCompileTime, + row = (UnrollCount-1) % Derived::RowsAtCompileTime + }; + + static inline bool run(const Derived &mat) + { + return any_unroller::run(mat) || mat.coeff(row, col); + } +}; + +template +struct any_unroller +{ + static inline bool run(const Derived &mat) { return mat.coeff(0, 0); } +}; + +template +struct any_unroller +{ + static inline bool run(const Derived &) { return false; } +}; + +} // end namespace internal + +/** \returns true if all coefficients are true + * + * Example: \include MatrixBase_all.cpp + * Output: \verbinclude MatrixBase_all.out + * + * \sa any(), Cwise::operator<() + */ +template +inline bool DenseBase::all() const +{ + enum { + unroll = SizeAtCompileTime != Dynamic + && CoeffReadCost != Dynamic + && NumTraits::AddCost != Dynamic + && SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) <= EIGEN_UNROLLING_LIMIT + }; + if(unroll) + return internal::all_unroller::run(derived()); + else + { + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) + if (!coeff(i, j)) return false; + return true; + } +} + +/** \returns true if at least one coefficient is true + * + * \sa all() + */ +template +inline bool DenseBase::any() const +{ + enum { + unroll = SizeAtCompileTime != Dynamic + && CoeffReadCost != Dynamic + && NumTraits::AddCost != Dynamic + && SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) <= EIGEN_UNROLLING_LIMIT + }; + if(unroll) + return internal::any_unroller::run(derived()); + else + { + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) + if (coeff(i, j)) return true; + return false; + } +} + +/** \returns the number of coefficients which evaluate to true + * + * \sa all(), any() + */ +template +inline typename DenseBase::Index DenseBase::count() const +{ + return derived().template cast().template cast().sum(); +} + +} // end namespace Eigen + +#endif // EIGEN_ALLANDANY_H diff --git a/latan/Eigen/src/Core/CommaInitializer.h b/latan/Eigen/src/Core/CommaInitializer.h new file mode 100644 index 0000000..f20c177 --- /dev/null +++ b/latan/Eigen/src/Core/CommaInitializer.h @@ -0,0 +1,141 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
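// ---------------------------------------------------------------------------
// [editor's aside - illustrative sketch, not part of the files added by this
// commit] Usage of the boolean reductions defined in BooleanRedux.h above:
// all(), any() and count() reduce boolean (comparison) expressions, and they
// compose with the block expressions from Block.h. A minimal, self-contained
// sketch (standard <Eigen/Dense> include path assumed):
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix3d m = Eigen::Matrix3d::Identity();

    std::cout << (m.array() >= 0.0).all()                   << "\n"; // 1: no negative entries
    std::cout << (m.block(1, 1, 2, 2).array() == 0.0).any() << "\n"; // 1: off-diagonal zeros
    std::cout << (m.array() != 0.0).count()                 << "\n"; // 3: the diagonal ones
    return 0;
}
// ---------------------------------------------------------------------------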
+ +#ifndef EIGEN_COMMAINITIALIZER_H +#define EIGEN_COMMAINITIALIZER_H + +namespace Eigen { + +/** \class CommaInitializer + * \ingroup Core_Module + * + * \brief Helper class used by the comma initializer operator + * + * This class is internally used to implement the comma initializer feature. It is + * the return type of MatrixBase::operator<<, and most of the time this is the only + * way it is used. + * + * \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() + */ +template +struct CommaInitializer +{ + typedef typename XprType::Scalar Scalar; + typedef typename XprType::Index Index; + + inline CommaInitializer(XprType& xpr, const Scalar& s) + : m_xpr(xpr), m_row(0), m_col(1), m_currentBlockRows(1) + { + m_xpr.coeffRef(0,0) = s; + } + + template + inline CommaInitializer(XprType& xpr, const DenseBase& other) + : m_xpr(xpr), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) + { + m_xpr.block(0, 0, other.rows(), other.cols()) = other; + } + + /* inserts a scalar value in the target matrix */ + CommaInitializer& operator,(const Scalar& s) + { + if (m_col==m_xpr.cols()) + { + m_row+=m_currentBlockRows; + m_col = 0; + m_currentBlockRows = 1; + eigen_assert(m_row + CommaInitializer& operator,(const DenseBase& other) + { + if(other.cols()==0 || other.rows()==0) + return *this; + if (m_col==m_xpr.cols()) + { + m_row+=m_currentBlockRows; + m_col = 0; + m_currentBlockRows = other.rows(); + eigen_assert(m_row+m_currentBlockRows<=m_xpr.rows() + && "Too many rows passed to comma initializer (operator<<)"); + } + eigen_assert(m_col + (m_row, m_col) = other; + else + m_xpr.block(m_row, m_col, other.rows(), other.cols()) = other; + m_col += other.cols(); + return *this; + } + + inline ~CommaInitializer() + { + eigen_assert((m_row+m_currentBlockRows) == m_xpr.rows() + && m_col == m_xpr.cols() + && "Too few coefficients passed to comma initializer (operator<<)"); + } + + /** \returns the built matrix once all its coefficients have been set. + * Calling finished is 100% optional. Its purpose is to write expressions + * like this: + * \code + * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); + * \endcode + */ + inline XprType& finished() { return m_xpr; } + + XprType& m_xpr; // target expression + Index m_row; // current row id + Index m_col; // current col id + Index m_currentBlockRows; // current block height +}; + +/** \anchor MatrixBaseCommaInitRef + * Convenient operator to set the coefficients of a matrix. + * + * The coefficients must be provided in a row major order and exactly match + * the size of the matrix. Otherwise an assertion is raised. + * + * Example: \include MatrixBase_set.cpp + * Output: \verbinclude MatrixBase_set.out + * + * \sa CommaInitializer::finished(), class CommaInitializer + */ +template +inline CommaInitializer DenseBase::operator<< (const Scalar& s) +{ + return CommaInitializer(*static_cast(this), s); +} + +/** \sa operator<<(const Scalar&) */ +template +template +inline CommaInitializer +DenseBase::operator<<(const DenseBase& other) +{ + return CommaInitializer(*static_cast(this), other); +} + +} // end namespace Eigen + +#endif // EIGEN_COMMAINITIALIZER_H diff --git a/latan/Eigen/src/Core/CwiseBinaryOp.h b/latan/Eigen/src/Core/CwiseBinaryOp.h new file mode 100644 index 0000000..1b93af3 --- /dev/null +++ b/latan/Eigen/src/Core/CwiseBinaryOp.h @@ -0,0 +1,229 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
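// ---------------------------------------------------------------------------
// [editor's aside - illustrative sketch, not part of the files added by this
// commit] Usage of the comma initializer implemented in CommaInitializer.h
// just above: operator<< fills coefficients in row-major order and asserts
// that exactly the right number is supplied; finished() makes an inline
// comma-initialized temporary usable as an expression. Standard include
// path assumed.
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix3d m;
    m << 1, 2, 3,
         4, 5, 6,
         7, 8, 9;

    Eigen::Vector3d e0 = (Eigen::Vector3d() << 1, 0, 0).finished();
    std::cout << (m * e0).transpose() << std::endl; // 1 4 7: first column of m
    return 0;
}
// ---------------------------------------------------------------------------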
+// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_BINARY_OP_H +#define EIGEN_CWISE_BINARY_OP_H + +namespace Eigen { + +/** \class CwiseBinaryOp + * \ingroup Core_Module + * + * \brief Generic expression where a coefficient-wise binary operator is applied to two expressions + * + * \param BinaryOp template functor implementing the operator + * \param Lhs the type of the left-hand side + * \param Rhs the type of the right-hand side + * + * This class represents an expression where a coefficient-wise binary operator is applied to two expressions. + * It is the return type of binary operators, by which we mean only those binary operators where + * both the left-hand side and the right-hand side are Eigen expressions. + * For example, the return type of matrix1+matrix2 is a CwiseBinaryOp. + * + * Most of the time, this is the only way that it is used, so you typically don't have to name + * CwiseBinaryOp types explicitly. + * + * \sa MatrixBase::binaryExpr(const MatrixBase &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp + */ + +namespace internal { +template +struct traits > +{ + // we must not inherit from traits since it has + // the potential to cause problems with MSVC + typedef typename remove_all::type Ancestor; + typedef typename traits::XprKind XprKind; + enum { + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime + }; + + // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor), + // we still want to handle the case when the result type is different. + typedef typename result_of< + BinaryOp( + typename Lhs::Scalar, + typename Rhs::Scalar + ) + >::type Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + typedef typename Lhs::Nested LhsNested; + typedef typename Rhs::Nested RhsNested; + typedef typename remove_reference::type _LhsNested; + typedef typename remove_reference::type _RhsNested; + enum { + LhsCoeffReadCost = _LhsNested::CoeffReadCost, + RhsCoeffReadCost = _RhsNested::CoeffReadCost, + LhsFlags = _LhsNested::Flags, + RhsFlags = _RhsNested::Flags, + SameType = is_same::value, + StorageOrdersAgree = (int(Lhs::Flags)&RowMajorBit)==(int(Rhs::Flags)&RowMajorBit), + Flags0 = (int(LhsFlags) | int(RhsFlags)) & ( + HereditaryBits + | (int(LhsFlags) & int(RhsFlags) & + ( AlignedBit + | (StorageOrdersAgree ? LinearAccessBit : 0) + | (functor_traits::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0) + ) + ) + ), + Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit), + CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + functor_traits::Cost + }; +}; +} // end namespace internal + +// we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor +// that would take two operands of different types. If there were such an example, then this check should be +// moved to the BinaryOp functors, on a per-case basis. 
This would however require a change in the BinaryOp functors, as +// currently they take only one typename Scalar template parameter. +// It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths. +// So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to +// add together a float matrix and a double matrix. +#define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \ + EIGEN_STATIC_ASSERT((internal::functor_allows_mixing_real_and_complex::ret \ + ? int(internal::is_same::Real, typename NumTraits::Real>::value) \ + : int(internal::is_same::value)), \ + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + +template +class CwiseBinaryOpImpl; + +template +class CwiseBinaryOp : internal::no_assignment_operator, + public CwiseBinaryOpImpl< + BinaryOp, Lhs, Rhs, + typename internal::promote_storage_type::StorageKind, + typename internal::traits::StorageKind>::ret> +{ + public: + + typedef typename CwiseBinaryOpImpl< + BinaryOp, Lhs, Rhs, + typename internal::promote_storage_type::StorageKind, + typename internal::traits::StorageKind>::ret>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp) + + typedef typename internal::nested::type LhsNested; + typedef typename internal::nested::type RhsNested; + typedef typename internal::remove_reference::type _LhsNested; + typedef typename internal::remove_reference::type _RhsNested; + + EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp()) + : m_lhs(lhs), m_rhs(rhs), m_functor(func) + { + EIGEN_CHECK_BINARY_COMPATIBILIY(BinaryOp,typename Lhs::Scalar,typename Rhs::Scalar); + // require the sizes to match + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) + eigen_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols()); + } + + EIGEN_STRONG_INLINE Index rows() const { + // return the fixed size type if available to enable compile time optimizations + if (internal::traits::type>::RowsAtCompileTime==Dynamic) + return m_rhs.rows(); + else + return m_lhs.rows(); + } + EIGEN_STRONG_INLINE Index cols() const { + // return the fixed size type if available to enable compile time optimizations + if (internal::traits::type>::ColsAtCompileTime==Dynamic) + return m_rhs.cols(); + else + return m_lhs.cols(); + } + + /** \returns the left hand side nested expression */ + const _LhsNested& lhs() const { return m_lhs; } + /** \returns the right hand side nested expression */ + const _RhsNested& rhs() const { return m_rhs; } + /** \returns the functor representing the binary operation */ + const BinaryOp& functor() const { return m_functor; } + + protected: + LhsNested m_lhs; + RhsNested m_rhs; + const BinaryOp m_functor; +}; + +template +class CwiseBinaryOpImpl + : public internal::dense_xpr_base >::type +{ + typedef CwiseBinaryOp Derived; + public: + + typedef typename internal::dense_xpr_base >::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE( Derived ) + + EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const + { + return derived().functor()(derived().lhs().coeff(row, col), + derived().rhs().coeff(row, col)); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + { + return derived().functor().packetOp(derived().lhs().template packet(row, col), + derived().rhs().template packet(row, col)); + } + + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const + { + return 
derived().functor()(derived().lhs().coeff(index), + derived().rhs().coeff(index)); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const + { + return derived().functor().packetOp(derived().lhs().template packet(index), + derived().rhs().template packet(index)); + } +}; + +/** replaces \c *this by \c *this - \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_STRONG_INLINE Derived & +MatrixBase::operator-=(const MatrixBase &other) +{ + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + tmp = other.derived(); + return derived(); +} + +/** replaces \c *this by \c *this + \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_STRONG_INLINE Derived & +MatrixBase::operator+=(const MatrixBase& other) +{ + SelfCwiseBinaryOp, Derived, OtherDerived> tmp(derived()); + tmp = other.derived(); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_CWISE_BINARY_OP_H diff --git a/latan/Eigen/src/Core/CwiseNullaryOp.h b/latan/Eigen/src/Core/CwiseNullaryOp.h new file mode 100644 index 0000000..2635a62 --- /dev/null +++ b/latan/Eigen/src/Core/CwiseNullaryOp.h @@ -0,0 +1,864 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_NULLARY_OP_H +#define EIGEN_CWISE_NULLARY_OP_H + +namespace Eigen { + +/** \class CwiseNullaryOp + * \ingroup Core_Module + * + * \brief Generic expression of a matrix where all coefficients are defined by a functor + * + * \param NullaryOp template functor implementing the operator + * \param PlainObjectType the underlying plain matrix/array type + * + * This class represents an expression of a generic nullary operator. + * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() methods, + * and most of the time this is the only way it is used. + * + * However, if you want to write a function returning such an expression, you + * will need to use this class. + * + * \sa class CwiseUnaryOp, class CwiseBinaryOp, DenseBase::NullaryExpr() + */ + +namespace internal { +template +struct traits > : traits +{ + enum { + Flags = (traits::Flags + & ( HereditaryBits + | (functor_has_linear_access::ret ? LinearAccessBit : 0) + | (functor_traits::PacketAccess ? PacketAccessBit : 0))) + | (functor_traits::IsRepeatable ? 
0 : EvalBeforeNestingBit), + CoeffReadCost = functor_traits::Cost + }; +}; +} + +template +class CwiseNullaryOp : internal::no_assignment_operator, + public internal::dense_xpr_base< CwiseNullaryOp >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(CwiseNullaryOp) + + CwiseNullaryOp(Index rows, Index cols, const NullaryOp& func = NullaryOp()) + : m_rows(rows), m_cols(cols), m_functor(func) + { + eigen_assert(rows >= 0 + && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + && cols >= 0 + && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); + } + + EIGEN_STRONG_INLINE Index rows() const { return m_rows.value(); } + EIGEN_STRONG_INLINE Index cols() const { return m_cols.value(); } + + EIGEN_STRONG_INLINE const Scalar coeff(Index rows, Index cols) const + { + return m_functor(rows, cols); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + { + return m_functor.packetOp(row, col); + } + + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const + { + return m_functor(index); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const + { + return m_functor.packetOp(index); + } + + /** \returns the functor representing the nullary operation */ + const NullaryOp& functor() const { return m_functor; } + + protected: + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; + const NullaryOp m_functor; +}; + + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_STRONG_INLINE const CwiseNullaryOp +DenseBase::NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func) +{ + return CwiseNullaryOp(rows, cols, func); +} + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_STRONG_INLINE const CwiseNullaryOp +DenseBase::NullaryExpr(Index size, const CustomNullaryOp& func) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + if(RowsAtCompileTime == 1) return CwiseNullaryOp(1, size, func); + else return CwiseNullaryOp(size, 1, func); +} + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * This variant is only for fixed-size DenseBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * The template parameter \a CustomNullaryOp is the type of the functor. 
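+ *
+ * A minimal usage sketch (editor's illustration; \c constFive is a hypothetical
+ * functor, and both call operators are provided so that either the linear or the
+ * (row, col) evaluation path can be used):
+ * \code
+ * struct constFive {
+ *   double operator()(Eigen::DenseIndex, Eigen::DenseIndex) const { return 5.0; }
+ *   double operator()(Eigen::DenseIndex) const { return 5.0; }
+ * };
+ * Eigen::MatrixXd m = Eigen::MatrixXd::NullaryExpr(2, 3, constFive());
+ * \endcode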
+ * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_STRONG_INLINE const CwiseNullaryOp +DenseBase::NullaryExpr(const CustomNullaryOp& func) +{ + return CwiseNullaryOp(RowsAtCompileTime, ColsAtCompileTime, func); +} + +/** \returns an expression of a constant matrix of value \a value + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this DenseBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(Index rows, Index cols, const Scalar& value) +{ + return DenseBase::NullaryExpr(rows, cols, internal::scalar_constant_op(value)); +} + +/** \returns an expression of a constant matrix of value \a value + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this DenseBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(Index size, const Scalar& value) +{ + return DenseBase::NullaryExpr(size, internal::scalar_constant_op(value)); +} + +/** \returns an expression of a constant matrix of value \a value + * + * This variant is only for fixed-size DenseBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Constant(const Scalar& value) +{ + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return DenseBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_constant_op(value)); +} + +/** + * \brief Sets a linearly space vector. + * + * The function generates 'size' equally spaced values in the closed interval [low,high]. + * This particular version of LinSpaced() uses sequential access, i.e. vector access is + * assumed to be a(0), a(1), ..., a(size). This assumption allows for better vectorization + * and yields faster code than the random access version. + * + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * Example: \include DenseBase_LinSpaced_seq.cpp + * Output: \verbinclude DenseBase_LinSpaced_seq.out + * + * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Index,Scalar,Scalar), CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::SequentialLinSpacedReturnType +DenseBase::LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return DenseBase::NullaryExpr(size, internal::linspaced_op(low,high,size)); +} + +/** + * \copydoc DenseBase::LinSpaced(Sequential_t, Index, const Scalar&, const Scalar&) + * Special version for fixed size types which does not require the size parameter. 
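+ *
+ * (Editor's usage sketch for the Constant() and LinSpaced() factories, illustrative only:)
+ * \code
+ * Eigen::MatrixXd c = Eigen::MatrixXd::Constant(2, 3, 4.5);    // 2x3, every coefficient is 4.5
+ * Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0); // 0, 0.25, 0.5, 0.75, 1
+ * \endcode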
+ */ +template +EIGEN_STRONG_INLINE const typename DenseBase::SequentialLinSpacedReturnType +DenseBase::LinSpaced(Sequential_t, const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); +} + +/** + * \brief Sets a linearly space vector. + * + * The function generates 'size' equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * Example: \include DenseBase_LinSpaced.cpp + * Output: \verbinclude DenseBase_LinSpaced.out + * + * \sa setLinSpaced(Index,const Scalar&,const Scalar&), LinSpaced(Sequential_t,Index,const Scalar&,const Scalar&,Index), CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType +DenseBase::LinSpaced(Index size, const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return DenseBase::NullaryExpr(size, internal::linspaced_op(low,high,size)); +} + +/** + * \copydoc DenseBase::LinSpaced(Index, const Scalar&, const Scalar&) + * Special version for fixed size types which does not require the size parameter. + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::RandomAccessLinSpacedReturnType +DenseBase::LinSpaced(const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return DenseBase::NullaryExpr(Derived::SizeAtCompileTime, internal::linspaced_op(low,high,Derived::SizeAtCompileTime)); +} + +/** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ +template +bool DenseBase::isApproxToConstant +(const Scalar& value, RealScalar prec) const +{ + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) + if(!internal::isApprox(this->coeff(i, j), value, prec)) + return false; + return true; +} + +/** This is just an alias for isApproxToConstant(). + * + * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ +template +bool DenseBase::isConstant +(const Scalar& value, RealScalar prec) const +{ + return isApproxToConstant(value, prec); +} + +/** Alias for setConstant(): sets all coefficients in this expression to \a value. + * + * \sa setConstant(), Constant(), class CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE void DenseBase::fill(const Scalar& value) +{ + setConstant(value); +} + +/** Sets all coefficients in this expression to \a value. + * + * \sa fill(), setConstant(Index,const Scalar&), setConstant(Index,Index,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() + */ +template +EIGEN_STRONG_INLINE Derived& DenseBase::setConstant(const Scalar& value) +{ + return derived() = Constant(rows(), cols(), value); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to the given \a value. 
+ * + * \only_for_vectors + * + * Example: \include Matrix_setConstant_int.cpp + * Output: \verbinclude Matrix_setConstant_int.out + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setConstant(Index size, const Scalar& value) +{ + resize(size); + return setConstant(value); +} + +/** Resizes to the given size, and sets all coefficients in this expression to the given \a value. + * + * \param rows the new number of rows + * \param cols the new number of columns + * \param value the value to which all coefficients are set + * + * Example: \include Matrix_setConstant_int_int.cpp + * Output: \verbinclude Matrix_setConstant_int_int.out + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(Index,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setConstant(Index rows, Index cols, const Scalar& value) +{ + resize(rows, cols); + return setConstant(value); +} + +/** + * \brief Sets a linearly space vector. + * + * The function generates 'size' equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * Example: \include DenseBase_setLinSpaced.cpp + * Output: \verbinclude DenseBase_setLinSpaced.out + * + * \sa CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(Index size, const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return derived() = Derived::NullaryExpr(size, internal::linspaced_op(low,high,size)); +} + +/** + * \brief Sets a linearly space vector. + * + * The function fill *this with equally spaced values in the closed interval [low,high]. + * When size is set to 1, a vector of length 1 containing 'high' is returned. + * + * \only_for_vectors + * + * \sa setLinSpaced(Index, const Scalar&, const Scalar&), CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE Derived& DenseBase::setLinSpaced(const Scalar& low, const Scalar& high) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return setLinSpaced(size(), low, high); +} + +// zero: + +/** \returns an expression of a zero matrix. + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * Example: \include MatrixBase_zero_int_int.cpp + * Output: \verbinclude MatrixBase_zero_int_int.out + * + * \sa Zero(), Zero(Index) + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Zero(Index rows, Index cols) +{ + return Constant(rows, cols, Scalar(0)); +} + +/** \returns an expression of a zero vector. + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. 
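+ *
+ * (Editor's sketch, illustrative only:)
+ * \code
+ * Eigen::VectorXd z = Eigen::VectorXd::Zero(4);  // [0, 0, 0, 0]
+ * Eigen::VectorXd v(4);
+ * v.setLinSpaced(4, 0.0, 3.0);                   // 0, 1, 2, 3
+ * \endcode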
+ * + * Example: \include MatrixBase_zero_int.cpp + * Output: \verbinclude MatrixBase_zero_int.out + * + * \sa Zero(), Zero(Index,Index) + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Zero(Index size) +{ + return Constant(size, Scalar(0)); +} + +/** \returns an expression of a fixed-size zero matrix or vector. + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_zero.cpp + * Output: \verbinclude MatrixBase_zero.out + * + * \sa Zero(Index), Zero(Index,Index) + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Zero() +{ + return Constant(Scalar(0)); +} + +/** \returns true if *this is approximately equal to the zero matrix, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isZero.cpp + * Output: \verbinclude MatrixBase_isZero.out + * + * \sa class CwiseNullaryOp, Zero() + */ +template +bool DenseBase::isZero(RealScalar prec) const +{ + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < rows(); ++i) + if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) + return false; + return true; +} + +/** Sets all coefficients in this expression to zero. + * + * Example: \include MatrixBase_setZero.cpp + * Output: \verbinclude MatrixBase_setZero.out + * + * \sa class CwiseNullaryOp, Zero() + */ +template +EIGEN_STRONG_INLINE Derived& DenseBase::setZero() +{ + return setConstant(Scalar(0)); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to zero. + * + * \only_for_vectors + * + * Example: \include Matrix_setZero_int.cpp + * Output: \verbinclude Matrix_setZero_int.out + * + * \sa DenseBase::setZero(), setZero(Index,Index), class CwiseNullaryOp, DenseBase::Zero() + */ +template +EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setZero(Index size) +{ + resize(size); + return setConstant(Scalar(0)); +} + +/** Resizes to the given size, and sets all coefficients in this expression to zero. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setZero_int_int.cpp + * Output: \verbinclude Matrix_setZero_int_int.out + * + * \sa DenseBase::setZero(), setZero(Index), class CwiseNullaryOp, DenseBase::Zero() + */ +template +EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setZero(Index rows, Index cols) +{ + resize(rows, cols); + return setConstant(Scalar(0)); +} + +// ones: + +/** \returns an expression of a matrix where all coefficients equal one. + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used + * instead. + * + * Example: \include MatrixBase_ones_int_int.cpp + * Output: \verbinclude MatrixBase_ones_int_int.out + * + * \sa Ones(), Ones(Index), isOnes(), class Ones + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Ones(Index rows, Index cols) +{ + return Constant(rows, cols, Scalar(1)); +} + +/** \returns an expression of a vector where all coefficients equal one. + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. 
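+ *
+ * (Editor's sketch, illustrative only:)
+ * \code
+ * Eigen::Matrix3d z = Eigen::Matrix3d::Zero();
+ * bool allZero = z.isZero();                     // true
+ * Eigen::VectorXd o = Eigen::VectorXd::Ones(4);  // [1, 1, 1, 1]
+ * \endcode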
+ * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Ones() should be used + * instead. + * + * Example: \include MatrixBase_ones_int.cpp + * Output: \verbinclude MatrixBase_ones_int.out + * + * \sa Ones(), Ones(Index,Index), isOnes(), class Ones + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Ones(Index size) +{ + return Constant(size, Scalar(1)); +} + +/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_ones.cpp + * Output: \verbinclude MatrixBase_ones.out + * + * \sa Ones(Index), Ones(Index,Index), isOnes(), class Ones + */ +template +EIGEN_STRONG_INLINE const typename DenseBase::ConstantReturnType +DenseBase::Ones() +{ + return Constant(Scalar(1)); +} + +/** \returns true if *this is approximately equal to the matrix where all coefficients + * are equal to 1, within the precision given by \a prec. + * + * Example: \include MatrixBase_isOnes.cpp + * Output: \verbinclude MatrixBase_isOnes.out + * + * \sa class CwiseNullaryOp, Ones() + */ +template +bool DenseBase::isOnes +(RealScalar prec) const +{ + return isApproxToConstant(Scalar(1), prec); +} + +/** Sets all coefficients in this expression to one. + * + * Example: \include MatrixBase_setOnes.cpp + * Output: \verbinclude MatrixBase_setOnes.out + * + * \sa class CwiseNullaryOp, Ones() + */ +template +EIGEN_STRONG_INLINE Derived& DenseBase::setOnes() +{ + return setConstant(Scalar(1)); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to one. + * + * \only_for_vectors + * + * Example: \include Matrix_setOnes_int.cpp + * Output: \verbinclude Matrix_setOnes_int.out + * + * \sa MatrixBase::setOnes(), setOnes(Index,Index), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setOnes(Index size) +{ + resize(size); + return setConstant(Scalar(1)); +} + +/** Resizes to the given size, and sets all coefficients in this expression to one. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setOnes_int_int.cpp + * Output: \verbinclude Matrix_setOnes_int_int.out + * + * \sa MatrixBase::setOnes(), setOnes(Index), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setOnes(Index rows, Index cols) +{ + resize(rows, cols); + return setConstant(Scalar(1)); +} + +// Identity: + +/** \returns an expression of the identity matrix (not necessarily square). + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used + * instead. 
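+ *
+ * (Editor's sketch, illustrative only:)
+ * \code
+ * Eigen::MatrixXd id = Eigen::MatrixXd::Identity(3, 4);  // 3x4, ones on the main diagonal
+ * \endcode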
+ * + * Example: \include MatrixBase_identity_int_int.cpp + * Output: \verbinclude MatrixBase_identity_int_int.out + * + * \sa Identity(), setIdentity(), isIdentity() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType +MatrixBase::Identity(Index rows, Index cols) +{ + return DenseBase::NullaryExpr(rows, cols, internal::scalar_identity_op()); +} + +/** \returns an expression of the identity matrix (not necessarily square). + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variant taking size arguments. + * + * Example: \include MatrixBase_identity.cpp + * Output: \verbinclude MatrixBase_identity.out + * + * \sa Identity(Index,Index), setIdentity(), isIdentity() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType +MatrixBase::Identity() +{ + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return MatrixBase::NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_identity_op()); +} + +/** \returns true if *this is approximately equal to the identity matrix + * (not necessarily square), + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isIdentity.cpp + * Output: \verbinclude MatrixBase_isIdentity.out + * + * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), setIdentity() + */ +template +bool MatrixBase::isIdentity +(RealScalar prec) const +{ + for(Index j = 0; j < cols(); ++j) + { + for(Index i = 0; i < rows(); ++i) + { + if(i == j) + { + if(!internal::isApprox(this->coeff(i, j), static_cast(1), prec)) + return false; + } + else + { + if(!internal::isMuchSmallerThan(this->coeff(i, j), static_cast(1), prec)) + return false; + } + } + } + return true; +} + +namespace internal { + +template=16)> +struct setIdentity_impl +{ + static EIGEN_STRONG_INLINE Derived& run(Derived& m) + { + return m = Derived::Identity(m.rows(), m.cols()); + } +}; + +template +struct setIdentity_impl +{ + typedef typename Derived::Index Index; + static EIGEN_STRONG_INLINE Derived& run(Derived& m) + { + m.setZero(); + const Index size = (std::min)(m.rows(), m.cols()); + for(Index i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); + return m; + } +}; + +} // end namespace internal + +/** Writes the identity expression (not necessarily square) into *this. + * + * Example: \include MatrixBase_setIdentity.cpp + * Output: \verbinclude MatrixBase_setIdentity.out + * + * \sa class CwiseNullaryOp, Identity(), Identity(Index,Index), isIdentity() + */ +template +EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() +{ + return internal::setIdentity_impl::run(derived()); +} + +/** \brief Resizes to the given size, and writes the identity expression (not necessarily square) into *this. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setIdentity_int_int.cpp + * Output: \verbinclude Matrix_setIdentity_int_int.out + * + * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() + */ +template +EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity(Index rows, Index cols) +{ + derived().resize(rows, cols); + return setIdentity(); +} + +/** \returns an expression of the i-th unit (basis) vector. 
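+ *
+ * (Editor's sketch, illustrative only: the third canonical basis vector of a
+ * five-dimensional space:)
+ * \code
+ * Eigen::VectorXd e2 = Eigen::VectorXd::Unit(5, 2);  // [0, 0, 1, 0, 0]
+ * \endcode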
+ * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index size, Index i) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return BasisReturnType(SquareMatrixType::Identity(size,size), i); +} + +/** \returns an expression of the i-th unit (basis) vector. + * + * \only_for_vectors + * + * This variant is for fixed-size vector only. + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(Index i) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return BasisReturnType(SquareMatrixType::Identity(),i); +} + +/** \returns an expression of the X axis unit vector (1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitX() +{ return Derived::Unit(0); } + +/** \returns an expression of the Y axis unit vector (0,1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitY() +{ return Derived::Unit(1); } + +/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitZ() +{ return Derived::Unit(2); } + +/** \returns an expression of the W axis unit vector (0,0,0,1) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(Index,Index), MatrixBase::Unit(Index), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitW() +{ return Derived::Unit(3); } + +} // end namespace Eigen + +#endif // EIGEN_CWISE_NULLARY_OP_H diff --git a/latan/Eigen/src/Core/CwiseUnaryOp.h b/latan/Eigen/src/Core/CwiseUnaryOp.h new file mode 100644 index 0000000..063355a --- /dev/null +++ b/latan/Eigen/src/Core/CwiseUnaryOp.h @@ -0,0 +1,126 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_UNARY_OP_H +#define EIGEN_CWISE_UNARY_OP_H + +namespace Eigen { + +/** \class CwiseUnaryOp + * \ingroup Core_Module + * + * \brief Generic expression where a coefficient-wise unary operator is applied to an expression + * + * \param UnaryOp template functor implementing the operator + * \param XprType the type of the expression to which we are applying the unary operator + * + * This class represents an expression where a unary operator is applied to an expression. 
+ * It is the return type of all operations taking exactly 1 input expression, regardless of the + * presence of other inputs such as scalars. For example, the operator* in the expression 3*matrix + * is considered unary, because only the right-hand side is an expression, and its + * return type is a specialization of CwiseUnaryOp. + * + * Most of the time, this is the only way that it is used, so you typically don't have to name + * CwiseUnaryOp types explicitly. + * + * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp + */ + +namespace internal { +template +struct traits > + : traits +{ + typedef typename result_of< + UnaryOp(typename XprType::Scalar) + >::type Scalar; + typedef typename XprType::Nested XprTypeNested; + typedef typename remove_reference::type _XprTypeNested; + enum { + Flags = _XprTypeNested::Flags & ( + HereditaryBits | LinearAccessBit | AlignedBit + | (functor_traits::PacketAccess ? PacketAccessBit : 0)), + CoeffReadCost = _XprTypeNested::CoeffReadCost + functor_traits::Cost + }; +}; +} + +template +class CwiseUnaryOpImpl; + +template +class CwiseUnaryOp : internal::no_assignment_operator, + public CwiseUnaryOpImpl::StorageKind> +{ + public: + + typedef typename CwiseUnaryOpImpl::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp) + + inline CwiseUnaryOp(const XprType& xpr, const UnaryOp& func = UnaryOp()) + : m_xpr(xpr), m_functor(func) {} + + EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); } + + /** \returns the functor representing the unary operation */ + const UnaryOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + const typename internal::remove_all::type& + nestedExpression() const { return m_xpr; } + + /** \returns the nested expression */ + typename internal::remove_all::type& + nestedExpression() { return m_xpr.const_cast_derived(); } + + protected: + typename XprType::Nested m_xpr; + const UnaryOp m_functor; +}; + +// This is the generic implementation for dense storage. +// It can be used for any expression types implementing the dense concept. +template +class CwiseUnaryOpImpl + : public internal::dense_xpr_base >::type +{ + public: + + typedef CwiseUnaryOp Derived; + typedef typename internal::dense_xpr_base >::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) + + EIGEN_STRONG_INLINE const Scalar coeff(Index row, Index col) const + { + return derived().functor()(derived().nestedExpression().coeff(row, col)); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + { + return derived().functor().packetOp(derived().nestedExpression().template packet(row, col)); + } + + EIGEN_STRONG_INLINE const Scalar coeff(Index index) const + { + return derived().functor()(derived().nestedExpression().coeff(index)); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const + { + return derived().functor().packetOp(derived().nestedExpression().template packet(index)); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CWISE_UNARY_OP_H diff --git a/latan/Eigen/src/Core/CwiseUnaryView.h b/latan/Eigen/src/Core/CwiseUnaryView.h new file mode 100644 index 0000000..57fbd72 --- /dev/null +++ b/latan/Eigen/src/Core/CwiseUnaryView.h @@ -0,0 +1,136 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
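+
+// ---------------------------------------------------------------------------
+// Editor's note: an illustrative sketch of the coefficient-wise unary
+// expressions implemented by CwiseUnaryOp above (not part of the original
+// diff; the squareOp functor is hypothetical and <Eigen/Dense> is assumed):
+//
+//   struct squareOp { double operator()(double x) const { return x * x; } };
+//
+//   Eigen::Vector3d v(-1.0, 2.0, -3.0);
+//   Eigen::Vector3d a = v.cwiseAbs();             // |v|, a CwiseUnaryOp expression
+//   Eigen::Vector3d s = v.unaryExpr(squareOp());  // custom coefficient-wise functor
+//
+// Nothing is evaluated before the two assignments; both right-hand sides are
+// lazy CwiseUnaryOp expressions.
+// ---------------------------------------------------------------------------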
+// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_CWISE_UNARY_VIEW_H +#define EIGEN_CWISE_UNARY_VIEW_H + +namespace Eigen { + +/** \class CwiseUnaryView + * \ingroup Core_Module + * + * \brief Generic lvalue expression of a coefficient-wise unary operator of a matrix or a vector + * + * \param ViewOp template functor implementing the view + * \param MatrixType the type of the matrix we are applying the unary operator + * + * This class represents a lvalue expression of a generic unary view operator of a matrix or a vector. + * It is the return type of real() and imag(), and most of the time this is the only way it is used. + * + * \sa MatrixBase::unaryViewExpr(const CustomUnaryOp &) const, class CwiseUnaryOp + */ + +namespace internal { +template +struct traits > + : traits +{ + typedef typename result_of< + ViewOp(typename traits::Scalar) + >::type Scalar; + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename remove_all::type _MatrixTypeNested; + enum { + Flags = (traits<_MatrixTypeNested>::Flags & (HereditaryBits | LvalueBit | LinearAccessBit | DirectAccessBit)), + CoeffReadCost = traits<_MatrixTypeNested>::CoeffReadCost + functor_traits::Cost, + MatrixTypeInnerStride = inner_stride_at_compile_time::ret, + // need to cast the sizeof's from size_t to int explicitly, otherwise: + // "error: no integral type can represent all of the enumerator values + InnerStrideAtCompileTime = MatrixTypeInnerStride == Dynamic + ? int(Dynamic) + : int(MatrixTypeInnerStride) * int(sizeof(typename traits::Scalar) / sizeof(Scalar)), + OuterStrideAtCompileTime = outer_stride_at_compile_time::ret == Dynamic + ? 
int(Dynamic) + : outer_stride_at_compile_time::ret * int(sizeof(typename traits::Scalar) / sizeof(Scalar)) + }; +}; +} + +template +class CwiseUnaryViewImpl; + +template +class CwiseUnaryView : internal::no_assignment_operator, + public CwiseUnaryViewImpl::StorageKind> +{ + public: + + typedef typename CwiseUnaryViewImpl::StorageKind>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryView) + + inline CwiseUnaryView(const MatrixType& mat, const ViewOp& func = ViewOp()) + : m_matrix(mat), m_functor(func) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(CwiseUnaryView) + + EIGEN_STRONG_INLINE Index rows() const { return m_matrix.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return m_matrix.cols(); } + + /** \returns the functor representing unary operation */ + const ViewOp& functor() const { return m_functor; } + + /** \returns the nested expression */ + const typename internal::remove_all::type& + nestedExpression() const { return m_matrix; } + + /** \returns the nested expression */ + typename internal::remove_all::type& + nestedExpression() { return m_matrix.const_cast_derived(); } + + protected: + // FIXME changed from MatrixType::Nested because of a weird compilation error with sun CC + typename internal::nested::type m_matrix; + ViewOp m_functor; +}; + +template +class CwiseUnaryViewImpl + : public internal::dense_xpr_base< CwiseUnaryView >::type +{ + public: + + typedef CwiseUnaryView Derived; + typedef typename internal::dense_xpr_base< CwiseUnaryView >::type Base; + + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) + + inline Index innerStride() const + { + return derived().nestedExpression().innerStride() * sizeof(typename internal::traits::Scalar) / sizeof(Scalar); + } + + inline Index outerStride() const + { + return derived().nestedExpression().outerStride() * sizeof(typename internal::traits::Scalar) / sizeof(Scalar); + } + + EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const + { + return derived().functor()(derived().nestedExpression().coeff(row, col)); + } + + EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const + { + return derived().functor()(derived().nestedExpression().coeff(index)); + } + + EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) + { + return derived().functor()(const_cast_derived().nestedExpression().coeffRef(row, col)); + } + + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + return derived().functor()(const_cast_derived().nestedExpression().coeffRef(index)); + } +}; + +} // end namespace Eigen + +#endif // EIGEN_CWISE_UNARY_VIEW_H diff --git a/latan/Eigen/src/Core/DenseBase.h b/latan/Eigen/src/Core/DenseBase.h new file mode 100644 index 0000000..1cc0314 --- /dev/null +++ b/latan/Eigen/src/Core/DenseBase.h @@ -0,0 +1,533 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DENSEBASE_H +#define EIGEN_DENSEBASE_H + +namespace Eigen { + +/** \class DenseBase + * \ingroup Core_Module + * + * \brief Base class for all dense matrices, vectors, and arrays + * + * This class is the base that is inherited by all dense objects (matrix, vector, arrays, + * and related expression types). The common Eigen API for dense objects is contained in this class. 
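+ *
+ * (Editor's note, illustrative only: the lvalue views implemented in
+ * CwiseUnaryView.h above can be written through, e.g.:)
+ * \code
+ * Eigen::MatrixXcd m = Eigen::MatrixXcd::Zero(2, 2);
+ * m.real() = Eigen::MatrixXd::Ones(2, 2);  // assigns through the real() view
+ * \endcode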
+ * + * \tparam Derived is the derived type, e.g., a matrix type or an expression. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_DENSEBASE_PLUGIN. + * + * \sa \ref TopicClassHierarchy + */ +template class DenseBase +#ifndef EIGEN_PARSED_BY_DOXYGEN + : public internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real> +#else + : public DenseCoeffsBase +#endif // not EIGEN_PARSED_BY_DOXYGEN +{ + public: + using internal::special_scalar_op_base::Scalar, + typename NumTraits::Scalar>::Real>::operator*; + + class InnerIterator; + + typedef typename internal::traits::StorageKind StorageKind; + + /** \brief The type of indices + * \details To change this, \c \#define the preprocessor symbol \c EIGEN_DEFAULT_DENSE_INDEX_TYPE. + * \sa \ref TopicPreprocessorDirectives. + */ + typedef typename internal::traits::Index Index; + + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + typedef DenseCoeffsBase Base; + using Base::derived; + using Base::const_cast_derived; + using Base::rows; + using Base::cols; + using Base::size; + using Base::rowIndexByOuterInner; + using Base::colIndexByOuterInner; + using Base::coeff; + using Base::coeffByOuterInner; + using Base::packet; + using Base::packetByOuterInner; + using Base::writePacket; + using Base::writePacketByOuterInner; + using Base::coeffRef; + using Base::coeffRefByOuterInner; + using Base::copyCoeff; + using Base::copyCoeffByOuterInner; + using Base::copyPacket; + using Base::copyPacketByOuterInner; + using Base::operator(); + using Base::operator[]; + using Base::x; + using Base::y; + using Base::z; + using Base::w; + using Base::stride; + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + using Base::colStride; + typedef typename Base::CoeffReturnType CoeffReturnType; + + enum { + + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + /**< The number of rows at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ + + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + /**< The number of columns at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ + + + SizeAtCompileTime = (internal::size_at_compile_time::RowsAtCompileTime, + internal::traits::ColsAtCompileTime>::ret), + /**< This is equal to the number of coefficients, i.e. the number of + * rows times the number of columns, or to \a Dynamic if this is not + * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ + + MaxRowsAtCompileTime = internal::traits::MaxRowsAtCompileTime, + /**< This value is equal to the maximum possible number of rows that this expression + * might have. If this expression might have an arbitrarily high number of rows, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. 
+ * + * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime + */ + + MaxColsAtCompileTime = internal::traits::MaxColsAtCompileTime, + /**< This value is equal to the maximum possible number of columns that this expression + * might have. If this expression might have an arbitrarily high number of columns, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime + */ + + MaxSizeAtCompileTime = (internal::size_at_compile_time::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime>::ret), + /**< This value is equal to the maximum possible number of coefficients that this expression + * might have. If this expression might have an arbitrarily high number of coefficients, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime + */ + + IsVectorAtCompileTime = internal::traits::MaxRowsAtCompileTime == 1 + || internal::traits::MaxColsAtCompileTime == 1, + /**< This is set to true if either the number of rows or the number of + * columns is known at compile-time to be equal to 1. Indeed, in that case, + * we are dealing with a column-vector (if there is only one column) or with + * a row-vector (if there is only one row). */ + + Flags = internal::traits::Flags, + /**< This stores expression \ref flags flags which may or may not be inherited by new expressions + * constructed from this one. See the \ref flags "list of flags". + */ + + IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression has row-major storage order. */ + + InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) + : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + + CoeffReadCost = internal::traits::CoeffReadCost, + /**< This is a rough measure of how expensive it is to read one coefficient from + * this expression. + */ + + InnerStrideAtCompileTime = internal::inner_stride_at_compile_time::ret, + OuterStrideAtCompileTime = internal::outer_stride_at_compile_time::ret + }; + + enum { ThisConstantIsPrivateInPlainObjectBase }; + + /** \returns the number of nonzero coefficients which is in practice the number + * of stored coefficients. */ + inline Index nonZeros() const { return size(); } + /** \returns true if either the number of rows or the number of columns is equal to 1. + * In other words, this function returns + * \code rows()==1 || cols()==1 \endcode + * \sa rows(), cols(), IsVectorAtCompileTime. */ + + /** \returns the outer size. + * + * \note For a vector, this returns just 1. For a matrix (non-vector), this is the major dimension + * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of columns for a + * column-major matrix, and the number of rows for a row-major matrix. */ + Index outerSize() const + { + return IsVectorAtCompileTime ? 1 + : int(IsRowMajor) ? this->rows() : this->cols(); + } + + /** \returns the inner size. + * + * \note For a vector, this is just the size. For a matrix (non-vector), this is the minor dimension + * with respect to the \ref TopicStorageOrders "storage order", i.e., the number of rows for a + * column-major matrix, and the number of columns for a row-major matrix. 
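+ *
+ * (Editor's sketch: for a column-major Eigen::MatrixXd m(3, 4), innerSize()
+ * returns 3 and outerSize() returns 4.)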
*/ + Index innerSize() const + { + return IsVectorAtCompileTime ? this->size() + : int(IsRowMajor) ? this->cols() : this->rows(); + } + + /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are + * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does + * nothing else. + */ + void resize(Index size) + { + EIGEN_ONLY_USED_FOR_DEBUG(size); + eigen_assert(size == this->size() + && "DenseBase::resize() does not actually allow to resize."); + } + /** Only plain matrices/arrays, not expressions, may be resized; therefore the only useful resize methods are + * Matrix::resize() and Array::resize(). The present method only asserts that the new size equals the old size, and does + * nothing else. + */ + void resize(Index rows, Index cols) + { + EIGEN_ONLY_USED_FOR_DEBUG(rows); + EIGEN_ONLY_USED_FOR_DEBUG(cols); + eigen_assert(rows == this->rows() && cols == this->cols() + && "DenseBase::resize() does not actually allow to resize."); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp,Derived> ConstantReturnType; + /** \internal Represents a vector with linearly spaced coefficients that allows sequential access only. */ + typedef CwiseNullaryOp,Derived> SequentialLinSpacedReturnType; + /** \internal Represents a vector with linearly spaced coefficients that allows random access. */ + typedef CwiseNullaryOp,Derived> RandomAccessLinSpacedReturnType; + /** \internal the return type of MatrixBase::eigenvalues() */ + typedef Matrix::Scalar>::Real, internal::traits::ColsAtCompileTime, 1> EigenvaluesReturnType; + +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** Copies \a other into *this. \returns a reference to *this. */ + template + Derived& operator=(const DenseBase& other); + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + Derived& operator=(const DenseBase& other); + + template + Derived& operator=(const EigenBase &other); + + template + Derived& operator+=(const EigenBase &other); + + template + Derived& operator-=(const EigenBase &other); + + template + Derived& operator=(const ReturnByValue& func); + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** Copies \a other into *this without evaluating other. \returns a reference to *this. */ + template + Derived& lazyAssign(const DenseBase& other); +#endif // not EIGEN_PARSED_BY_DOXYGEN + + CommaInitializer operator<< (const Scalar& s); + + template + const Flagged flagged() const; + + template + CommaInitializer operator<< (const DenseBase& other); + + Eigen::Transpose transpose(); + typedef const Transpose ConstTransposeReturnType; + ConstTransposeReturnType transpose() const; + void transposeInPlace(); +#ifndef EIGEN_NO_DEBUG + protected: + template + void checkTransposeAliasing(const OtherDerived& other) const; + public: +#endif + + typedef VectorBlock SegmentReturnType; + typedef const VectorBlock ConstSegmentReturnType; + template struct FixedSegmentReturnType { typedef VectorBlock Type; }; + template struct ConstFixedSegmentReturnType { typedef const VectorBlock Type; }; + + // Note: The "DenseBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations. 
+ SegmentReturnType segment(Index start, Index size); + typename DenseBase::ConstSegmentReturnType segment(Index start, Index size) const; + + SegmentReturnType head(Index size); + typename DenseBase::ConstSegmentReturnType head(Index size) const; + + SegmentReturnType tail(Index size); + typename DenseBase::ConstSegmentReturnType tail(Index size) const; + + template typename FixedSegmentReturnType::Type head(); + template typename ConstFixedSegmentReturnType::Type head() const; + + template typename FixedSegmentReturnType::Type tail(); + template typename ConstFixedSegmentReturnType::Type tail() const; + + template typename FixedSegmentReturnType::Type segment(Index start); + template typename ConstFixedSegmentReturnType::Type segment(Index start) const; + + static const ConstantReturnType + Constant(Index rows, Index cols, const Scalar& value); + static const ConstantReturnType + Constant(Index size, const Scalar& value); + static const ConstantReturnType + Constant(const Scalar& value); + + static const SequentialLinSpacedReturnType + LinSpaced(Sequential_t, Index size, const Scalar& low, const Scalar& high); + static const RandomAccessLinSpacedReturnType + LinSpaced(Index size, const Scalar& low, const Scalar& high); + static const SequentialLinSpacedReturnType + LinSpaced(Sequential_t, const Scalar& low, const Scalar& high); + static const RandomAccessLinSpacedReturnType + LinSpaced(const Scalar& low, const Scalar& high); + + template + static const CwiseNullaryOp + NullaryExpr(Index rows, Index cols, const CustomNullaryOp& func); + template + static const CwiseNullaryOp + NullaryExpr(Index size, const CustomNullaryOp& func); + template + static const CwiseNullaryOp + NullaryExpr(const CustomNullaryOp& func); + + static const ConstantReturnType Zero(Index rows, Index cols); + static const ConstantReturnType Zero(Index size); + static const ConstantReturnType Zero(); + static const ConstantReturnType Ones(Index rows, Index cols); + static const ConstantReturnType Ones(Index size); + static const ConstantReturnType Ones(); + + void fill(const Scalar& value); + Derived& setConstant(const Scalar& value); + Derived& setLinSpaced(Index size, const Scalar& low, const Scalar& high); + Derived& setLinSpaced(const Scalar& low, const Scalar& high); + Derived& setZero(); + Derived& setOnes(); + Derived& setRandom(); + + template + bool isApprox(const DenseBase& other, + RealScalar prec = NumTraits::dummy_precision()) const; + bool isMuchSmallerThan(const RealScalar& other, + RealScalar prec = NumTraits::dummy_precision()) const; + template + bool isMuchSmallerThan(const DenseBase& other, + RealScalar prec = NumTraits::dummy_precision()) const; + + bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits::dummy_precision()) const; + bool isConstant(const Scalar& value, RealScalar prec = NumTraits::dummy_precision()) const; + bool isZero(RealScalar prec = NumTraits::dummy_precision()) const; + bool isOnes(RealScalar prec = NumTraits::dummy_precision()) const; + + inline Derived& operator*=(const Scalar& other); + inline Derived& operator/=(const Scalar& other); + + typedef typename internal::add_const_on_value_type::type>::type EvalReturnType; + /** \returns the matrix or vector obtained by evaluating this expression. + * + * Notice that in the case of a plain matrix or vector (not an expression) this function just returns + * a const reference, in order to avoid a useless copy. 
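+ *
+ * (Editor's sketch, illustrative only:)
+ * \code
+ * Eigen::MatrixXd a = Eigen::MatrixXd::Random(2, 2);
+ * Eigen::MatrixXd b = Eigen::MatrixXd::Random(2, 2);
+ * Eigen::MatrixXd c = (a + b).eval();  // forces the sum into a temporary right away
+ * \endcode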
+ */ + EIGEN_STRONG_INLINE EvalReturnType eval() const + { + // Even though MSVC does not honor strong inlining when the return type + // is a dynamic matrix, we desperately need strong inlining for fixed + // size types on MSVC. + return typename internal::eval::type(derived()); + } + + /** swaps *this with the expression \a other. + * + */ + template + void swap(const DenseBase& other, + int = OtherDerived::ThisConstantIsPrivateInPlainObjectBase) + { + SwapWrapper(derived()).lazyAssign(other.derived()); + } + + /** swaps *this with the matrix or array \a other. + * + */ + template + void swap(PlainObjectBase& other) + { + SwapWrapper(derived()).lazyAssign(other.derived()); + } + + + inline const NestByValue nestByValue() const; + inline const ForceAlignedAccess forceAlignedAccess() const; + inline ForceAlignedAccess forceAlignedAccess(); + template inline const typename internal::conditional,Derived&>::type forceAlignedAccessIf() const; + template inline typename internal::conditional,Derived&>::type forceAlignedAccessIf(); + + Scalar sum() const; + Scalar mean() const; + Scalar trace() const; + + Scalar prod() const; + + typename internal::traits::Scalar minCoeff() const; + typename internal::traits::Scalar maxCoeff() const; + + template + typename internal::traits::Scalar minCoeff(IndexType* row, IndexType* col) const; + template + typename internal::traits::Scalar maxCoeff(IndexType* row, IndexType* col) const; + template + typename internal::traits::Scalar minCoeff(IndexType* index) const; + template + typename internal::traits::Scalar maxCoeff(IndexType* index) const; + + template + typename internal::result_of::Scalar)>::type + redux(const BinaryOp& func) const; + + template + void visit(Visitor& func) const; + + inline const WithFormat format(const IOFormat& fmt) const; + + /** \returns the unique coefficient of a 1x1 expression */ + CoeffReturnType value() const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeff(0,0); + } + +/////////// Array module /////////// + + bool all(void) const; + bool any(void) const; + Index count() const; + + typedef VectorwiseOp RowwiseReturnType; + typedef const VectorwiseOp ConstRowwiseReturnType; + typedef VectorwiseOp ColwiseReturnType; + typedef const VectorwiseOp ConstColwiseReturnType; + + ConstRowwiseReturnType rowwise() const; + RowwiseReturnType rowwise(); + ConstColwiseReturnType colwise() const; + ColwiseReturnType colwise(); + + static const CwiseNullaryOp,Derived> Random(Index rows, Index cols); + static const CwiseNullaryOp,Derived> Random(Index size); + static const CwiseNullaryOp,Derived> Random(); + + template + const Select + select(const DenseBase& thenMatrix, + const DenseBase& elseMatrix) const; + + template + inline const Select + select(const DenseBase& thenMatrix, typename ThenDerived::Scalar elseScalar) const; + + template + inline const Select + select(typename ElseDerived::Scalar thenScalar, const DenseBase& elseMatrix) const; + + template RealScalar lpNorm() const; + + template + const Replicate replicate() const; + const Replicate replicate(Index rowFacor,Index colFactor) const; + + typedef Reverse ReverseReturnType; + typedef const Reverse ConstReverseReturnType; + ReverseReturnType reverse(); + ConstReverseReturnType reverse() const; + void reverseInPlace(); + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::DenseBase +# include "../plugins/BlockMethods.h" +# ifdef EIGEN_DENSEBASE_PLUGIN +# include EIGEN_DENSEBASE_PLUGIN +# endif +#undef 
EIGEN_CURRENT_STORAGE_BASE_CLASS + +#ifdef EIGEN2_SUPPORT + + Block corner(CornerType type, Index cRows, Index cCols); + const Block corner(CornerType type, Index cRows, Index cCols) const; + template + Block corner(CornerType type); + template + const Block corner(CornerType type) const; + +#endif // EIGEN2_SUPPORT + + + // disable the use of evalTo for dense objects with a nice compilation error + template inline void evalTo(Dest& ) const + { + EIGEN_STATIC_ASSERT((internal::is_same::value),THE_EVAL_EVALTO_FUNCTION_SHOULD_NEVER_BE_CALLED_FOR_DENSE_OBJECTS); + } + + protected: + /** Default constructor. Do nothing. */ + DenseBase() + { + /* Just checks for self-consistency of the flags. + * Only do it when debugging Eigen, as this borders on paranoiac and could slow compilation down + */ +#ifdef EIGEN_INTERNAL_DEBUGGING + EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, int(IsRowMajor)) + && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, int(!IsRowMajor))), + INVALID_STORAGE_ORDER_FOR_THIS_VECTOR_EXPRESSION) +#endif + } + + private: + explicit DenseBase(int); + DenseBase(int,int); + template explicit DenseBase(const DenseBase&); +}; + +} // end namespace Eigen + +#endif // EIGEN_DENSEBASE_H diff --git a/latan/Eigen/src/Core/DenseCoeffsBase.h b/latan/Eigen/src/Core/DenseCoeffsBase.h new file mode 100644 index 0000000..72704c2 --- /dev/null +++ b/latan/Eigen/src/Core/DenseCoeffsBase.h @@ -0,0 +1,754 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DENSECOEFFSBASE_H +#define EIGEN_DENSECOEFFSBASE_H + +namespace Eigen { + +namespace internal { +template struct add_const_on_value_type_if_arithmetic +{ + typedef typename conditional::value, T, typename add_const_on_value_type::type>::type type; +}; +} + +/** \brief Base class providing read-only coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * \tparam #ReadOnlyAccessors Constant indicating read-only access + * + * This class defines the \c operator() \c const function and friends, which can be used to read specific + * entries of a matrix or array. + * + * \sa DenseCoeffsBase, DenseCoeffsBase, + * \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public EigenBase +{ + public: + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + + // Explanation for this CoeffReturnType typedef. + // - This is the return type of the coeff() method. + // - The LvalueBit means exactly that we can offer a coeffRef() method, which means exactly that we can get references + // to coeffs, which means exactly that we can have coeff() return a const reference (as opposed to returning a value). + // - The is_artihmetic check is required since "const int", "const double", etc. will cause warnings on some systems + // while the declaration of "const T", where T is a non arithmetic type does not. Always returning "const Scalar&" is + // not possible, since the underlying expressions might not offer a valid address the reference could be referring to. 
+ typedef typename internal::conditional::Flags&LvalueBit), + const Scalar&, + typename internal::conditional::value, Scalar, const Scalar>::type + >::type CoeffReturnType; + + typedef typename internal::add_const_on_value_type_if_arithmetic< + typename internal::packet_traits::type + >::type PacketReturnType; + + typedef EigenBase Base; + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + + EIGEN_STRONG_INLINE Index rowIndexByOuterInner(Index outer, Index inner) const + { + return int(Derived::RowsAtCompileTime) == 1 ? 0 + : int(Derived::ColsAtCompileTime) == 1 ? inner + : int(Derived::Flags)&RowMajorBit ? outer + : inner; + } + + EIGEN_STRONG_INLINE Index colIndexByOuterInner(Index outer, Index inner) const + { + return int(Derived::ColsAtCompileTime) == 1 ? 0 + : int(Derived::RowsAtCompileTime) == 1 ? inner + : int(Derived::Flags)&RowMajorBit ? inner + : outer; + } + + /** Short version: don't use this function, use + * \link operator()(Index,Index) const \endlink instead. + * + * Long version: this function is similar to + * \link operator()(Index,Index) const \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator()(Index,Index) const \endlink. + * + * \sa operator()(Index,Index) const, coeffRef(Index,Index), coeff(Index) const + */ + EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const + { + eigen_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().coeff(row, col); + } + + EIGEN_STRONG_INLINE CoeffReturnType coeffByOuterInner(Index outer, Index inner) const + { + return coeff(rowIndexByOuterInner(outer, inner), + colIndexByOuterInner(outer, inner)); + } + + /** \returns the coefficient at given the given row and column. + * + * \sa operator()(Index,Index), operator[](Index) + */ + EIGEN_STRONG_INLINE CoeffReturnType operator()(Index row, Index col) const + { + eigen_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().coeff(row, col); + } + + /** Short version: don't use this function, use + * \link operator[](Index) const \endlink instead. + * + * Long version: this function is similar to + * \link operator[](Index) const \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameter \a index is in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator[](Index) const \endlink. + * + * \sa operator[](Index) const, coeffRef(Index), coeff(Index,Index) const + */ + + EIGEN_STRONG_INLINE CoeffReturnType + coeff(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return derived().coeff(index); + } + + + /** \returns the coefficient at given index. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. 
+ * + * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, + * z() const, w() const + */ + + EIGEN_STRONG_INLINE CoeffReturnType + operator[](Index index) const + { + #ifndef EIGEN2_SUPPORT + EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, + THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) + #endif + eigen_assert(index >= 0 && index < size()); + return derived().coeff(index); + } + + /** \returns the coefficient at given index. + * + * This is synonymous to operator[](Index) const. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index), operator()(Index,Index) const, x() const, y() const, + * z() const, w() const + */ + + EIGEN_STRONG_INLINE CoeffReturnType + operator()(Index index) const + { + eigen_assert(index >= 0 && index < size()); + return derived().coeff(index); + } + + /** equivalent to operator[](0). */ + + EIGEN_STRONG_INLINE CoeffReturnType + x() const { return (*this)[0]; } + + /** equivalent to operator[](1). */ + + EIGEN_STRONG_INLINE CoeffReturnType + y() const { return (*this)[1]; } + + /** equivalent to operator[](2). */ + + EIGEN_STRONG_INLINE CoeffReturnType + z() const { return (*this)[2]; } + + /** equivalent to operator[](3). */ + + EIGEN_STRONG_INLINE CoeffReturnType + w() const { return (*this)[3]; } + + /** \internal + * \returns the packet of coefficients starting at the given row and column. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit. + * + * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ + + template + EIGEN_STRONG_INLINE PacketReturnType packet(Index row, Index col) const + { + eigen_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().template packet(row,col); + } + + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketReturnType packetByOuterInner(Index outer, Index inner) const + { + return packet(rowIndexByOuterInner(outer, inner), + colIndexByOuterInner(outer, inner)); + } + + /** \internal + * \returns the packet of coefficients starting at the given index. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit and the LinearAccessBit. + * + * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ + + template + EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const + { + eigen_internal_assert(index >= 0 && index < size()); + return derived().template packet(index); + } + + protected: + // explanation: DenseBase is doing "using ..." on the methods from DenseCoeffsBase. + // But some methods are only available in the DirectAccess case. + // So we add dummy methods here with these names, so that "using... " doesn't fail. + // It's not private so that the child class DenseBase can access them, and it's not public + // either since it's an implementation detail, so has to be protected. 
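// Illustrative sketch (not part of the patch): the bracket operator guarded by the static
// assertion above is reserved for vectors, matrices use operator()(row, col); x()/y()/z()/w()
// are shorthands for the first four coefficients of a vector. Assumes <Eigen/Dense>.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector4d v(1.0, 2.0, 3.0, 4.0);
  double a = v[2];                       // same coefficient as v(2) or v.z()
  Eigen::Matrix2d m;
  m(0, 0) = a;     m(0, 1) = v.x();
  m(1, 0) = v.y(); m(1, 1) = v.w();
  // m[0] would not compile: the bracket operator is only for vectors.
  std::cout << m << "\n";
  return 0;
}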
+ void coeffRef(); + void coeffRefByOuterInner(); + void writePacket(); + void writePacketByOuterInner(); + void copyCoeff(); + void copyCoeffByOuterInner(); + void copyPacket(); + void copyPacketByOuterInner(); + void stride(); + void innerStride(); + void outerStride(); + void rowStride(); + void colStride(); +}; + +/** \brief Base class providing read/write coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * \tparam #WriteAccessors Constant indicating read/write access + * + * This class defines the non-const \c operator() function and friends, which can be used to write specific + * entries of a matrix or array. This class inherits DenseCoeffsBase which + * defines the const variant for reading specific entries. + * + * \sa DenseCoeffsBase, \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public DenseCoeffsBase +{ + public: + + typedef DenseCoeffsBase Base; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + using Base::coeff; + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + using Base::rowIndexByOuterInner; + using Base::colIndexByOuterInner; + using Base::operator[]; + using Base::operator(); + using Base::x; + using Base::y; + using Base::z; + using Base::w; + + /** Short version: don't use this function, use + * \link operator()(Index,Index) \endlink instead. + * + * Long version: this function is similar to + * \link operator()(Index,Index) \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator()(Index,Index) \endlink. + * + * \sa operator()(Index,Index), coeff(Index, Index) const, coeffRef(Index) + */ + EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) + { + eigen_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().coeffRef(row, col); + } + + EIGEN_STRONG_INLINE Scalar& + coeffRefByOuterInner(Index outer, Index inner) + { + return coeffRef(rowIndexByOuterInner(outer, inner), + colIndexByOuterInner(outer, inner)); + } + + /** \returns a reference to the coefficient at given the given row and column. + * + * \sa operator[](Index) + */ + + EIGEN_STRONG_INLINE Scalar& + operator()(Index row, Index col) + { + eigen_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().coeffRef(row, col); + } + + + /** Short version: don't use this function, use + * \link operator[](Index) \endlink instead. + * + * Long version: this function is similar to + * \link operator[](Index) \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator[](Index) \endlink. 
+ * + * \sa operator[](Index), coeff(Index) const, coeffRef(Index,Index) + */ + + EIGEN_STRONG_INLINE Scalar& + coeffRef(Index index) + { + eigen_internal_assert(index >= 0 && index < size()); + return derived().coeffRef(index); + } + + /** \returns a reference to the coefficient at given index. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() + */ + + EIGEN_STRONG_INLINE Scalar& + operator[](Index index) + { + #ifndef EIGEN2_SUPPORT + EIGEN_STATIC_ASSERT(Derived::IsVectorAtCompileTime, + THE_BRACKET_OPERATOR_IS_ONLY_FOR_VECTORS__USE_THE_PARENTHESIS_OPERATOR_INSTEAD) + #endif + eigen_assert(index >= 0 && index < size()); + return derived().coeffRef(index); + } + + /** \returns a reference to the coefficient at given index. + * + * This is synonymous to operator[](Index). + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](Index) const, operator()(Index,Index), x(), y(), z(), w() + */ + + EIGEN_STRONG_INLINE Scalar& + operator()(Index index) + { + eigen_assert(index >= 0 && index < size()); + return derived().coeffRef(index); + } + + /** equivalent to operator[](0). */ + + EIGEN_STRONG_INLINE Scalar& + x() { return (*this)[0]; } + + /** equivalent to operator[](1). */ + + EIGEN_STRONG_INLINE Scalar& + y() { return (*this)[1]; } + + /** equivalent to operator[](2). */ + + EIGEN_STRONG_INLINE Scalar& + z() { return (*this)[2]; } + + /** equivalent to operator[](3). */ + + EIGEN_STRONG_INLINE Scalar& + w() { return (*this)[3]; } + + /** \internal + * Stores the given packet of coefficients, at the given row and column of this expression. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit. + * + * The \a LoadMode parameter may have the value \a #Aligned or \a #Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ + + template + EIGEN_STRONG_INLINE void writePacket + (Index row, Index col, const typename internal::packet_traits::type& x) + { + eigen_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + derived().template writePacket(row,col,x); + } + + + /** \internal */ + template + EIGEN_STRONG_INLINE void writePacketByOuterInner + (Index outer, Index inner, const typename internal::packet_traits::type& x) + { + writePacket(rowIndexByOuterInner(outer, inner), + colIndexByOuterInner(outer, inner), + x); + } + + /** \internal + * Stores the given packet of coefficients, at the given index in this expression. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit and the LinearAccessBit. + * + * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. 
+ */ + template + EIGEN_STRONG_INLINE void writePacket + (Index index, const typename internal::packet_traits::type& x) + { + eigen_internal_assert(index >= 0 && index < size()); + derived().template writePacket(index,x); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + + /** \internal Copies the coefficient at position (row,col) of other into *this. + * + * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code + * with usual assignments. + * + * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. + */ + + template + EIGEN_STRONG_INLINE void copyCoeff(Index row, Index col, const DenseBase& other) + { + eigen_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + derived().coeffRef(row, col) = other.derived().coeff(row, col); + } + + /** \internal Copies the coefficient at the given index of other into *this. + * + * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code + * with usual assignments. + * + * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. + */ + + template + EIGEN_STRONG_INLINE void copyCoeff(Index index, const DenseBase& other) + { + eigen_internal_assert(index >= 0 && index < size()); + derived().coeffRef(index) = other.derived().coeff(index); + } + + + template + EIGEN_STRONG_INLINE void copyCoeffByOuterInner(Index outer, Index inner, const DenseBase& other) + { + const Index row = rowIndexByOuterInner(outer,inner); + const Index col = colIndexByOuterInner(outer,inner); + // derived() is important here: copyCoeff() may be reimplemented in Derived! + derived().copyCoeff(row, col, other); + } + + /** \internal Copies the packet at position (row,col) of other into *this. + * + * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code + * with usual assignments. + * + * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. + */ + + template + EIGEN_STRONG_INLINE void copyPacket(Index row, Index col, const DenseBase& other) + { + eigen_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + derived().template writePacket(row, col, + other.derived().template packet(row, col)); + } + + /** \internal Copies the packet at the given index of other into *this. + * + * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code + * with usual assignments. + * + * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. + */ + + template + EIGEN_STRONG_INLINE void copyPacket(Index index, const DenseBase& other) + { + eigen_internal_assert(index >= 0 && index < size()); + derived().template writePacket(index, + other.derived().template packet(index)); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE void copyPacketByOuterInner(Index outer, Index inner, const DenseBase& other) + { + const Index row = rowIndexByOuterInner(outer,inner); + const Index col = colIndexByOuterInner(outer,inner); + // derived() is important here: copyCoeff() may be reimplemented in Derived! + derived().template copyPacket< OtherDerived, StoreMode, LoadMode>(row, col, other); + } +#endif + +}; + +/** \brief Base class providing direct read-only coefficient access to matrices and arrays. 
+ * \ingroup Core_Module + * \tparam Derived Type of the derived class + * \tparam #DirectAccessors Constant indicating direct access + * + * This class defines functions to work with strides which can be used to access entries directly. This class + * inherits DenseCoeffsBase which defines functions to access entries read-only using + * \c operator() . + * + * \sa \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase : public DenseCoeffsBase +{ + public: + + typedef DenseCoeffsBase Base; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + + /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. + * + * \sa outerStride(), rowStride(), colStride() + */ + inline Index innerStride() const + { + return derived().innerStride(); + } + + /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns + * in a column-major matrix). + * + * \sa innerStride(), rowStride(), colStride() + */ + inline Index outerStride() const + { + return derived().outerStride(); + } + + // FIXME shall we remove it ? + inline Index stride() const + { + return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); + } + + /** \returns the pointer increment between two consecutive rows. + * + * \sa innerStride(), outerStride(), colStride() + */ + inline Index rowStride() const + { + return Derived::IsRowMajor ? outerStride() : innerStride(); + } + + /** \returns the pointer increment between two consecutive columns. + * + * \sa innerStride(), outerStride(), rowStride() + */ + inline Index colStride() const + { + return Derived::IsRowMajor ? innerStride() : outerStride(); + } +}; + +/** \brief Base class providing direct read/write coefficient access to matrices and arrays. + * \ingroup Core_Module + * \tparam Derived Type of the derived class + * \tparam #DirectWriteAccessors Constant indicating direct access + * + * This class defines functions to work with strides which can be used to access entries directly. This class + * inherits DenseCoeffsBase which defines functions to access entries read/write using + * \c operator(). + * + * \sa \ref TopicClassHierarchy + */ +template +class DenseCoeffsBase + : public DenseCoeffsBase +{ + public: + + typedef DenseCoeffsBase Base; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + using Base::rows; + using Base::cols; + using Base::size; + using Base::derived; + + /** \returns the pointer increment between two consecutive elements within a slice in the inner direction. + * + * \sa outerStride(), rowStride(), colStride() + */ + inline Index innerStride() const + { + return derived().innerStride(); + } + + /** \returns the pointer increment between two consecutive inner slices (for example, between two consecutive columns + * in a column-major matrix). + * + * \sa innerStride(), rowStride(), colStride() + */ + inline Index outerStride() const + { + return derived().outerStride(); + } + + // FIXME shall we remove it ? + inline Index stride() const + { + return Derived::IsVectorAtCompileTime ? innerStride() : outerStride(); + } + + /** \returns the pointer increment between two consecutive rows. 
+ * + * \sa innerStride(), outerStride(), colStride() + */ + inline Index rowStride() const + { + return Derived::IsRowMajor ? outerStride() : innerStride(); + } + + /** \returns the pointer increment between two consecutive columns. + * + * \sa innerStride(), outerStride(), rowStride() + */ + inline Index colStride() const + { + return Derived::IsRowMajor ? innerStride() : outerStride(); + } +}; + +namespace internal { + +template +struct first_aligned_impl +{ + static inline typename Derived::Index run(const Derived&) + { return 0; } +}; + +template +struct first_aligned_impl +{ + static inline typename Derived::Index run(const Derived& m) + { + return internal::first_aligned(&m.const_cast_derived().coeffRef(0,0), m.size()); + } +}; + +/** \internal \returns the index of the first element of the array that is well aligned for vectorization. + * + * There is also the variant first_aligned(const Scalar*, Integer) defined in Memory.h. See it for more + * documentation. + */ +template +static inline typename Derived::Index first_aligned(const Derived& m) +{ + return first_aligned_impl + + ::run(m); +} + +template::ret> +struct inner_stride_at_compile_time +{ + enum { ret = traits::InnerStrideAtCompileTime }; +}; + +template +struct inner_stride_at_compile_time +{ + enum { ret = 0 }; +}; + +template::ret> +struct outer_stride_at_compile_time +{ + enum { ret = traits::OuterStrideAtCompileTime }; +}; + +template +struct outer_stride_at_compile_time +{ + enum { ret = 0 }; +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_DENSECOEFFSBASE_H diff --git a/latan/Eigen/src/Core/DenseStorage.h b/latan/Eigen/src/Core/DenseStorage.h new file mode 100644 index 0000000..4276efe --- /dev/null +++ b/latan/Eigen/src/Core/DenseStorage.h @@ -0,0 +1,314 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2009 Benoit Jacob +// Copyright (C) 2010 Hauke Heibel +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIXSTORAGE_H +#define EIGEN_MATRIXSTORAGE_H + +#ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN EIGEN_DENSE_STORAGE_CTOR_PLUGIN; +#else + #define EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN +#endif + +namespace Eigen { + +namespace internal { + +struct constructor_without_unaligned_array_assert {}; + +/** \internal + * Static array. If the MatrixOrArrayOptions require auto-alignment, the array will be automatically aligned: + * to 16 bytes boundary if the total size is a multiple of 16 bytes. + */ +template +struct plain_array +{ + T array[Size]; + plain_array() {} + plain_array(constructor_without_unaligned_array_assert) {} +}; + +#if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT) + #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) +#elif EIGEN_GNUC_AT_LEAST(4,7) + // GCC 4.7 is too aggressive in its optimizations and remove the alignement test based on the fact the array is declared to be aligned. 
+ // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900 + // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined: + template + EIGEN_ALWAYS_INLINE PtrType eigen_unaligned_array_assert_workaround_gcc47(PtrType array) { return array; } + #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ + eigen_assert((reinterpret_cast(eigen_unaligned_array_assert_workaround_gcc47(array)) & sizemask) == 0 \ + && "this assertion is explained here: " \ + "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ + " **** READ THIS WEB PAGE !!! ****"); +#else + #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask) \ + eigen_assert((reinterpret_cast(array) & sizemask) == 0 \ + && "this assertion is explained here: " \ + "http://eigen.tuxfamily.org/dox-devel/group__TopicUnalignedArrayAssert.html" \ + " **** READ THIS WEB PAGE !!! ****"); +#endif + +template +struct plain_array +{ + EIGEN_USER_ALIGN16 T array[Size]; + plain_array() { EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(0xf) } + plain_array(constructor_without_unaligned_array_assert) {} +}; + +template +struct plain_array +{ + EIGEN_USER_ALIGN16 T array[1]; + plain_array() {} + plain_array(constructor_without_unaligned_array_assert) {} +}; + +} // end namespace internal + +/** \internal + * + * \class DenseStorage + * \ingroup Core_Module + * + * \brief Stores the data of a matrix + * + * This class stores the data of fixed-size, dynamic-size or mixed matrices + * in a way as compact as possible. + * + * \sa Matrix + */ +template class DenseStorage; + +// purely fixed-size matrix +template class DenseStorage +{ + internal::plain_array m_data; + public: + inline explicit DenseStorage() {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()) {} + inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); } + static inline DenseIndex rows(void) {return _Rows;} + static inline DenseIndex cols(void) {return _Cols;} + inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} + inline void resize(DenseIndex,DenseIndex,DenseIndex) {} + inline const T *data() const { return m_data.array; } + inline T *data() { return m_data.array; } +}; + +// null matrix +template class DenseStorage +{ + public: + inline explicit DenseStorage() {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) {} + inline DenseStorage(DenseIndex,DenseIndex,DenseIndex) {} + inline void swap(DenseStorage& ) {} + static inline DenseIndex rows(void) {return _Rows;} + static inline DenseIndex cols(void) {return _Cols;} + inline void conservativeResize(DenseIndex,DenseIndex,DenseIndex) {} + inline void resize(DenseIndex,DenseIndex,DenseIndex) {} + inline const T *data() const { return 0; } + inline T *data() { return 0; } +}; + +// more specializations for null matrices; these are necessary to resolve ambiguities +template class DenseStorage +: public DenseStorage { }; + +template class DenseStorage +: public DenseStorage { }; + +template class DenseStorage +: public DenseStorage { }; + +// dynamic-size matrix with fixed-size storage +template class DenseStorage +{ + internal::plain_array m_data; + DenseIndex m_rows; + DenseIndex m_cols; + public: + inline explicit DenseStorage() : m_rows(0), m_cols(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : 
m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} + inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex cols) : m_rows(rows), m_cols(cols) {} + inline void swap(DenseStorage& other) + { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } + inline DenseIndex rows(void) const {return m_rows;} + inline DenseIndex cols(void) const {return m_cols;} + inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; } + inline void resize(DenseIndex, DenseIndex rows, DenseIndex cols) { m_rows = rows; m_cols = cols; } + inline const T *data() const { return m_data.array; } + inline T *data() { return m_data.array; } +}; + +// dynamic-size matrix with fixed-size storage and fixed width +template class DenseStorage +{ + internal::plain_array m_data; + DenseIndex m_rows; + public: + inline explicit DenseStorage() : m_rows(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()), m_rows(0) {} + inline DenseStorage(DenseIndex, DenseIndex rows, DenseIndex) : m_rows(rows) {} + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } + inline DenseIndex rows(void) const {return m_rows;} + inline DenseIndex cols(void) const {return _Cols;} + inline void conservativeResize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; } + inline void resize(DenseIndex, DenseIndex rows, DenseIndex) { m_rows = rows; } + inline const T *data() const { return m_data.array; } + inline T *data() { return m_data.array; } +}; + +// dynamic-size matrix with fixed-size storage and fixed height +template class DenseStorage +{ + internal::plain_array m_data; + DenseIndex m_cols; + public: + inline explicit DenseStorage() : m_cols(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(internal::constructor_without_unaligned_array_assert()), m_cols(0) {} + inline DenseStorage(DenseIndex, DenseIndex, DenseIndex cols) : m_cols(cols) {} + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } + inline DenseIndex rows(void) const {return _Rows;} + inline DenseIndex cols(void) const {return m_cols;} + inline void conservativeResize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; } + inline void resize(DenseIndex, DenseIndex, DenseIndex cols) { m_cols = cols; } + inline const T *data() const { return m_data.array; } + inline T *data() { return m_data.array; } +}; + +// purely dynamic matrix. 
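// Illustrative sketch (not part of the patch): a matrix with dynamic sizes but fixed maximum
// dimensions is backed by the fixed-size DenseStorage specializations above, so no heap
// allocation takes place; resizing is allowed within the compile-time maxima. Assumes <Eigen/Dense>.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Dynamic sizes, at most 4x4: the storage is a plain (possibly aligned) array on the stack.
  Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, 0, 4, 4> m(2, 3);
  m.setOnes();
  m.resize(4, 4);   // fine: still within the 4x4 compile-time maximum
  std::cout << m.size() << "\n";
  return 0;
}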
+template class DenseStorage +{ + T *m_data; + DenseIndex m_rows; + DenseIndex m_cols; + public: + inline explicit DenseStorage() : m_data(0), m_rows(0), m_cols(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) + : m_data(0), m_rows(0), m_cols(0) {} + inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex cols) + : m_data(internal::conditional_aligned_new_auto(size)), m_rows(rows), m_cols(cols) + { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } + inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); } + inline void swap(DenseStorage& other) + { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } + inline DenseIndex rows(void) const {return m_rows;} + inline DenseIndex cols(void) const {return m_cols;} + inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex cols) + { + m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*m_cols); + m_rows = rows; + m_cols = cols; + } + void resize(DenseIndex size, DenseIndex rows, DenseIndex cols) + { + if(size != m_rows*m_cols) + { + internal::conditional_aligned_delete_auto(m_data, m_rows*m_cols); + if (size) + m_data = internal::conditional_aligned_new_auto(size); + else + m_data = 0; + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN + } + m_rows = rows; + m_cols = cols; + } + inline const T *data() const { return m_data; } + inline T *data() { return m_data; } +}; + +// matrix with dynamic width and fixed height (so that matrix has dynamic size). +template class DenseStorage +{ + T *m_data; + DenseIndex m_cols; + public: + inline explicit DenseStorage() : m_data(0), m_cols(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} + inline DenseStorage(DenseIndex size, DenseIndex, DenseIndex cols) : m_data(internal::conditional_aligned_new_auto(size)), m_cols(cols) + { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } + inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); } + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } + static inline DenseIndex rows(void) {return _Rows;} + inline DenseIndex cols(void) const {return m_cols;} + inline void conservativeResize(DenseIndex size, DenseIndex, DenseIndex cols) + { + m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, _Rows*m_cols); + m_cols = cols; + } + EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex, DenseIndex cols) + { + if(size != _Rows*m_cols) + { + internal::conditional_aligned_delete_auto(m_data, _Rows*m_cols); + if (size) + m_data = internal::conditional_aligned_new_auto(size); + else + m_data = 0; + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN + } + m_cols = cols; + } + inline const T *data() const { return m_data; } + inline T *data() { return m_data; } +}; + +// matrix with dynamic height and fixed width (so that matrix has dynamic size). 
+template class DenseStorage +{ + T *m_data; + DenseIndex m_rows; + public: + inline explicit DenseStorage() : m_data(0), m_rows(0) {} + inline DenseStorage(internal::constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} + inline DenseStorage(DenseIndex size, DenseIndex rows, DenseIndex) : m_data(internal::conditional_aligned_new_auto(size)), m_rows(rows) + { EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN } + inline ~DenseStorage() { internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); } + inline void swap(DenseStorage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } + inline DenseIndex rows(void) const {return m_rows;} + static inline DenseIndex cols(void) {return _Cols;} + inline void conservativeResize(DenseIndex size, DenseIndex rows, DenseIndex) + { + m_data = internal::conditional_aligned_realloc_new_auto(m_data, size, m_rows*_Cols); + m_rows = rows; + } + EIGEN_STRONG_INLINE void resize(DenseIndex size, DenseIndex rows, DenseIndex) + { + if(size != m_rows*_Cols) + { + internal::conditional_aligned_delete_auto(m_data, _Cols*m_rows); + if (size) + m_data = internal::conditional_aligned_new_auto(size); + else + m_data = 0; + EIGEN_INTERNAL_DENSE_STORAGE_CTOR_PLUGIN + } + m_rows = rows; + } + inline const T *data() const { return m_data; } + inline T *data() { return m_data; } +}; + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_H diff --git a/latan/Eigen/src/Core/Diagonal.h b/latan/Eigen/src/Core/Diagonal.h new file mode 100644 index 0000000..1626196 --- /dev/null +++ b/latan/Eigen/src/Core/Diagonal.h @@ -0,0 +1,236 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2009 Benoit Jacob +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DIAGONAL_H +#define EIGEN_DIAGONAL_H + +namespace Eigen { + +/** \class Diagonal + * \ingroup Core_Module + * + * \brief Expression of a diagonal/subdiagonal/superdiagonal in a matrix + * + * \param MatrixType the type of the object in which we are taking a sub/main/super diagonal + * \param DiagIndex the index of the sub/super diagonal. The default is 0 and it means the main diagonal. + * A positive value means a superdiagonal, a negative value means a subdiagonal. + * You can also use Dynamic so the index can be set at runtime. + * + * The matrix is not required to be square. + * + * This class represents an expression of the main diagonal, or any sub/super diagonal + * of a square matrix. It is the return type of MatrixBase::diagonal() and MatrixBase::diagonal(Index) and most of the + * time this is the only way it is used. + * + * \sa MatrixBase::diagonal(), MatrixBase::diagonal(Index) + */ + +namespace internal { +template +struct traits > + : traits +{ + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; + typedef typename MatrixType::StorageKind StorageKind; + enum { + RowsAtCompileTime = (int(DiagIndex) == Dynamic || int(MatrixType::SizeAtCompileTime) == Dynamic) ? Dynamic + : (EIGEN_PLAIN_ENUM_MIN(MatrixType::RowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), + MatrixType::ColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), + ColsAtCompileTime = 1, + MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? 
Dynamic + : DiagIndex == Dynamic ? EIGEN_SIZE_MIN_PREFER_FIXED(MatrixType::MaxRowsAtCompileTime, + MatrixType::MaxColsAtCompileTime) + : (EIGEN_PLAIN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime - EIGEN_PLAIN_ENUM_MAX(-DiagIndex, 0), + MatrixType::MaxColsAtCompileTime - EIGEN_PLAIN_ENUM_MAX( DiagIndex, 0))), + MaxColsAtCompileTime = 1, + MaskLvalueBit = is_lvalue::value ? LvalueBit : 0, + Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit | MaskLvalueBit | DirectAccessBit) & ~RowMajorBit, + CoeffReadCost = _MatrixTypeNested::CoeffReadCost, + MatrixTypeOuterStride = outer_stride_at_compile_time::ret, + InnerStrideAtCompileTime = MatrixTypeOuterStride == Dynamic ? Dynamic : MatrixTypeOuterStride+1, + OuterStrideAtCompileTime = 0 + }; +}; +} + +template class Diagonal + : public internal::dense_xpr_base< Diagonal >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Diagonal) + + inline Diagonal(MatrixType& matrix, Index index = DiagIndex) : m_matrix(matrix), m_index(index) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Diagonal) + + inline Index rows() const + { return m_index.value()<0 ? (std::min)(m_matrix.cols(),m_matrix.rows()+m_index.value()) : (std::min)(m_matrix.rows(),m_matrix.cols()-m_index.value()); } + + inline Index cols() const { return 1; } + + inline Index innerStride() const + { + return m_matrix.outerStride() + 1; + } + + inline Index outerStride() const + { + return 0; + } + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + inline ScalarWithConstIfNotLvalue* data() { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); } + inline const Scalar* data() const { return &(m_matrix.const_cast_derived().coeffRef(rowOffset(), colOffset())); } + + inline Scalar& coeffRef(Index row, Index) + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset()); + } + + inline const Scalar& coeffRef(Index row, Index) const + { + return m_matrix.const_cast_derived().coeffRef(row+rowOffset(), row+colOffset()); + } + + inline CoeffReturnType coeff(Index row, Index) const + { + return m_matrix.coeff(row+rowOffset(), row+colOffset()); + } + + inline Scalar& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_LVALUE(MatrixType) + return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); + } + + inline const Scalar& coeffRef(Index index) const + { + return m_matrix.const_cast_derived().coeffRef(index+rowOffset(), index+colOffset()); + } + + inline CoeffReturnType coeff(Index index) const + { + return m_matrix.coeff(index+rowOffset(), index+colOffset()); + } + + const typename internal::remove_all::type& + nestedExpression() const + { + return m_matrix; + } + + int index() const + { + return m_index.value(); + } + + protected: + typename MatrixType::Nested m_matrix; + const internal::variable_if_dynamic m_index; + + private: + // some compilers may fail to optimize std::max etc in case of compile-time constants... + EIGEN_STRONG_INLINE Index absDiagIndex() const { return m_index.value()>0 ? m_index.value() : -m_index.value(); } + EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value()>0 ? 0 : -m_index.value(); } + EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value()>0 ? 
m_index.value() : 0; } + // triger a compile time error is someone try to call packet + template typename MatrixType::PacketReturnType packet(Index) const; + template typename MatrixType::PacketReturnType packet(Index,Index) const; +}; + +/** \returns an expression of the main diagonal of the matrix \c *this + * + * \c *this is not required to be square. + * + * Example: \include MatrixBase_diagonal.cpp + * Output: \verbinclude MatrixBase_diagonal.out + * + * \sa class Diagonal */ +template +inline typename MatrixBase::DiagonalReturnType +MatrixBase::diagonal() +{ + return derived(); +} + +/** This is the const version of diagonal(). */ +template +inline const typename MatrixBase::ConstDiagonalReturnType +MatrixBase::diagonal() const +{ + return ConstDiagonalReturnType(derived()); +} + +/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this + * + * \c *this is not required to be square. + * + * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 + * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. + * + * Example: \include MatrixBase_diagonal_int.cpp + * Output: \verbinclude MatrixBase_diagonal_int.out + * + * \sa MatrixBase::diagonal(), class Diagonal */ +template +inline typename MatrixBase::template DiagonalIndexReturnType::Type +MatrixBase::diagonal(Index index) +{ + return typename DiagonalIndexReturnType::Type(derived(), index); +} + +/** This is the const version of diagonal(Index). */ +template +inline typename MatrixBase::template ConstDiagonalIndexReturnType::Type +MatrixBase::diagonal(Index index) const +{ + return typename ConstDiagonalIndexReturnType::Type(derived(), index); +} + +/** \returns an expression of the \a DiagIndex-th sub or super diagonal of the matrix \c *this + * + * \c *this is not required to be square. + * + * The template parameter \a DiagIndex represent a super diagonal if \a DiagIndex > 0 + * and a sub diagonal otherwise. \a DiagIndex == 0 is equivalent to the main diagonal. + * + * Example: \include MatrixBase_diagonal_template_int.cpp + * Output: \verbinclude MatrixBase_diagonal_template_int.out + * + * \sa MatrixBase::diagonal(), class Diagonal */ +template +template +inline typename MatrixBase::template DiagonalIndexReturnType::Type +MatrixBase::diagonal() +{ + return derived(); +} + +/** This is the const version of diagonal(). */ +template +template +inline typename MatrixBase::template ConstDiagonalIndexReturnType::Type +MatrixBase::diagonal() const +{ + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_DIAGONAL_H diff --git a/latan/Eigen/src/Core/DiagonalMatrix.h b/latan/Eigen/src/Core/DiagonalMatrix.h new file mode 100644 index 0000000..6e8b50f --- /dev/null +++ b/latan/Eigen/src/Core/DiagonalMatrix.h @@ -0,0 +1,307 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2007-2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
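// Illustrative sketch (not part of the patch): the Diagonal expression defined above is normally
// obtained through diagonal(), diagonal(Index) or diagonal<Index>(); it is writable and the
// matrix does not need to be square. Assumes <Eigen/Dense>.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Matrix3d m = Eigen::Matrix3d::Zero();
  m.diagonal().setConstant(1.0);          // main diagonal
  m.diagonal(1).setConstant(2.0);         // first superdiagonal (runtime index)
  m.diagonal<-1>().setConstant(3.0);      // first subdiagonal (compile-time index)
  std::cout << m << "\n";
  return 0;
}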
+ +#ifndef EIGEN_DIAGONALMATRIX_H +#define EIGEN_DIAGONALMATRIX_H + +namespace Eigen { + +#ifndef EIGEN_PARSED_BY_DOXYGEN +template +class DiagonalBase : public EigenBase +{ + public: + typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; + typedef typename DiagonalVectorType::Scalar Scalar; + typedef typename DiagonalVectorType::RealScalar RealScalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + enum { + RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + MaxRowsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, + MaxColsAtCompileTime = DiagonalVectorType::MaxSizeAtCompileTime, + IsVectorAtCompileTime = 0, + Flags = 0 + }; + + typedef Matrix DenseMatrixType; + typedef DenseMatrixType DenseType; + typedef DiagonalMatrix PlainObject; + + inline const Derived& derived() const { return *static_cast(this); } + inline Derived& derived() { return *static_cast(this); } + + DenseMatrixType toDenseMatrix() const { return derived(); } + template + void evalTo(MatrixBase &other) const; + template + void addTo(MatrixBase &other) const + { other.diagonal() += diagonal(); } + template + void subTo(MatrixBase &other) const + { other.diagonal() -= diagonal(); } + + inline const DiagonalVectorType& diagonal() const { return derived().diagonal(); } + inline DiagonalVectorType& diagonal() { return derived().diagonal(); } + + inline Index rows() const { return diagonal().size(); } + inline Index cols() const { return diagonal().size(); } + + template + const DiagonalProduct + operator*(const MatrixBase &matrix) const; + + inline const DiagonalWrapper, const DiagonalVectorType> > + inverse() const + { + return diagonal().cwiseInverse(); + } + + inline const DiagonalWrapper, const DiagonalVectorType> > + operator*(const Scalar& scalar) const + { + return diagonal() * scalar; + } + friend inline const DiagonalWrapper, const DiagonalVectorType> > + operator*(const Scalar& scalar, const DiagonalBase& other) + { + return other.diagonal() * scalar; + } + + #ifdef EIGEN2_SUPPORT + template + bool isApprox(const DiagonalBase& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const + { + return diagonal().isApprox(other.diagonal(), precision); + } + template + bool isApprox(const MatrixBase& other, typename NumTraits::Real precision = NumTraits::dummy_precision()) const + { + return toDenseMatrix().isApprox(other, precision); + } + #endif +}; + +template +template +void DiagonalBase::evalTo(MatrixBase &other) const +{ + other.setZero(); + other.diagonal() = diagonal(); +} +#endif + +/** \class DiagonalMatrix + * \ingroup Core_Module + * + * \brief Represents a diagonal matrix with its storage + * + * \param _Scalar the type of coefficients + * \param SizeAtCompileTime the dimension of the matrix, or Dynamic + * \param MaxSizeAtCompileTime the dimension of the matrix, or Dynamic. This parameter is optional and defaults + * to SizeAtCompileTime. Most of the time, you do not need to specify it. 
+ * + * \sa class DiagonalWrapper + */ + +namespace internal { +template +struct traits > + : traits > +{ + typedef Matrix<_Scalar,SizeAtCompileTime,1,0,MaxSizeAtCompileTime,1> DiagonalVectorType; + typedef Dense StorageKind; + typedef DenseIndex Index; + enum { + Flags = LvalueBit + }; +}; +} +template +class DiagonalMatrix + : public DiagonalBase > +{ + public: + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename internal::traits::DiagonalVectorType DiagonalVectorType; + typedef const DiagonalMatrix& Nested; + typedef _Scalar Scalar; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + #endif + + protected: + + DiagonalVectorType m_diagonal; + + public: + + /** const version of diagonal(). */ + inline const DiagonalVectorType& diagonal() const { return m_diagonal; } + /** \returns a reference to the stored vector of diagonal coefficients. */ + inline DiagonalVectorType& diagonal() { return m_diagonal; } + + /** Default constructor without initialization */ + inline DiagonalMatrix() {} + + /** Constructs a diagonal matrix with given dimension */ + inline DiagonalMatrix(Index dim) : m_diagonal(dim) {} + + /** 2D constructor. */ + inline DiagonalMatrix(const Scalar& x, const Scalar& y) : m_diagonal(x,y) {} + + /** 3D constructor. */ + inline DiagonalMatrix(const Scalar& x, const Scalar& y, const Scalar& z) : m_diagonal(x,y,z) {} + + /** Copy constructor. */ + template + inline DiagonalMatrix(const DiagonalBase& other) : m_diagonal(other.diagonal()) {} + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** copy constructor. prevent a default copy constructor from hiding the other templated constructor */ + inline DiagonalMatrix(const DiagonalMatrix& other) : m_diagonal(other.diagonal()) {} + #endif + + /** generic constructor from expression of the diagonal coefficients */ + template + explicit inline DiagonalMatrix(const MatrixBase& other) : m_diagonal(other) + {} + + /** Copy operator. */ + template + DiagonalMatrix& operator=(const DiagonalBase& other) + { + m_diagonal = other.diagonal(); + return *this; + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + DiagonalMatrix& operator=(const DiagonalMatrix& other) + { + m_diagonal = other.diagonal(); + return *this; + } + #endif + + /** Resizes to given size. */ + inline void resize(Index size) { m_diagonal.resize(size); } + /** Sets all coefficients to zero. */ + inline void setZero() { m_diagonal.setZero(); } + /** Resizes and sets all coefficients to zero. */ + inline void setZero(Index size) { m_diagonal.setZero(size); } + /** Sets this matrix to be the identity matrix of the current size. */ + inline void setIdentity() { m_diagonal.setOnes(); } + /** Sets this matrix to be the identity matrix of the given size. */ + inline void setIdentity(Index size) { m_diagonal.setOnes(size); } +}; + +/** \class DiagonalWrapper + * \ingroup Core_Module + * + * \brief Expression of a diagonal matrix + * + * \param _DiagonalVectorType the type of the vector of diagonal coefficients + * + * This class is an expression of a diagonal matrix, but not storing its own vector of diagonal coefficients, + * instead wrapping an existing vector expression. It is the return type of MatrixBase::asDiagonal() + * and most of the time this is the only way that it is used. 
+ * + * \sa class DiagonalMatrix, class DiagonalBase, MatrixBase::asDiagonal() + */ + +namespace internal { +template +struct traits > +{ + typedef _DiagonalVectorType DiagonalVectorType; + typedef typename DiagonalVectorType::Scalar Scalar; + typedef typename DiagonalVectorType::Index Index; + typedef typename DiagonalVectorType::StorageKind StorageKind; + enum { + RowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + ColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + MaxRowsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + MaxColsAtCompileTime = DiagonalVectorType::SizeAtCompileTime, + Flags = traits::Flags & LvalueBit + }; +}; +} + +template +class DiagonalWrapper + : public DiagonalBase >, internal::no_assignment_operator +{ + public: + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef _DiagonalVectorType DiagonalVectorType; + typedef DiagonalWrapper Nested; + #endif + + /** Constructor from expression of diagonal coefficients to wrap. */ + inline DiagonalWrapper(DiagonalVectorType& diagonal) : m_diagonal(diagonal) {} + + /** \returns a const reference to the wrapped expression of diagonal coefficients. */ + const DiagonalVectorType& diagonal() const { return m_diagonal; } + + protected: + typename DiagonalVectorType::Nested m_diagonal; +}; + +/** \returns a pseudo-expression of a diagonal matrix with *this as vector of diagonal coefficients + * + * \only_for_vectors + * + * Example: \include MatrixBase_asDiagonal.cpp + * Output: \verbinclude MatrixBase_asDiagonal.out + * + * \sa class DiagonalWrapper, class DiagonalMatrix, diagonal(), isDiagonal() + **/ +template +inline const DiagonalWrapper +MatrixBase::asDiagonal() const +{ + return derived(); +} + +/** \returns true if *this is approximately equal to a diagonal matrix, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isDiagonal.cpp + * Output: \verbinclude MatrixBase_isDiagonal.out + * + * \sa asDiagonal() + */ +template +bool MatrixBase::isDiagonal(RealScalar prec) const +{ + if(cols() != rows()) return false; + RealScalar maxAbsOnDiagonal = static_cast(-1); + for(Index j = 0; j < cols(); ++j) + { + RealScalar absOnDiagonal = internal::abs(coeff(j,j)); + if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; + } + for(Index j = 0; j < cols(); ++j) + for(Index i = 0; i < j; ++i) + { + if(!internal::isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; + if(!internal::isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; + } + return true; +} + +} // end namespace Eigen + +#endif // EIGEN_DIAGONALMATRIX_H diff --git a/latan/Eigen/src/Core/DiagonalProduct.h b/latan/Eigen/src/Core/DiagonalProduct.h new file mode 100644 index 0000000..598c6b3 --- /dev/null +++ b/latan/Eigen/src/Core/DiagonalProduct.h @@ -0,0 +1,123 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2007-2009 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
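// Illustrative sketch (not part of the patch): asDiagonal() wraps an existing vector as a
// diagonal matrix without copying it (DiagonalWrapper), while DiagonalMatrix stores its own
// diagonal coefficients; both can be multiplied with dense matrices. Assumes <Eigen/Dense>.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::Vector3d v(1.0, 2.0, 3.0);
  Eigen::Matrix3d m = Eigen::Matrix3d::Ones();

  Eigen::Matrix3d scaledRows = v.asDiagonal() * m;    // scales row i of m by v(i)
  Eigen::DiagonalMatrix<double, 3> d(4.0, 5.0, 6.0);  // owns its diagonal coefficients
  Eigen::Matrix3d scaledCols = m * d;                 // scales column j of m by d.diagonal()(j)
  std::cout << scaledRows + scaledCols << "\n";
  return 0;
}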
+ +#ifndef EIGEN_DIAGONALPRODUCT_H +#define EIGEN_DIAGONALPRODUCT_H + +namespace Eigen { + +namespace internal { +template +struct traits > + : traits +{ + typedef typename scalar_product_traits::ReturnType Scalar; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + + _StorageOrder = MatrixType::Flags & RowMajorBit ? RowMajor : ColMajor, + _PacketOnDiag = !((int(_StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft) + ||(int(_StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), + _SameTypes = is_same::value, + // FIXME currently we need same types, but in the future the next rule should be the one + //_Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && ((!_PacketOnDiag) || (_SameTypes && bool(int(DiagonalType::Flags)&PacketAccessBit))), + _Vectorizable = bool(int(MatrixType::Flags)&PacketAccessBit) && _SameTypes && ((!_PacketOnDiag) || (bool(int(DiagonalType::Flags)&PacketAccessBit))), + + Flags = (HereditaryBits & (unsigned int)(MatrixType::Flags)) | (_Vectorizable ? PacketAccessBit : 0), + CoeffReadCost = NumTraits::MulCost + MatrixType::CoeffReadCost + DiagonalType::DiagonalVectorType::CoeffReadCost + }; +}; +} + +template +class DiagonalProduct : internal::no_assignment_operator, + public MatrixBase > +{ + public: + + typedef MatrixBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(DiagonalProduct) + + inline DiagonalProduct(const MatrixType& matrix, const DiagonalType& diagonal) + : m_matrix(matrix), m_diagonal(diagonal) + { + eigen_assert(diagonal.diagonal().size() == (ProductOrder == OnTheLeft ? matrix.rows() : matrix.cols())); + } + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + const Scalar coeff(Index row, Index col) const + { + return m_diagonal.diagonal().coeff(ProductOrder == OnTheLeft ? row : col) * m_matrix.coeff(row, col); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + { + enum { + StorageOrder = Flags & RowMajorBit ? RowMajor : ColMajor + }; + const Index indexInDiagonalVector = ProductOrder == OnTheLeft ? row : col; + + return packet_impl(row,col,indexInDiagonalVector,typename internal::conditional< + ((int(StorageOrder) == RowMajor && int(ProductOrder) == OnTheLeft) + ||(int(StorageOrder) == ColMajor && int(ProductOrder) == OnTheRight)), internal::true_type, internal::false_type>::type()); + } + + protected: + template + EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::true_type) const + { + return internal::pmul(m_matrix.template packet(row, col), + internal::pset1(m_diagonal.diagonal().coeff(id))); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet_impl(Index row, Index col, Index id, internal::false_type) const + { + enum { + InnerSize = (MatrixType::Flags & RowMajorBit) ? MatrixType::ColsAtCompileTime : MatrixType::RowsAtCompileTime, + DiagonalVectorPacketLoadMode = (LoadMode == Aligned && ((InnerSize%16) == 0)) ? Aligned : Unaligned + }; + return internal::pmul(m_matrix.template packet(row, col), + m_diagonal.diagonal().template packet(id)); + } + + typename MatrixType::Nested m_matrix; + typename DiagonalType::Nested m_diagonal; +}; + +/** \returns the diagonal matrix product of \c *this by the diagonal matrix \a diagonal. 
+ */ +template +template +inline const DiagonalProduct +MatrixBase::operator*(const DiagonalBase &diagonal) const +{ + return DiagonalProduct(derived(), diagonal.derived()); +} + +/** \returns the diagonal matrix product of \c *this by the matrix \a matrix. + */ +template +template +inline const DiagonalProduct +DiagonalBase::operator*(const MatrixBase &matrix) const +{ + return DiagonalProduct(matrix.derived(), derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_DIAGONALPRODUCT_H diff --git a/latan/Eigen/src/Core/Dot.h b/latan/Eigen/src/Core/Dot.h new file mode 100644 index 0000000..ae9274e --- /dev/null +++ b/latan/Eigen/src/Core/Dot.h @@ -0,0 +1,261 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008, 2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DOT_H +#define EIGEN_DOT_H + +namespace Eigen { + +namespace internal { + +// helper function for dot(). The problem is that if we put that in the body of dot(), then upon calling dot +// with mismatched types, the compiler emits errors about failing to instantiate cwiseProduct BEFORE +// looking at the static assertions. Thus this is a trick to get better compile errors. +template +struct dot_nocheck +{ + typedef typename scalar_product_traits::Scalar,typename traits::Scalar>::ReturnType ResScalar; + static inline ResScalar run(const MatrixBase& a, const MatrixBase& b) + { + return a.template binaryExpr::Scalar,typename traits::Scalar> >(b).sum(); + } +}; + +template +struct dot_nocheck +{ + typedef typename scalar_product_traits::Scalar,typename traits::Scalar>::ReturnType ResScalar; + static inline ResScalar run(const MatrixBase& a, const MatrixBase& b) + { + return a.transpose().template binaryExpr::Scalar,typename traits::Scalar> >(b).sum(); + } +}; + +} // end namespace internal + +/** \returns the dot product of *this with other. + * + * \only_for_vectors + * + * \note If the scalar type is complex numbers, then this function returns the hermitian + * (sesquilinear) dot product, conjugate-linear in the first variable and linear in the + * second variable. + * + * \sa squaredNorm(), norm() + */ +template +template +typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType +MatrixBase::dot(const MatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + typedef internal::scalar_conj_product_op func; + EIGEN_CHECK_BINARY_COMPATIBILIY(func,Scalar,typename OtherDerived::Scalar); + + eigen_assert(size() == other.size()); + + return internal::dot_nocheck::run(*this, other); +} + +#ifdef EIGEN2_SUPPORT +/** \returns the dot product of *this with other, with the Eigen2 convention that the dot product is linear in the first variable + * (conjugating the second variable). Of course this only makes a difference in the complex case. + * + * This method is only available in EIGEN2_SUPPORT mode. 
+ * + * \only_for_vectors + * + * \sa dot() + */ +template +template +typename internal::traits::Scalar +MatrixBase::eigen2_dot(const MatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + eigen_assert(size() == other.size()); + + return internal::dot_nocheck::run(other,*this); +} +#endif + + +//---------- implementation of L2 norm and related functions ---------- + +/** \returns, for vectors, the squared \em l2 norm of \c *this, and for matrices the Frobenius norm. + * In both cases, it consists in the sum of the square of all the matrix entries. + * For vectors, this is also equals to the dot product of \c *this with itself. + * + * \sa dot(), norm() + */ +template +EIGEN_STRONG_INLINE typename NumTraits::Scalar>::Real MatrixBase::squaredNorm() const +{ + return internal::real((*this).cwiseAbs2().sum()); +} + +/** \returns, for vectors, the \em l2 norm of \c *this, and for matrices the Frobenius norm. + * In both cases, it consists in the square root of the sum of the square of all the matrix entries. + * For vectors, this is also equals to the square root of the dot product of \c *this with itself. + * + * \sa dot(), squaredNorm() + */ +template +inline typename NumTraits::Scalar>::Real MatrixBase::norm() const +{ + return internal::sqrt(squaredNorm()); +} + +/** \returns an expression of the quotient of *this by its own norm. + * + * \only_for_vectors + * + * \sa norm(), normalize() + */ +template +inline const typename MatrixBase::PlainObject +MatrixBase::normalized() const +{ + typedef typename internal::nested::type Nested; + typedef typename internal::remove_reference::type _Nested; + _Nested n(derived()); + return n / n.norm(); +} + +/** Normalizes the vector, i.e. divides it by its own norm. + * + * \only_for_vectors + * + * \sa norm(), normalized() + */ +template +inline void MatrixBase::normalize() +{ + *this /= norm(); +} + +//---------- implementation of other norms ---------- + +namespace internal { + +template +struct lpNorm_selector +{ + typedef typename NumTraits::Scalar>::Real RealScalar; + static inline RealScalar run(const MatrixBase& m) + { + return pow(m.cwiseAbs().array().pow(p).sum(), RealScalar(1)/p); + } +}; + +template +struct lpNorm_selector +{ + static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) + { + return m.cwiseAbs().sum(); + } +}; + +template +struct lpNorm_selector +{ + static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) + { + return m.norm(); + } +}; + +template +struct lpNorm_selector +{ + static inline typename NumTraits::Scalar>::Real run(const MatrixBase& m) + { + return m.cwiseAbs().maxCoeff(); + } +}; + +} // end namespace internal + +/** \returns the \f$ \ell^p \f$ norm of *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values + * of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^\infty \f$ + * norm, that is the maximum of the absolute values of the coefficients of *this. 
+ * + * \sa norm() + */ +template +template +inline typename NumTraits::Scalar>::Real +MatrixBase::lpNorm() const +{ + return internal::lpNorm_selector::run(*this); +} + +//---------- implementation of isOrthogonal / isUnitary ---------- + +/** \returns true if *this is approximately orthogonal to \a other, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isOrthogonal.cpp + * Output: \verbinclude MatrixBase_isOrthogonal.out + */ +template +template +bool MatrixBase::isOrthogonal +(const MatrixBase& other, RealScalar prec) const +{ + typename internal::nested::type nested(derived()); + typename internal::nested::type otherNested(other.derived()); + return internal::abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); +} + +/** \returns true if *this is approximately an unitary matrix, + * within the precision given by \a prec. In the case where the \a Scalar + * type is real numbers, a unitary matrix is an orthogonal matrix, whence the name. + * + * \note This can be used to check whether a family of vectors forms an orthonormal basis. + * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an + * orthonormal basis. + * + * Example: \include MatrixBase_isUnitary.cpp + * Output: \verbinclude MatrixBase_isUnitary.out + */ +template +bool MatrixBase::isUnitary(RealScalar prec) const +{ + typename Derived::Nested nested(derived()); + for(Index i = 0; i < cols(); ++i) + { + if(!internal::isApprox(nested.col(i).squaredNorm(), static_cast(1), prec)) + return false; + for(Index j = 0; j < i; ++j) + if(!internal::isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast(1), prec)) + return false; + } + return true; +} + +} // end namespace Eigen + +#endif // EIGEN_DOT_H diff --git a/latan/Eigen/src/Core/EigenBase.h b/latan/Eigen/src/Core/EigenBase.h new file mode 100644 index 0000000..0bbd28b --- /dev/null +++ b/latan/Eigen/src/Core/EigenBase.h @@ -0,0 +1,160 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_EIGENBASE_H +#define EIGEN_EIGENBASE_H + +namespace Eigen { + +/** Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T). + * + * In other words, an EigenBase object is an object that can be copied into a MatrixBase. + * + * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc. + * + * Notice that this class is trivial, it is only used to disambiguate overloaded functions. 
+ * + * \sa \ref TopicClassHierarchy + */ +template struct EigenBase +{ +// typedef typename internal::plain_matrix_type::type PlainObject; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + + /** \returns a reference to the derived object */ + Derived& derived() { return *static_cast(this); } + /** \returns a const reference to the derived object */ + const Derived& derived() const { return *static_cast(this); } + + inline Derived& const_cast_derived() const + { return *static_cast(const_cast(this)); } + inline const Derived& const_derived() const + { return *static_cast(this); } + + /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ + inline Index rows() const { return derived().rows(); } + /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ + inline Index cols() const { return derived().cols(); } + /** \returns the number of coefficients, which is rows()*cols(). + * \sa rows(), cols(), SizeAtCompileTime. */ + inline Index size() const { return rows() * cols(); } + + /** \internal Don't use it, but do the equivalent: \code dst = *this; \endcode */ + template inline void evalTo(Dest& dst) const + { derived().evalTo(dst); } + + /** \internal Don't use it, but do the equivalent: \code dst += *this; \endcode */ + template inline void addTo(Dest& dst) const + { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + typename Dest::PlainObject res(rows(),cols()); + evalTo(res); + dst += res; + } + + /** \internal Don't use it, but do the equivalent: \code dst -= *this; \endcode */ + template inline void subTo(Dest& dst) const + { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + typename Dest::PlainObject res(rows(),cols()); + evalTo(res); + dst -= res; + } + + /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheRight(*this); \endcode */ + template inline void applyThisOnTheRight(Dest& dst) const + { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + dst = dst * this->derived(); + } + + /** \internal Don't use it, but do the equivalent: \code dst.applyOnTheLeft(*this); \endcode */ + template inline void applyThisOnTheLeft(Dest& dst) const + { + // This is the default implementation, + // derived class can reimplement it in a more optimized way. + dst = this->derived() * dst; + } + +}; + +/*************************************************************************** +* Implementation of matrix base methods +***************************************************************************/ + +/** \brief Copies the generic expression \a other into *this. + * + * \details The expression must provide a (templated) evalTo(Derived& dst) const + * function which does the actual job. In practice, this allows any user to write + * its own special matrix without having to modify MatrixBase + * + * \returns a reference to *this. + */ +template +template +Derived& DenseBase::operator=(const EigenBase &other) +{ + other.derived().evalTo(derived()); + return derived(); +} + +template +template +Derived& DenseBase::operator+=(const EigenBase &other) +{ + other.derived().addTo(derived()); + return derived(); +} + +template +template +Derived& DenseBase::operator-=(const EigenBase &other) +{ + other.derived().subTo(derived()); + return derived(); +} + +/** replaces \c *this by \c *this * \a other. 
+ * + * \returns a reference to \c *this + */ +template +template +inline Derived& +MatrixBase::operator*=(const EigenBase &other) +{ + other.derived().applyThisOnTheRight(derived()); + return derived(); +} + +/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=() */ +template +template +inline void MatrixBase::applyOnTheRight(const EigenBase &other) +{ + other.derived().applyThisOnTheRight(derived()); +} + +/** replaces \c *this by \c *this * \a other. */ +template +template +inline void MatrixBase::applyOnTheLeft(const EigenBase &other) +{ + other.derived().applyThisOnTheLeft(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_EIGENBASE_H diff --git a/latan/Eigen/src/Core/Flagged.h b/latan/Eigen/src/Core/Flagged.h new file mode 100644 index 0000000..1f2955f --- /dev/null +++ b/latan/Eigen/src/Core/Flagged.h @@ -0,0 +1,140 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FLAGGED_H +#define EIGEN_FLAGGED_H + +namespace Eigen { + +/** \class Flagged + * \ingroup Core_Module + * + * \brief Expression with modified flags + * + * \param ExpressionType the type of the object of which we are modifying the flags + * \param Added the flags added to the expression + * \param Removed the flags removed from the expression (has priority over Added). + * + * This class represents an expression whose flags have been modified. + * It is the return type of MatrixBase::flagged() + * and most of the time this is the only way it is used. 
+ * + * \sa MatrixBase::flagged() + */ + +namespace internal { +template +struct traits > : traits +{ + enum { Flags = (ExpressionType::Flags | Added) & ~Removed }; +}; +} + +template class Flagged + : public MatrixBase > +{ + public: + + typedef MatrixBase Base; + + EIGEN_DENSE_PUBLIC_INTERFACE(Flagged) + typedef typename internal::conditional::ret, + ExpressionType, const ExpressionType&>::type ExpressionTypeNested; + typedef typename ExpressionType::InnerIterator InnerIterator; + + inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {} + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + inline Index outerStride() const { return m_matrix.outerStride(); } + inline Index innerStride() const { return m_matrix.innerStride(); } + + inline CoeffReturnType coeff(Index row, Index col) const + { + return m_matrix.coeff(row, col); + } + + inline CoeffReturnType coeff(Index index) const + { + return m_matrix.coeff(index); + } + + inline const Scalar& coeffRef(Index row, Index col) const + { + return m_matrix.const_cast_derived().coeffRef(row, col); + } + + inline const Scalar& coeffRef(Index index) const + { + return m_matrix.const_cast_derived().coeffRef(index); + } + + inline Scalar& coeffRef(Index row, Index col) + { + return m_matrix.const_cast_derived().coeffRef(row, col); + } + + inline Scalar& coeffRef(Index index) + { + return m_matrix.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(Index row, Index col) const + { + return m_matrix.template packet(row, col); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(Index index) const + { + return m_matrix.template packet(index); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket(index, x); + } + + const ExpressionType& _expression() const { return m_matrix; } + + template + typename ExpressionType::PlainObject solveTriangular(const MatrixBase& other) const; + + template + void solveTriangularInPlace(const MatrixBase& other) const; + + protected: + ExpressionTypeNested m_matrix; +}; + +/** \returns an expression of *this with added and removed flags + * + * This is mostly for internal use. + * + * \sa class Flagged + */ +template +template +inline const Flagged +DenseBase::flagged() const +{ + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_FLAGGED_H diff --git a/latan/Eigen/src/Core/ForceAlignedAccess.h b/latan/Eigen/src/Core/ForceAlignedAccess.h new file mode 100644 index 0000000..807c7a2 --- /dev/null +++ b/latan/Eigen/src/Core/ForceAlignedAccess.h @@ -0,0 +1,146 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
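The EigenBase operators documented earlier (operator=, operator*=, applyOnTheLeft/applyOnTheRight) are what let special matrix types such as diagonal matrices mix with dense ones. A short sketch, again assuming <Eigen/Dense>:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix3d m = Eigen::Matrix3d::Identity();
    Eigen::Vector3d d(1.0, 2.0, 3.0);

    // A DiagonalWrapper is an EigenBase, so it can be copied into a dense matrix...
    Eigen::Matrix3d D = d.asDiagonal();

    // ...and used with the in-place product operators, which dispatch through
    // applyThisOnTheRight() / applyThisOnTheLeft().
    m *= d.asDiagonal();               // scales the columns of m
    m.applyOnTheLeft(d.asDiagonal());  // scales the rows of m

    std::cout << m << "\n\n" << D << "\n";
    return 0;
}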
+ +#ifndef EIGEN_FORCEALIGNEDACCESS_H +#define EIGEN_FORCEALIGNEDACCESS_H + +namespace Eigen { + +/** \class ForceAlignedAccess + * \ingroup Core_Module + * + * \brief Enforce aligned packet loads and stores regardless of what is requested + * + * \param ExpressionType the type of the object of which we are forcing aligned packet access + * + * This class is the return type of MatrixBase::forceAlignedAccess() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::forceAlignedAccess() + */ + +namespace internal { +template +struct traits > : public traits +{}; +} + +template class ForceAlignedAccess + : public internal::dense_xpr_base< ForceAlignedAccess >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ForceAlignedAccess) + + inline ForceAlignedAccess(const ExpressionType& matrix) : m_expression(matrix) {} + + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } + + inline const CoeffReturnType coeff(Index row, Index col) const + { + return m_expression.coeff(row, col); + } + + inline Scalar& coeffRef(Index row, Index col) + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + inline const CoeffReturnType coeff(Index index) const + { + return m_expression.coeff(index); + } + + inline Scalar& coeffRef(Index index) + { + return m_expression.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(Index row, Index col) const + { + return m_expression.template packet(row, col); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(Index index) const + { + return m_expression.template packet(index); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(index, x); + } + + operator const ExpressionType&() const { return m_expression; } + + protected: + const ExpressionType& m_expression; + + private: + ForceAlignedAccess& operator=(const ForceAlignedAccess&); +}; + +/** \returns an expression of *this with forced aligned access + * \sa forceAlignedAccessIf(),class ForceAlignedAccess + */ +template +inline const ForceAlignedAccess +MatrixBase::forceAlignedAccess() const +{ + return ForceAlignedAccess(derived()); +} + +/** \returns an expression of *this with forced aligned access + * \sa forceAlignedAccessIf(), class ForceAlignedAccess + */ +template +inline ForceAlignedAccess +MatrixBase::forceAlignedAccess() +{ + return ForceAlignedAccess(derived()); +} + +/** \returns an expression of *this with forced aligned access if \a Enable is true. + * \sa forceAlignedAccess(), class ForceAlignedAccess + */ +template +template +inline typename internal::add_const_on_value_type,Derived&>::type>::type +MatrixBase::forceAlignedAccessIf() const +{ + return derived(); +} + +/** \returns an expression of *this with forced aligned access if \a Enable is true. 
+ * \sa forceAlignedAccess(), class ForceAlignedAccess + */ +template +template +inline typename internal::conditional,Derived&>::type +MatrixBase::forceAlignedAccessIf() +{ + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_FORCEALIGNEDACCESS_H diff --git a/latan/Eigen/src/Core/Functors.h b/latan/Eigen/src/Core/Functors.h new file mode 100644 index 0000000..bb7b84a --- /dev/null +++ b/latan/Eigen/src/Core/Functors.h @@ -0,0 +1,975 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FUNCTORS_H +#define EIGEN_FUNCTORS_H + +namespace Eigen { + +namespace internal { + +// associative functors: + +/** \internal + * \brief Template functor to compute the sum of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::operator+, class VectorwiseOp, MatrixBase::sum() + */ +template struct scalar_sum_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sum_op) + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::padd(a,b); } + template + EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const + { return internal::predux(a); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = packet_traits::HasAdd + }; +}; + +/** \internal + * \brief Template functor to compute the product of two scalars + * + * \sa class CwiseBinaryOp, Cwise::operator*(), class VectorwiseOp, MatrixBase::redux() + */ +template struct scalar_product_op { + enum { + // TODO vectorize mixed product + Vectorizable = is_same::value && packet_traits::HasMul && packet_traits::HasMul + }; + typedef typename scalar_product_traits::ReturnType result_type; + EIGEN_EMPTY_STRUCT_CTOR(scalar_product_op) + EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return a * b; } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pmul(a,b); } + template + EIGEN_STRONG_INLINE const result_type predux(const Packet& a) const + { return internal::predux_mul(a); } +}; +template +struct functor_traits > { + enum { + Cost = (NumTraits::MulCost + NumTraits::MulCost)/2, // rough estimate! 
+ PacketAccess = scalar_product_op::Vectorizable + }; +}; + +/** \internal + * \brief Template functor to compute the conjugate product of two scalars + * + * This is a short cut for conj(x) * y which is needed for optimization purpose; in Eigen2 support mode, this becomes x * conj(y) + */ +template struct scalar_conj_product_op { + + enum { + Conj = NumTraits::IsComplex + }; + + typedef typename scalar_product_traits::ReturnType result_type; + + EIGEN_EMPTY_STRUCT_CTOR(scalar_conj_product_op) + EIGEN_STRONG_INLINE const result_type operator() (const LhsScalar& a, const RhsScalar& b) const + { return conj_helper().pmul(a,b); } + + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return conj_helper().pmul(a,b); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::MulCost, + PacketAccess = internal::is_same::value && packet_traits::HasMul + }; +}; + +/** \internal + * \brief Template functor to compute the min of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class VectorwiseOp, MatrixBase::minCoeff() + */ +template struct scalar_min_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_min_op) + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::min; return (min)(a, b); } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pmin(a,b); } + template + EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const + { return internal::predux_min(a); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = packet_traits::HasMin + }; +}; + +/** \internal + * \brief Template functor to compute the max of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class VectorwiseOp, MatrixBase::maxCoeff() + */ +template struct scalar_max_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_max_op) + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { using std::max; return (max)(a, b); } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pmax(a,b); } + template + EIGEN_STRONG_INLINE const Scalar predux(const Packet& a) const + { return internal::predux_max(a); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = packet_traits::HasMax + }; +}; + +/** \internal + * \brief Template functor to compute the hypot of two scalars + * + * \sa MatrixBase::stableNorm(), class Redux + */ +template struct scalar_hypot_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_hypot_op) +// typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& _x, const Scalar& _y) const + { + using std::max; + using std::min; + Scalar p = (max)(_x, _y); + Scalar q = (min)(_x, _y); + Scalar qp = q/p; + return p * sqrt(Scalar(1) + qp*qp); + } +}; +template +struct functor_traits > { + enum { Cost = 5 * NumTraits::MulCost, PacketAccess=0 }; +}; + +/** \internal + * \brief Template functor to compute the pow of two scalars + */ +template struct scalar_binary_pow_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_binary_pow_op) + inline Scalar operator() (const Scalar& a, const OtherScalar& b) const { return internal::pow(a, b); } +}; +template +struct functor_traits > { + enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; +}; + +// other binary functors: + +/** \internal + * \brief Template functor to compute the difference of two scalars + * + * 
\sa class CwiseBinaryOp, MatrixBase::operator- + */ +template struct scalar_difference_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_difference_op) + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::psub(a,b); } +}; +template +struct functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = packet_traits::HasSub + }; +}; + +/** \internal + * \brief Template functor to compute the quotient of two scalars + * + * \sa class CwiseBinaryOp, Cwise::operator/() + */ +template struct scalar_quotient_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_quotient_op) + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a, const Packet& b) const + { return internal::pdiv(a,b); } +}; +template +struct functor_traits > { + enum { + Cost = 2 * NumTraits::MulCost, + PacketAccess = packet_traits::HasDiv + }; +}; + +/** \internal + * \brief Template functor to compute the and of two booleans + * + * \sa class CwiseBinaryOp, ArrayBase::operator&& + */ +struct scalar_boolean_and_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_and_op) + EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a && b; } +}; +template<> struct functor_traits { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false + }; +}; + +/** \internal + * \brief Template functor to compute the or of two booleans + * + * \sa class CwiseBinaryOp, ArrayBase::operator|| + */ +struct scalar_boolean_or_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_boolean_or_op) + EIGEN_STRONG_INLINE bool operator() (const bool& a, const bool& b) const { return a || b; } +}; +template<> struct functor_traits { + enum { + Cost = NumTraits::AddCost, + PacketAccess = false + }; +}; + +// unary functors: + +/** \internal + * \brief Template functor to compute the opposite of a scalar + * + * \sa class CwiseUnaryOp, MatrixBase::operator- + */ +template struct scalar_opposite_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_opposite_op) + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::pnegate(a); } +}; +template +struct functor_traits > +{ enum { + Cost = NumTraits::AddCost, + PacketAccess = packet_traits::HasNegate }; +}; + +/** \internal + * \brief Template functor to compute the absolute value of a scalar + * + * \sa class CwiseUnaryOp, Cwise::abs + */ +template struct scalar_abs_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_abs_op) + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs(a); } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::pabs(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::AddCost, + PacketAccess = packet_traits::HasAbs + }; +}; + +/** \internal + * \brief Template functor to compute the squared absolute value of a scalar + * + * \sa class CwiseUnaryOp, Cwise::abs2 + */ +template struct scalar_abs2_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_abs2_op) + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return internal::abs2(a); } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) 
const + { return internal::pmul(a,a); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasAbs2 }; }; + +/** \internal + * \brief Template functor to compute the conjugate of a complex value + * + * \sa class CwiseUnaryOp, MatrixBase::conjugate() + */ +template struct scalar_conjugate_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_conjugate_op) + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return internal::conj(a); } + template + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const { return internal::pconj(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = NumTraits::IsComplex ? NumTraits::AddCost : 0, + PacketAccess = packet_traits::HasConj + }; +}; + +/** \internal + * \brief Template functor to cast a scalar to another type + * + * \sa class CwiseUnaryOp, MatrixBase::cast() + */ +template +struct scalar_cast_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cast_op) + typedef NewType result_type; + EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return cast(a); } +}; +template +struct functor_traits > +{ enum { Cost = is_same::value ? 0 : NumTraits::AddCost, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the real part of a complex + * + * \sa class CwiseUnaryOp, MatrixBase::real() + */ +template +struct scalar_real_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_real_op) + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::real(a); } +}; +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the imaginary part of a complex + * + * \sa class CwiseUnaryOp, MatrixBase::imag() + */ +template +struct scalar_imag_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_op) + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return internal::imag(a); } +}; +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the real part of a complex as a reference + * + * \sa class CwiseUnaryOp, MatrixBase::real() + */ +template +struct scalar_real_ref_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_real_ref_op) + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::real_ref(*const_cast(&a)); } +}; +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the imaginary part of a complex as a reference + * + * \sa class CwiseUnaryOp, MatrixBase::imag() + */ +template +struct scalar_imag_ref_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_imag_ref_op) + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE result_type& operator() (const Scalar& a) const { return internal::imag_ref(*const_cast(&a)); } +}; +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * + * \brief Template functor to compute the exponential of a scalar + * + * \sa class CwiseUnaryOp, Cwise::exp() + */ +template struct scalar_exp_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_exp_op) + inline const Scalar operator() (const Scalar& a) const { return internal::exp(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::pexp(a); } +}; +template +struct 
functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasExp }; }; + +/** \internal + * + * \brief Template functor to compute the logarithm of a scalar + * + * \sa class CwiseUnaryOp, Cwise::log() + */ +template struct scalar_log_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_log_op) + inline const Scalar operator() (const Scalar& a) const { return internal::log(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::plog(a); } +}; +template +struct functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = packet_traits::HasLog }; }; + +/** \internal + * \brief Template functor to multiply a scalar by a fixed other one + * + * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/ + */ +/* NOTE why doing the pset1() in packetOp *is* an optimization ? + * indeed it seems better to declare m_other as a Packet and do the pset1() once + * in the constructor. However, in practice: + * - GCC does not like m_other as a Packet and generate a load every time it needs it + * - on the other hand GCC is able to moves the pset1() outside the loop :) + * - simpler code ;) + * (ICC and gcc 4.4 seems to perform well in both cases, the issue is visible with y = a*x + b*y) + */ +template +struct scalar_multiple_op { + typedef typename packet_traits::type Packet; + // FIXME default copy constructors seems bugged with std::complex<> + EIGEN_STRONG_INLINE scalar_multiple_op(const scalar_multiple_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_multiple_op(const Scalar& other) : m_other(other) { } + EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; } + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::pmul(a, pset1(m_other)); } + typename add_const_on_value_type::Nested>::type m_other; +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; + +template +struct scalar_multiple2_op { + typedef typename scalar_product_traits::ReturnType result_type; + EIGEN_STRONG_INLINE scalar_multiple2_op(const scalar_multiple2_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_multiple2_op(const Scalar2& other) : m_other(other) { } + EIGEN_STRONG_INLINE result_type operator() (const Scalar1& a) const { return a * m_other; } + typename add_const_on_value_type::Nested>::type m_other; +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to divide a scalar by a fixed other one + * + * This functor is used to implement the quotient of a matrix by + * a scalar where the scalar type is not necessarily a floating point type. 
+ * + * \sa class CwiseUnaryOp, MatrixBase::operator/ + */ +template +struct scalar_quotient1_op { + typedef typename packet_traits::type Packet; + // FIXME default copy constructors seems bugged with std::complex<> + EIGEN_STRONG_INLINE scalar_quotient1_op(const scalar_quotient1_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_quotient1_op(const Scalar& other) : m_other(other) {} + EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; } + EIGEN_STRONG_INLINE const Packet packetOp(const Packet& a) const + { return internal::pdiv(a, pset1(m_other)); } + typename add_const_on_value_type::Nested>::type m_other; +}; +template +struct functor_traits > +{ enum { Cost = 2 * NumTraits::MulCost, PacketAccess = packet_traits::HasDiv }; }; + +// nullary functors + +template +struct scalar_constant_op { + typedef typename packet_traits::type Packet; + EIGEN_STRONG_INLINE scalar_constant_op(const scalar_constant_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE scalar_constant_op(const Scalar& other) : m_other(other) { } + template + EIGEN_STRONG_INLINE const Scalar operator() (Index, Index = 0) const { return m_other; } + template + EIGEN_STRONG_INLINE const Packet packetOp(Index, Index = 0) const { return internal::pset1(m_other); } + const Scalar m_other; +}; +template +struct functor_traits > +// FIXME replace this packet test by a safe one +{ enum { Cost = 1, PacketAccess = packet_traits::Vectorizable, IsRepeatable = true }; }; + +template struct scalar_identity_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_identity_op) + template + EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const { return row==col ? Scalar(1) : Scalar(0); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false, IsRepeatable = true }; }; + +template struct linspaced_op_impl; + +// linear access for packet ops: +// 1) initialization +// base = [low, ..., low] + ([step, ..., step] * [-size, ..., 0]) +// 2) each step (where size is 1 for coeff access or PacketSize for packet access) +// base += [size*step, ..., size*step] +// +// TODO: Perhaps it's better to initialize lazily (so not in the constructor but in packetOp) +// in order to avoid the padd() in operator() ? 
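The linspaced_op machinery described in the comment above is what backs DenseBase::LinSpaced(); at the user level it reduces to, for instance (illustrative sketch, assuming <Eigen/Dense>):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    // Five values evenly spaced between 0 and 1, filled by the linspaced functor.
    Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0);
    std::cout << v.transpose() << "\n";   // 0 0.25 0.5 0.75 1
    return 0;
}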
+template +struct linspaced_op_impl +{ + typedef typename packet_traits::type Packet; + + linspaced_op_impl(Scalar low, Scalar step) : + m_low(low), m_step(step), + m_packetStep(pset1(packet_traits::size*step)), + m_base(padd(pset1(low), pmul(pset1(step),plset(-packet_traits::size)))) {} + + template + EIGEN_STRONG_INLINE const Scalar operator() (Index i) const + { + m_base = padd(m_base, pset1(m_step)); + return m_low+i*m_step; + } + + template + EIGEN_STRONG_INLINE const Packet packetOp(Index) const { return m_base = padd(m_base,m_packetStep); } + + const Scalar m_low; + const Scalar m_step; + const Packet m_packetStep; + mutable Packet m_base; +}; + +// random access for packet ops: +// 1) each step +// [low, ..., low] + ( [step, ..., step] * ( [i, ..., i] + [0, ..., size] ) ) +template +struct linspaced_op_impl +{ + typedef typename packet_traits::type Packet; + + linspaced_op_impl(Scalar low, Scalar step) : + m_low(low), m_step(step), + m_lowPacket(pset1(m_low)), m_stepPacket(pset1(m_step)), m_interPacket(plset(0)) {} + + template + EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return m_low+i*m_step; } + + template + EIGEN_STRONG_INLINE const Packet packetOp(Index i) const + { return internal::padd(m_lowPacket, pmul(m_stepPacket, padd(pset1(i),m_interPacket))); } + + const Scalar m_low; + const Scalar m_step; + const Packet m_lowPacket; + const Packet m_stepPacket; + const Packet m_interPacket; +}; + +// ----- Linspace functor ---------------------------------------------------------------- + +// Forward declaration (we default to random access which does not really give +// us a speed gain when using packet access but it allows to use the functor in +// nested expressions). +template struct linspaced_op; +template struct functor_traits< linspaced_op > +{ enum { Cost = 1, PacketAccess = packet_traits::HasSetLinear, IsRepeatable = true }; }; +template struct linspaced_op +{ + typedef typename packet_traits::type Packet; + linspaced_op(Scalar low, Scalar high, int num_steps) : impl((num_steps==1 ? high : low), (num_steps==1 ? Scalar() : (high-low)/(num_steps-1))) {} + + template + EIGEN_STRONG_INLINE const Scalar operator() (Index i) const { return impl(i); } + + // We need this function when assigning e.g. a RowVectorXd to a MatrixXd since + // there row==0 and col is used for the actual iteration. + template + EIGEN_STRONG_INLINE const Scalar operator() (Index row, Index col) const + { + eigen_assert(col==0 || row==0); + return impl(col + row); + } + + template + EIGEN_STRONG_INLINE const Packet packetOp(Index i) const { return impl.packetOp(i); } + + // We need this function when assigning e.g. a RowVectorXd to a MatrixXd since + // there row==0 and col is used for the actual iteration. + template + EIGEN_STRONG_INLINE const Packet packetOp(Index row, Index col) const + { + eigen_assert(col==0 || row==0); + return impl.packetOp(col + row); + } + + // This proxy object handles the actual required temporaries, the different + // implementations (random vs. sequential access) as well as the + // correct piping to size 2/4 packet operations. + const linspaced_op_impl impl; +}; + +// all functors allow linear access, except scalar_identity_op. So we fix here a quick meta +// to indicate whether a functor allows linear access, just always answering 'yes' except for +// scalar_identity_op. 
+// FIXME move this to functor_traits adding a functor_default +template struct functor_has_linear_access { enum { ret = 1 }; }; +template struct functor_has_linear_access > { enum { ret = 0 }; }; + +// in CwiseBinaryOp, we require the Lhs and Rhs to have the same scalar type, except for multiplication +// where we only require them to have the same _real_ scalar type so one may multiply, say, float by complex. +// FIXME move this to functor_traits adding a functor_default +template struct functor_allows_mixing_real_and_complex { enum { ret = 0 }; }; +template struct functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; +template struct functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; + + +/** \internal + * \brief Template functor to add a scalar to a fixed other one + * \sa class CwiseUnaryOp, Array::operator+ + */ +/* If you wonder why doing the pset1() in packetOp() is an optimization check scalar_multiple_op */ +template +struct scalar_add_op { + typedef typename packet_traits::type Packet; + // FIXME default copy constructors seems bugged with std::complex<> + inline scalar_add_op(const scalar_add_op& other) : m_other(other.m_other) { } + inline scalar_add_op(const Scalar& other) : m_other(other) { } + inline Scalar operator() (const Scalar& a) const { return a + m_other; } + inline const Packet packetOp(const Packet& a) const + { return internal::padd(a, pset1(m_other)); } + const Scalar m_other; +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = packet_traits::HasAdd }; }; + +/** \internal + * \brief Template functor to compute the square root of a scalar + * \sa class CwiseUnaryOp, Cwise::sqrt() + */ +template struct scalar_sqrt_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sqrt_op) + inline const Scalar operator() (const Scalar& a) const { return internal::sqrt(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::psqrt(a); } +}; +template +struct functor_traits > +{ enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasSqrt + }; +}; + +/** \internal + * \brief Template functor to compute the cosine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::cos() + */ +template struct scalar_cos_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cos_op) + inline Scalar operator() (const Scalar& a) const { return internal::cos(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::pcos(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasCos + }; +}; + +/** \internal + * \brief Template functor to compute the sine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::sin() + */ +template struct scalar_sin_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_sin_op) + inline const Scalar operator() (const Scalar& a) const { return internal::sin(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::psin(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasSin + }; +}; + + +/** \internal + * \brief Template functor to compute the tan of a scalar + * \sa class CwiseUnaryOp, ArrayBase::tan() + */ +template struct scalar_tan_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_tan_op) + inline const Scalar operator() (const Scalar& a) const { return internal::tan(a); } + typedef typename packet_traits::type Packet; + inline 
Packet packetOp(const Packet& a) const { return internal::ptan(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasTan + }; +}; + +/** \internal + * \brief Template functor to compute the arc cosine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::acos() + */ +template struct scalar_acos_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_acos_op) + inline const Scalar operator() (const Scalar& a) const { return internal::acos(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::pacos(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasACos + }; +}; + +/** \internal + * \brief Template functor to compute the arc sine of a scalar + * \sa class CwiseUnaryOp, ArrayBase::asin() + */ +template struct scalar_asin_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_asin_op) + inline const Scalar operator() (const Scalar& a) const { return internal::asin(a); } + typedef typename packet_traits::type Packet; + inline Packet packetOp(const Packet& a) const { return internal::pasin(a); } +}; +template +struct functor_traits > +{ + enum { + Cost = 5 * NumTraits::MulCost, + PacketAccess = packet_traits::HasASin + }; +}; + +/** \internal + * \brief Template functor to raise a scalar to a power + * \sa class CwiseUnaryOp, Cwise::pow + */ +template +struct scalar_pow_op { + // FIXME default copy constructors seems bugged with std::complex<> + inline scalar_pow_op(const scalar_pow_op& other) : m_exponent(other.m_exponent) { } + inline scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {} + inline Scalar operator() (const Scalar& a) const { return internal::pow(a, m_exponent); } + const Scalar m_exponent; +}; +template +struct functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to compute the quotient between a scalar and array entries. 
+ * \sa class CwiseUnaryOp, Cwise::inverse() + */ +template +struct scalar_inverse_mult_op { + scalar_inverse_mult_op(const Scalar& other) : m_other(other) {} + inline Scalar operator() (const Scalar& a) const { return m_other / a; } + template + inline const Packet packetOp(const Packet& a) const + { return internal::pdiv(pset1(m_other),a); } + Scalar m_other; +}; + +/** \internal + * \brief Template functor to compute the inverse of a scalar + * \sa class CwiseUnaryOp, Cwise::inverse() + */ +template +struct scalar_inverse_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_inverse_op) + inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; } + template + inline const Packet packetOp(const Packet& a) const + { return internal::pdiv(pset1(Scalar(1)),a); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasDiv }; }; + +/** \internal + * \brief Template functor to compute the square of a scalar + * \sa class CwiseUnaryOp, Cwise::square() + */ +template +struct scalar_square_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_square_op) + inline Scalar operator() (const Scalar& a) const { return a*a; } + template + inline const Packet packetOp(const Packet& a) const + { return internal::pmul(a,a); } +}; +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; + +/** \internal + * \brief Template functor to compute the cube of a scalar + * \sa class CwiseUnaryOp, Cwise::cube() + */ +template +struct scalar_cube_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_cube_op) + inline Scalar operator() (const Scalar& a) const { return a*a*a; } + template + inline const Packet packetOp(const Packet& a) const + { return internal::pmul(a,pmul(a,a)); } +}; +template +struct functor_traits > +{ enum { Cost = 2*NumTraits::MulCost, PacketAccess = packet_traits::HasMul }; }; + +// default functor traits for STL functors: + +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = functor_traits::Cost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = functor_traits::Cost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 1 + functor_traits::Cost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost 
= 1 + functor_traits::Cost, PacketAccess = false }; }; + +#ifdef EIGEN_STDEXT_SUPPORT + +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct functor_traits > > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct functor_traits > > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = functor_traits::Cost + functor_traits::Cost, PacketAccess = false }; }; + +template +struct functor_traits > +{ enum { Cost = functor_traits::Cost + functor_traits::Cost + functor_traits::Cost, PacketAccess = false }; }; + +#endif // EIGEN_STDEXT_SUPPORT + +// allow to add new functors and specializations of functor_traits from outside Eigen. +// this macro is really needed because functor_traits must be specialized after it is declared but before it is used... +#ifdef EIGEN_FUNCTORS_PLUGIN +#include EIGEN_FUNCTORS_PLUGIN +#endif + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_FUNCTORS_H diff --git a/latan/Eigen/src/Core/Fuzzy.h b/latan/Eigen/src/Core/Fuzzy.h new file mode 100644 index 0000000..d74edcf --- /dev/null +++ b/latan/Eigen/src/Core/Fuzzy.h @@ -0,0 +1,150 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_FUZZY_H +#define EIGEN_FUZZY_H + +namespace Eigen { + +namespace internal +{ + +template::IsInteger> +struct isApprox_selector +{ + static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec) + { + using std::min; + typename internal::nested::type nested(x); + typename internal::nested::type otherNested(y); + return (nested - otherNested).cwiseAbs2().sum() <= prec * prec * (min)(nested.cwiseAbs2().sum(), otherNested.cwiseAbs2().sum()); + } +}; + +template +struct isApprox_selector +{ + static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar) + { + return x.matrix() == y.matrix(); + } +}; + +template::IsInteger> +struct isMuchSmallerThan_object_selector +{ + static bool run(const Derived& x, const OtherDerived& y, typename Derived::RealScalar prec) + { + return x.cwiseAbs2().sum() <= abs2(prec) * y.cwiseAbs2().sum(); + } +}; + +template +struct isMuchSmallerThan_object_selector +{ + static bool run(const Derived& x, const OtherDerived&, typename Derived::RealScalar) + { + return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); + } +}; + +template::IsInteger> +struct isMuchSmallerThan_scalar_selector +{ + static bool run(const Derived& x, const typename Derived::RealScalar& y, typename Derived::RealScalar prec) + { + return x.cwiseAbs2().sum() <= abs2(prec * y); + } +}; + +template +struct isMuchSmallerThan_scalar_selector +{ + static bool run(const Derived& x, const typename Derived::RealScalar&, typename Derived::RealScalar) + { + return x.matrix() == Derived::Zero(x.rows(), x.cols()).matrix(); + } +}; + +} // end namespace internal + + +/** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. 
Two vectors \f$ v \f$ and \f$ w \f$ + * are considered to be approximately equal within precision \f$ p \f$ if + * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] + * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm + * L2 norm). + * + * \note Because of the multiplicativeness of this comparison, one can't use this function + * to check whether \c *this is approximately equal to the zero matrix or vector. + * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix + * or vector. If you want to test whether \c *this is zero, use internal::isMuchSmallerThan(const + * RealScalar&, RealScalar) instead. + * + * \sa internal::isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +bool DenseBase::isApprox( + const DenseBase& other, + RealScalar prec +) const +{ + return internal::isApprox_selector::run(derived(), other.derived(), prec); +} + +/** \returns \c true if the norm of \c *this is much smaller than \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] + * + * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason, + * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm + * of a reference matrix of same dimensions. + * + * \sa isApprox(), isMuchSmallerThan(const DenseBase&, RealScalar) const + */ +template +bool DenseBase::isMuchSmallerThan( + const typename NumTraits::Real& other, + RealScalar prec +) const +{ + return internal::isMuchSmallerThan_scalar_selector::run(derived(), other, prec); +} + +/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] + * For matrices, the comparison is done using the Hilbert-Schmidt norm. + * + * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +bool DenseBase::isMuchSmallerThan( + const DenseBase& other, + RealScalar prec +) const +{ + return internal::isMuchSmallerThan_object_selector::run(derived(), other.derived(), prec); +} + +} // end namespace Eigen + +#endif // EIGEN_FUZZY_H diff --git a/latan/Eigen/src/Core/GeneralProduct.h b/latan/Eigen/src/Core/GeneralProduct.h new file mode 100644 index 0000000..bfc2a67 --- /dev/null +++ b/latan/Eigen/src/Core/GeneralProduct.h @@ -0,0 +1,613 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008-2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
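// Illustrative sketch, not part of the imported Eigen sources: how the fuzzy
// comparisons documented in Fuzzy.h above behave in practice. The vectors and
// tolerance below are made-up examples; <Eigen/Dense> is assumed to be available.

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Vector3d v(1.0, 2.0, 3.0);
    Eigen::Vector3d w = v + Eigen::Vector3d::Constant(1e-14);

    // Multiplicative test: ||v - w|| <= prec * min(||v||, ||w||)
    std::cout << v.isApprox(w) << '\n';                        // prints 1

    // Comparing against an exact zero always fails unless v is itself zero,
    // because the right-hand side of the test above collapses to zero...
    std::cout << v.isApprox(Eigen::Vector3d::Zero()) << '\n';  // prints 0

    // ...so "is this vector numerically zero?" is asked with isMuchSmallerThan.
    std::cout << (v - w).isMuchSmallerThan(v) << '\n';         // prints 1
    return 0;
}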
+ +#ifndef EIGEN_GENERAL_PRODUCT_H +#define EIGEN_GENERAL_PRODUCT_H + +namespace Eigen { + +/** \class GeneralProduct + * \ingroup Core_Module + * + * \brief Expression of the product of two general matrices or vectors + * + * \param LhsNested the type used to store the left-hand side + * \param RhsNested the type used to store the right-hand side + * \param ProductMode the type of the product + * + * This class represents an expression of the product of two general matrices. + * We call a general matrix, a dense matrix with full storage. For instance, + * This excludes triangular, selfadjoint, and sparse matrices. + * It is the return type of the operator* between general matrices. Its template + * arguments are determined automatically by ProductReturnType. Therefore, + * GeneralProduct should never be used direclty. To determine the result type of a + * function which involves a matrix product, use ProductReturnType::Type. + * + * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase&) + */ +template::value> +class GeneralProduct; + +enum { + Large = 2, + Small = 3 +}; + +namespace internal { + +template struct product_type_selector; + +template struct product_size_category +{ + enum { is_large = MaxSize == Dynamic || + Size >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD, + value = is_large ? Large + : Size == 1 ? 1 + : Small + }; +}; + +template struct product_type +{ + typedef typename remove_all::type _Lhs; + typedef typename remove_all::type _Rhs; + enum { + MaxRows = _Lhs::MaxRowsAtCompileTime, + Rows = _Lhs::RowsAtCompileTime, + MaxCols = _Rhs::MaxColsAtCompileTime, + Cols = _Rhs::ColsAtCompileTime, + MaxDepth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::MaxColsAtCompileTime, + _Rhs::MaxRowsAtCompileTime), + Depth = EIGEN_SIZE_MIN_PREFER_FIXED(_Lhs::ColsAtCompileTime, + _Rhs::RowsAtCompileTime), + LargeThreshold = EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD + }; + + // the splitting into different lines of code here, introducing the _select enums and the typedef below, + // is to work around an internal compiler error with gcc 4.1 and 4.2. +private: + enum { + rows_select = product_size_category::value, + cols_select = product_size_category::value, + depth_select = product_size_category::value + }; + typedef product_type_selector selector; + +public: + enum { + value = selector::ret + }; +#ifdef EIGEN_DEBUG_PRODUCT + static void debug() + { + EIGEN_DEBUG_VAR(Rows); + EIGEN_DEBUG_VAR(Cols); + EIGEN_DEBUG_VAR(Depth); + EIGEN_DEBUG_VAR(rows_select); + EIGEN_DEBUG_VAR(cols_select); + EIGEN_DEBUG_VAR(depth_select); + EIGEN_DEBUG_VAR(value); + } +#endif +}; + + +/* The following allows to select the kind of product at compile time + * based on the three dimensions of the product. + * This is a compile time mapping from {1,Small,Large}^3 -> {product types} */ +// FIXME I'm not sure the current mapping is the ideal one. 
+template struct product_type_selector { enum { ret = OuterProduct }; }; +template struct product_type_selector<1, 1, Depth> { enum { ret = InnerProduct }; }; +template<> struct product_type_selector<1, 1, 1> { enum { ret = InnerProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Small,Small> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = LazyCoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Large,Small> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector<1, Large,Large> { enum { ret = GemvProduct }; }; +template<> struct product_type_selector<1, Small,Large> { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemvProduct }; }; +template<> struct product_type_selector { enum { ret = CoeffBasedProductMode }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; +template<> struct product_type_selector { enum { ret = GemmProduct }; }; + +} // end namespace internal + +/** \class ProductReturnType + * \ingroup Core_Module + * + * \brief Helper class to get the correct and optimized returned type of operator* + * + * \param Lhs the type of the left-hand side + * \param Rhs the type of the right-hand side + * \param ProductMode the type of the product (determined automatically by internal::product_mode) + * + * This class defines the typename Type representing the optimized product expression + * between two matrix expressions. In practice, using ProductReturnType::Type + * is the recommended way to define the result type of a function returning an expression + * which involve a matrix product. The class Product should never be + * used directly. + * + * \sa class Product, MatrixBase::operator*(const MatrixBase&) + */ +template +struct ProductReturnType +{ + // TODO use the nested type to reduce instanciations ???? 
+// typedef typename internal::nested::type LhsNested; +// typedef typename internal::nested::type RhsNested; + + typedef GeneralProduct Type; +}; + +template +struct ProductReturnType +{ + typedef typename internal::nested::type >::type LhsNested; + typedef typename internal::nested::type >::type RhsNested; + typedef CoeffBasedProduct Type; +}; + +template +struct ProductReturnType +{ + typedef typename internal::nested::type >::type LhsNested; + typedef typename internal::nested::type >::type RhsNested; + typedef CoeffBasedProduct Type; +}; + +// this is a workaround for sun CC +template +struct LazyProductReturnType : public ProductReturnType +{}; + +/*********************************************************************** +* Implementation of Inner Vector Vector Product +***********************************************************************/ + +// FIXME : maybe the "inner product" could return a Scalar +// instead of a 1x1 matrix ?? +// Pro: more natural for the user +// Cons: this could be a problem if in a meta unrolled algorithm a matrix-matrix +// product ends up to a row-vector times col-vector product... To tackle this use +// case, we could have a specialization for Block with: operator=(Scalar x); + +namespace internal { + +template +struct traits > + : traits::ReturnType,1,1> > +{}; + +} + +template +class GeneralProduct + : internal::no_assignment_operator, + public Matrix::ReturnType,1,1> +{ + typedef Matrix::ReturnType,1,1> Base; + public: + GeneralProduct(const Lhs& lhs, const Rhs& rhs) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + Base::coeffRef(0,0) = (lhs.transpose().cwiseProduct(rhs)).sum(); + } + + /** Convertion to scalar */ + operator const typename Base::Scalar() const { + return Base::coeff(0,0); + } +}; + +/*********************************************************************** +* Implementation of Outer Vector Vector Product +***********************************************************************/ + +namespace internal { +template struct outer_product_selector; + +template +struct traits > + : traits, Lhs, Rhs> > +{}; + +} + +template +class GeneralProduct + : public ProductBase, Lhs, Rhs> +{ + public: + EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) + + GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) + { + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + } + + template void scaleAndAddTo(Dest& dest, Scalar alpha) const + { + internal::outer_product_selector<(int(Dest::Flags)&RowMajorBit) ? 
RowMajor : ColMajor>::run(*this, dest, alpha); + } +}; + +namespace internal { + +template<> struct outer_product_selector { + template + static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + typedef typename Dest::Index Index; + // FIXME make sure lhs is sequentially stored + // FIXME not very good if rhs is real and lhs complex while alpha is real too + const Index cols = dest.cols(); + for (Index j=0; j struct outer_product_selector { + template + static EIGEN_DONT_INLINE void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) { + typedef typename Dest::Index Index; + // FIXME make sure rhs is sequentially stored + // FIXME not very good if lhs is real and rhs complex while alpha is real too + const Index rows = dest.rows(); + for (Index i=0; i call fast BLAS-like colmajor routine + * 2 - the matrix is row-major, BLAS compatible and N is large => call fast BLAS-like rowmajor routine + * 3 - all other cases are handled using a simple loop along the outer-storage direction. + * Therefore we need a lower level meta selector. + * Furthermore, if the matrix is the rhs, then the product has to be transposed. + */ +namespace internal { + +template +struct traits > + : traits, Lhs, Rhs> > +{}; + +template +struct gemv_selector; + +} // end namespace internal + +template +class GeneralProduct + : public ProductBase, Lhs, Rhs> +{ + public: + EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) + + typedef typename Lhs::Scalar LhsScalar; + typedef typename Rhs::Scalar RhsScalar; + + GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) + { +// EIGEN_STATIC_ASSERT((internal::is_same::value), +// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + } + + enum { Side = Lhs::IsVectorAtCompileTime ? OnTheLeft : OnTheRight }; + typedef typename internal::conditional::type MatrixType; + + template void scaleAndAddTo(Dest& dst, Scalar alpha) const + { + eigen_assert(m_lhs.rows() == dst.rows() && m_rhs.cols() == dst.cols()); + internal::gemv_selector::HasUsableDirectAccess)>::run(*this, dst, alpha); + } +}; + +namespace internal { + +// The vector is on the left => transposition +template +struct gemv_selector +{ + template + static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + Transpose destT(dest); + enum { OtherStorageOrder = StorageOrder == RowMajor ? ColMajor : RowMajor }; + gemv_selector + ::run(GeneralProduct,Transpose, GemvProduct> + (prod.rhs().transpose(), prod.lhs().transpose()), destT, alpha); + } +}; + +template struct gemv_static_vector_if; + +template +struct gemv_static_vector_if +{ + EIGEN_STRONG_INLINE Scalar* data() { eigen_internal_assert(false && "should never be called"); return 0; } +}; + +template +struct gemv_static_vector_if +{ + EIGEN_STRONG_INLINE Scalar* data() { return 0; } +}; + +template +struct gemv_static_vector_if +{ + #if EIGEN_ALIGN_STATICALLY + internal::plain_array m_data; + EIGEN_STRONG_INLINE Scalar* data() { return m_data.array; } + #else + // Some architectures cannot align on the stack, + // => let's manually enforce alignment by allocating more data and return the address of the first aligned element. + enum { + ForceAlignment = internal::packet_traits::Vectorizable, + PacketSize = internal::packet_traits::size + }; + internal::plain_array m_data; + EIGEN_STRONG_INLINE Scalar* data() { + return ForceAlignment + ? 
reinterpret_cast((reinterpret_cast(m_data.array) & ~(size_t(15))) + 16) + : m_data.array; + } + #endif +}; + +template<> struct gemv_selector +{ + template + static inline void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + typedef typename ProductType::Index Index; + typedef typename ProductType::LhsScalar LhsScalar; + typedef typename ProductType::RhsScalar RhsScalar; + typedef typename ProductType::Scalar ResScalar; + typedef typename ProductType::RealScalar RealScalar; + typedef typename ProductType::ActualLhsType ActualLhsType; + typedef typename ProductType::ActualRhsType ActualRhsType; + typedef typename ProductType::LhsBlasTraits LhsBlasTraits; + typedef typename ProductType::RhsBlasTraits RhsBlasTraits; + typedef Map, Aligned> MappedDest; + + ActualLhsType actualLhs = LhsBlasTraits::extract(prod.lhs()); + ActualRhsType actualRhs = RhsBlasTraits::extract(prod.rhs()); + + ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) + * RhsBlasTraits::extractScalarFactor(prod.rhs()); + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... + EvalToDestAtCompileTime = Dest::InnerStrideAtCompileTime==1, + ComplexByReal = (NumTraits::IsComplex) && (!NumTraits::IsComplex), + MightCannotUseDest = (Dest::InnerStrideAtCompileTime!=1) || ComplexByReal + }; + + gemv_static_vector_if static_dest; + + bool alphaIsCompatible = (!ComplexByReal) || (imag(actualAlpha)==RealScalar(0)); + bool evalToDest = EvalToDestAtCompileTime && alphaIsCompatible; + + RhsScalar compatibleAlpha = get_factor::run(actualAlpha); + + ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(), + evalToDest ? 
dest.data() : static_dest.data()); + + if(!evalToDest) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = dest.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + if(!alphaIsCompatible) + { + MappedDest(actualDestPtr, dest.size()).setZero(); + compatibleAlpha = RhsScalar(1); + } + else + MappedDest(actualDestPtr, dest.size()) = dest; + } + + general_matrix_vector_product + ::run( + actualLhs.rows(), actualLhs.cols(), + actualLhs.data(), actualLhs.outerStride(), + actualRhs.data(), actualRhs.innerStride(), + actualDestPtr, 1, + compatibleAlpha); + + if (!evalToDest) + { + if(!alphaIsCompatible) + dest += actualAlpha * MappedDest(actualDestPtr, dest.size()); + else + dest = MappedDest(actualDestPtr, dest.size()); + } + } +}; + +template<> struct gemv_selector +{ + template + static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + typedef typename ProductType::LhsScalar LhsScalar; + typedef typename ProductType::RhsScalar RhsScalar; + typedef typename ProductType::Scalar ResScalar; + typedef typename ProductType::Index Index; + typedef typename ProductType::ActualLhsType ActualLhsType; + typedef typename ProductType::ActualRhsType ActualRhsType; + typedef typename ProductType::_ActualRhsType _ActualRhsType; + typedef typename ProductType::LhsBlasTraits LhsBlasTraits; + typedef typename ProductType::RhsBlasTraits RhsBlasTraits; + + typename add_const::type actualLhs = LhsBlasTraits::extract(prod.lhs()); + typename add_const::type actualRhs = RhsBlasTraits::extract(prod.rhs()); + + ResScalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(prod.lhs()) + * RhsBlasTraits::extractScalarFactor(prod.rhs()); + + enum { + // FIXME find a way to allow an inner stride on the result if packet_traits::size==1 + // on, the other hand it is good for the cache to pack the vector anyways... + DirectlyUseRhs = _ActualRhsType::InnerStrideAtCompileTime==1 + }; + + gemv_static_vector_if static_rhs; + + ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,actualRhs.size(), + DirectlyUseRhs ? const_cast(actualRhs.data()) : static_rhs.data()); + + if(!DirectlyUseRhs) + { + #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN + int size = actualRhs.size(); + EIGEN_DENSE_STORAGE_CTOR_PLUGIN + #endif + Map(actualRhsPtr, actualRhs.size()) = actualRhs; + } + + general_matrix_vector_product + ::run( + actualLhs.rows(), actualLhs.cols(), + actualLhs.data(), actualLhs.outerStride(), + actualRhsPtr, 1, + dest.data(), dest.innerStride(), + actualAlpha); + } +}; + +template<> struct gemv_selector +{ + template + static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + typedef typename Dest::Index Index; + // TODO makes sure dest is sequentially stored in memory, otherwise use a temp + const Index size = prod.rhs().rows(); + for(Index k=0; k struct gemv_selector +{ + template + static void run(const ProductType& prod, Dest& dest, typename ProductType::Scalar alpha) + { + typedef typename Dest::Index Index; + // TODO makes sure rhs is sequentially stored in memory, otherwise use a temp + const Index rows = prod.rows(); + for(Index i=0; i +template +inline const typename ProductReturnType::Type +MatrixBase::operator*(const MatrixBase &other) const +{ + // A note regarding the function declaration: In MSVC, this function will sometimes + // not be inlined since DenseStorage is an unwindable object for dynamic + // matrices and product types are holding a member to store the result. 
+ // Thus it does not help tagging this function with EIGEN_STRONG_INLINE. + enum { + ProductIsValid = Derived::ColsAtCompileTime==Dynamic + || OtherDerived::RowsAtCompileTime==Dynamic + || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwiseProduct(v2) + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) +#ifdef EIGEN_DEBUG_PRODUCT + internal::product_type::debug(); +#endif + return typename ProductReturnType::Type(derived(), other.derived()); +} + +/** \returns an expression of the matrix product of \c *this and \a other without implicit evaluation. + * + * The returned product will behave like any other expressions: the coefficients of the product will be + * computed once at a time as requested. This might be useful in some extremely rare cases when only + * a small and no coherent fraction of the result's coefficients have to be computed. + * + * \warning This version of the matrix product can be much much slower. So use it only if you know + * what you are doing and that you measured a true speed improvement. + * + * \sa operator*(const MatrixBase&) + */ +template +template +const typename LazyProductReturnType::Type +MatrixBase::lazyProduct(const MatrixBase &other) const +{ + enum { + ProductIsValid = Derived::ColsAtCompileTime==Dynamic + || OtherDerived::RowsAtCompileTime==Dynamic + || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwiseProduct(v2) + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) + + return typename LazyProductReturnType::Type(derived(), other.derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_PRODUCT_H diff --git a/latan/Eigen/src/Core/GenericPacketMath.h b/latan/Eigen/src/Core/GenericPacketMath.h new file mode 100644 index 0000000..858fb24 --- /dev/null +++ b/latan/Eigen/src/Core/GenericPacketMath.h @@ -0,0 +1,328 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
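// Illustrative sketch, not part of the imported Eigen sources: the difference
// between operator* and lazyProduct() documented in GeneralProduct.h above.
// The function and matrix names are made-up examples; <Eigen/Dense> is assumed.

#include <Eigen/Dense>

double product_corners(const Eigen::MatrixXd& a, const Eigen::MatrixXd& b)
{
    // operator* selects an optimized, evaluated product (inner, outer, GEMV or
    // GEMM, as chosen by internal::product_type), so c holds the full result.
    Eigen::MatrixXd c = a * b;

    // lazyProduct() keeps a coefficient-based expression instead: only the
    // coefficients actually read are computed, which the documentation above
    // warns is rarely faster and should be used with care.
    return c(0, 0) + a.lazyProduct(b)(0, 0);
}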
+ +#ifndef EIGEN_GENERIC_PACKET_MATH_H +#define EIGEN_GENERIC_PACKET_MATH_H + +namespace Eigen { + +namespace internal { + +/** \internal + * \file GenericPacketMath.h + * + * Default implementation for types not supported by the vectorization. + * In practice these functions are provided to make easier the writing + * of generic vectorized code. + */ + +#ifndef EIGEN_DEBUG_ALIGNED_LOAD +#define EIGEN_DEBUG_ALIGNED_LOAD +#endif + +#ifndef EIGEN_DEBUG_UNALIGNED_LOAD +#define EIGEN_DEBUG_UNALIGNED_LOAD +#endif + +#ifndef EIGEN_DEBUG_ALIGNED_STORE +#define EIGEN_DEBUG_ALIGNED_STORE +#endif + +#ifndef EIGEN_DEBUG_UNALIGNED_STORE +#define EIGEN_DEBUG_UNALIGNED_STORE +#endif + +struct default_packet_traits +{ + enum { + HasAdd = 1, + HasSub = 1, + HasMul = 1, + HasNegate = 1, + HasAbs = 1, + HasAbs2 = 1, + HasMin = 1, + HasMax = 1, + HasConj = 1, + HasSetLinear = 1, + + HasDiv = 0, + HasSqrt = 0, + HasExp = 0, + HasLog = 0, + HasPow = 0, + + HasSin = 0, + HasCos = 0, + HasTan = 0, + HasASin = 0, + HasACos = 0, + HasATan = 0 + }; +}; + +template struct packet_traits : default_packet_traits +{ + typedef T type; + enum { + Vectorizable = 0, + size = 1, + AlignedOnScalar = 0 + }; + enum { + HasAdd = 0, + HasSub = 0, + HasMul = 0, + HasNegate = 0, + HasAbs = 0, + HasAbs2 = 0, + HasMin = 0, + HasMax = 0, + HasConj = 0, + HasSetLinear = 0 + }; +}; + +/** \internal \returns a + b (coeff-wise) */ +template inline Packet +padd(const Packet& a, + const Packet& b) { return a+b; } + +/** \internal \returns a - b (coeff-wise) */ +template inline Packet +psub(const Packet& a, + const Packet& b) { return a-b; } + +/** \internal \returns -a (coeff-wise) */ +template inline Packet +pnegate(const Packet& a) { return -a; } + +/** \internal \returns conj(a) (coeff-wise) */ +template inline Packet +pconj(const Packet& a) { return conj(a); } + +/** \internal \returns a * b (coeff-wise) */ +template inline Packet +pmul(const Packet& a, + const Packet& b) { return a*b; } + +/** \internal \returns a / b (coeff-wise) */ +template inline Packet +pdiv(const Packet& a, + const Packet& b) { return a/b; } + +/** \internal \returns the min of \a a and \a b (coeff-wise) */ +template inline Packet +pmin(const Packet& a, + const Packet& b) { using std::min; return (min)(a, b); } + +/** \internal \returns the max of \a a and \a b (coeff-wise) */ +template inline Packet +pmax(const Packet& a, + const Packet& b) { using std::max; return (max)(a, b); } + +/** \internal \returns the absolute value of \a a */ +template inline Packet +pabs(const Packet& a) { return abs(a); } + +/** \internal \returns the bitwise and of \a a and \a b */ +template inline Packet +pand(const Packet& a, const Packet& b) { return a & b; } + +/** \internal \returns the bitwise or of \a a and \a b */ +template inline Packet +por(const Packet& a, const Packet& b) { return a | b; } + +/** \internal \returns the bitwise xor of \a a and \a b */ +template inline Packet +pxor(const Packet& a, const Packet& b) { return a ^ b; } + +/** \internal \returns the bitwise andnot of \a a and \a b */ +template inline Packet +pandnot(const Packet& a, const Packet& b) { return a & (!b); } + +/** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */ +template inline Packet +pload(const typename unpacket_traits::type* from) { return *from; } + +/** \internal \returns a packet version of \a *from, (un-aligned load) */ +template inline Packet +ploadu(const typename unpacket_traits::type* from) { return *from; } + +/** \internal \returns a packet with 
elements of \a *from duplicated, e.g.: (from[0],from[0],from[1],from[1]) */ +template inline Packet +ploaddup(const typename unpacket_traits::type* from) { return *from; } + +/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ +template inline Packet +pset1(const typename unpacket_traits::type& a) { return a; } + +/** \internal \brief Returns a packet with coefficients (a,a+1,...,a+packet_size-1). */ +template inline typename packet_traits::type +plset(const Scalar& a) { return a; } + +/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */ +template inline void pstore(Scalar* to, const Packet& from) +{ (*to) = from; } + +/** \internal copy the packet \a from to \a *to, (un-aligned store) */ +template inline void pstoreu(Scalar* to, const Packet& from) +{ (*to) = from; } + +/** \internal tries to do cache prefetching of \a addr */ +template inline void prefetch(const Scalar* addr) +{ +#if !defined(_MSC_VER) +__builtin_prefetch(addr); +#endif +} + +/** \internal \returns the first element of a packet */ +template inline typename unpacket_traits::type pfirst(const Packet& a) +{ return a; } + +/** \internal \returns a packet where the element i contains the sum of the packet of \a vec[i] */ +template inline Packet +preduxp(const Packet* vecs) { return vecs[0]; } + +/** \internal \returns the sum of the elements of \a a*/ +template inline typename unpacket_traits::type predux(const Packet& a) +{ return a; } + +/** \internal \returns the product of the elements of \a a*/ +template inline typename unpacket_traits::type predux_mul(const Packet& a) +{ return a; } + +/** \internal \returns the min of the elements of \a a*/ +template inline typename unpacket_traits::type predux_min(const Packet& a) +{ return a; } + +/** \internal \returns the max of the elements of \a a*/ +template inline typename unpacket_traits::type predux_max(const Packet& a) +{ return a; } + +/** \internal \returns the reversed elements of \a a*/ +template inline Packet preverse(const Packet& a) +{ return a; } + + +/** \internal \returns \a a with real and imaginary part flipped (for complex type only) */ +template inline Packet pcplxflip(const Packet& a) +{ return Packet(imag(a),real(a)); } + +/************************** +* Special math functions +***************************/ + +/** \internal \returns the sine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet psin(const Packet& a) { return sin(a); } + +/** \internal \returns the cosine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pcos(const Packet& a) { return cos(a); } + +/** \internal \returns the tan of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet ptan(const Packet& a) { return tan(a); } + +/** \internal \returns the arc sine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pasin(const Packet& a) { return asin(a); } + +/** \internal \returns the arc cosine of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pacos(const Packet& a) { return acos(a); } + +/** \internal \returns the exp of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet pexp(const Packet& a) { return exp(a); } + +/** \internal \returns the log of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet plog(const Packet& a) { return log(a); } + +/** 
\internal \returns the square-root of \a a (coeff-wise) */ +template EIGEN_DECLARE_FUNCTION_ALLOWING_MULTIPLE_DEFINITIONS +Packet psqrt(const Packet& a) { return sqrt(a); } + +/*************************************************************************** +* The following functions might not have to be overwritten for vectorized types +***************************************************************************/ + +/** \internal copy a packet with constant coeficient \a a (e.g., [a,a,a,a]) to \a *to. \a to must be 16 bytes aligned */ +// NOTE: this function must really be templated on the packet type (think about different packet types for the same scalar type) +template +inline void pstore1(typename unpacket_traits::type* to, const typename unpacket_traits::type& a) +{ + pstore(to, pset1(a)); +} + +/** \internal \returns a * b + c (coeff-wise) */ +template inline Packet +pmadd(const Packet& a, + const Packet& b, + const Packet& c) +{ return padd(pmul(a, b),c); } + +/** \internal \returns a packet version of \a *from. + * If LoadMode equals #Aligned, \a from must be 16 bytes aligned */ +template +inline Packet ploadt(const typename unpacket_traits::type* from) +{ + if(LoadMode == Aligned) + return pload(from); + else + return ploadu(from); +} + +/** \internal copy the packet \a from to \a *to. + * If StoreMode equals #Aligned, \a to must be 16 bytes aligned */ +template +inline void pstoret(Scalar* to, const Packet& from) +{ + if(LoadMode == Aligned) + pstore(to, from); + else + pstoreu(to, from); +} + +/** \internal default implementation of palign() allowing partial specialization */ +template +struct palign_impl +{ + // by default data are aligned, so there is nothing to be done :) + static inline void run(PacketType&, const PacketType&) {} +}; + +/** \internal update \a first using the concatenation of the \a Offset last elements + * of \a first and packet_size minus \a Offset first elements of \a second */ +template +inline void palign(PacketType& first, const PacketType& second) +{ + palign_impl::run(first,second); +} + +/*************************************************************************** +* Fast complex products (GCC generates a function call which is very slow) +***************************************************************************/ + +template<> inline std::complex pmul(const std::complex& a, const std::complex& b) +{ return std::complex(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); } + +template<> inline std::complex pmul(const std::complex& a, const std::complex& b) +{ return std::complex(real(a)*real(b) - imag(a)*imag(b), imag(a)*real(b) + real(a)*imag(b)); } + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_GENERIC_PACKET_MATH_H + diff --git a/latan/Eigen/src/Core/GlobalFunctions.h b/latan/Eigen/src/Core/GlobalFunctions.h new file mode 100644 index 0000000..e63726c --- /dev/null +++ b/latan/Eigen/src/Core/GlobalFunctions.h @@ -0,0 +1,103 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2010 Gael Guennebaud +// Copyright (C) 2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
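// Illustrative sketch, not part of the imported Eigen sources: code written
// against the generic packet primitives declared in GenericPacketMath.h above
// works for any scalar type, because the generic versions fall back to plain
// scalar arithmetic when no vectorized specialization exists. The function
// below is a made-up example and assumes <Eigen/Core> has been included.

template<typename Scalar>
void scaled_add(const Scalar* a, const Scalar* b, Scalar* out, int n, Scalar alpha)
{
    typedef typename Eigen::internal::packet_traits<Scalar>::type Packet;
    const int PacketSize = Eigen::internal::packet_traits<Scalar>::size;
    const Packet palpha = Eigen::internal::pset1<Packet>(alpha);

    int i = 0;
    for (; i + PacketSize <= n; i += PacketSize)
    {
        // out[i..] = a[i..] * alpha + b[i..], one packet at a time (unaligned I/O,
        // so no 16-byte alignment requirement on the input pointers).
        Packet pa = Eigen::internal::ploadu<Packet>(a + i);
        Packet pb = Eigen::internal::ploadu<Packet>(b + i);
        Eigen::internal::pstoreu(out + i, Eigen::internal::pmadd(pa, palpha, pb));
    }
    for (; i < n; ++i)   // scalar tail for the remaining coefficients
        out[i] = a[i] * alpha + b[i];
}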
+ +#ifndef EIGEN_GLOBAL_FUNCTIONS_H +#define EIGEN_GLOBAL_FUNCTIONS_H + +#define EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(NAME,FUNCTOR) \ + template \ + inline const Eigen::CwiseUnaryOp, const Derived> \ + NAME(const Eigen::ArrayBase& x) { \ + return x.derived(); \ + } + +#define EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(NAME,FUNCTOR) \ + \ + template \ + struct NAME##_retval > \ + { \ + typedef const Eigen::CwiseUnaryOp, const Derived> type; \ + }; \ + template \ + struct NAME##_impl > \ + { \ + static inline typename NAME##_retval >::type run(const Eigen::ArrayBase& x) \ + { \ + return x.derived(); \ + } \ + }; + + +namespace std +{ + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(real,scalar_real_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(imag,scalar_imag_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sin,scalar_sin_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(cos,scalar_cos_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(asin,scalar_asin_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(acos,scalar_acos_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(tan,scalar_tan_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(exp,scalar_exp_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(log,scalar_log_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(abs,scalar_abs_op) + EIGEN_ARRAY_DECLARE_GLOBAL_STD_UNARY(sqrt,scalar_sqrt_op) + + template + inline const Eigen::CwiseUnaryOp, const Derived> + pow(const Eigen::ArrayBase& x, const typename Derived::Scalar& exponent) { + return x.derived().pow(exponent); + } + + template + inline const Eigen::CwiseBinaryOp, const Derived, const Derived> + pow(const Eigen::ArrayBase& x, const Eigen::ArrayBase& exponents) + { + return Eigen::CwiseBinaryOp, const Derived, const Derived>( + x.derived(), + exponents.derived() + ); + } +} + +namespace Eigen +{ + /** + * \brief Component-wise division of a scalar by array elements. + **/ + template + inline const Eigen::CwiseUnaryOp, const Derived> + operator/(typename Derived::Scalar s, const Eigen::ArrayBase& a) + { + return Eigen::CwiseUnaryOp, const Derived>( + a.derived(), + Eigen::internal::scalar_inverse_mult_op(s) + ); + } + + namespace internal + { + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(real,scalar_real_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(imag,scalar_imag_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sin,scalar_sin_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(cos,scalar_cos_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(asin,scalar_asin_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(acos,scalar_acos_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(tan,scalar_tan_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(exp,scalar_exp_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(log,scalar_log_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs,scalar_abs_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(abs2,scalar_abs2_op) + EIGEN_ARRAY_DECLARE_GLOBAL_EIGEN_UNARY(sqrt,scalar_sqrt_op) + } +} + +// TODO: cleanly disable those functions that are not supported on Array (internal::real_ref, internal::random, internal::isApprox...) + +#endif // EIGEN_GLOBAL_FUNCTIONS_H diff --git a/latan/Eigen/src/Core/IO.h b/latan/Eigen/src/Core/IO.h new file mode 100644 index 0000000..cc8e18a --- /dev/null +++ b/latan/Eigen/src/Core/IO.h @@ -0,0 +1,249 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. 
If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_IO_H +#define EIGEN_IO_H + +namespace Eigen { + +enum { DontAlignCols = 1 }; +enum { StreamPrecision = -1, + FullPrecision = -2 }; + +namespace internal { +template +std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt); +} + +/** \class IOFormat + * \ingroup Core_Module + * + * \brief Stores a set of parameters controlling the way matrices are printed + * + * List of available parameters: + * - \b precision number of digits for floating point values, or one of the special constants \c StreamPrecision and \c FullPrecision. + * The default is the special value \c StreamPrecision which means to use the + * stream's own precision setting, as set for instance using \c cout.precision(3). The other special value + * \c FullPrecision means that the number of digits will be computed to match the full precision of each floating-point + * type. + * - \b flags an OR-ed combination of flags, the default value is 0, the only currently available flag is \c DontAlignCols which + * allows to disable the alignment of columns, resulting in faster code. + * - \b coeffSeparator string printed between two coefficients of the same row + * - \b rowSeparator string printed between two rows + * - \b rowPrefix string printed at the beginning of each row + * - \b rowSuffix string printed at the end of each row + * - \b matPrefix string printed at the beginning of the matrix + * - \b matSuffix string printed at the end of the matrix + * + * Example: \include IOFormat.cpp + * Output: \verbinclude IOFormat.out + * + * \sa DenseBase::format(), class WithFormat + */ +struct IOFormat +{ + /** Default contructor, see class IOFormat for the meaning of the parameters */ + IOFormat(int _precision = StreamPrecision, int _flags = 0, + const std::string& _coeffSeparator = " ", + const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="", + const std::string& _matPrefix="", const std::string& _matSuffix="") + : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator), + coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags) + { + rowSpacer = ""; + int i = int(matSuffix.length())-1; + while (i>=0 && matSuffix[i]!='\n') + { + rowSpacer += ' '; + i--; + } + } + std::string matPrefix, matSuffix; + std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer; + std::string coeffSeparator; + int precision; + int flags; +}; + +/** \class WithFormat + * \ingroup Core_Module + * + * \brief Pseudo expression providing matrix output with given format + * + * \param ExpressionType the type of the object on which IO stream operations are performed + * + * This class represents an expression with stream operators controlled by a given IOFormat. + * It is the return type of DenseBase::format() + * and most of the time this is the only way it is used. + * + * See class IOFormat for some examples. 
+ * + * \sa DenseBase::format(), class IOFormat + */ +template +class WithFormat +{ + public: + + WithFormat(const ExpressionType& matrix, const IOFormat& format) + : m_matrix(matrix), m_format(format) + {} + + friend std::ostream & operator << (std::ostream & s, const WithFormat& wf) + { + return internal::print_matrix(s, wf.m_matrix.eval(), wf.m_format); + } + + protected: + const typename ExpressionType::Nested m_matrix; + IOFormat m_format; +}; + +/** \returns a WithFormat proxy object allowing to print a matrix the with given + * format \a fmt. + * + * See class IOFormat for some examples. + * + * \sa class IOFormat, class WithFormat + */ +template +inline const WithFormat +DenseBase::format(const IOFormat& fmt) const +{ + return WithFormat(derived(), fmt); +} + +namespace internal { + +template +struct significant_decimals_default_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline int run() + { + using std::ceil; + return cast(ceil(-log(NumTraits::epsilon())/log(RealScalar(10)))); + } +}; + +template +struct significant_decimals_default_impl +{ + static inline int run() + { + return 0; + } +}; + +template +struct significant_decimals_impl + : significant_decimals_default_impl::IsInteger> +{}; + +/** \internal + * print the matrix \a _m to the output stream \a s using the output format \a fmt */ +template +std::ostream & print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt) +{ + if(_m.size() == 0) + { + s << fmt.matPrefix << fmt.matSuffix; + return s; + } + + typename Derived::Nested m = _m; + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Index Index; + + Index width = 0; + + std::streamsize explicit_precision; + if(fmt.precision == StreamPrecision) + { + explicit_precision = 0; + } + else if(fmt.precision == FullPrecision) + { + if (NumTraits::IsInteger) + { + explicit_precision = 0; + } + else + { + explicit_precision = significant_decimals_impl::run(); + } + } + else + { + explicit_precision = fmt.precision; + } + + bool align_cols = !(fmt.flags & DontAlignCols); + if(align_cols) + { + // compute the largest width + for(Index j = 1; j < m.cols(); ++j) + for(Index i = 0; i < m.rows(); ++i) + { + std::stringstream sstr; + if(explicit_precision) sstr.precision(explicit_precision); + sstr << m.coeff(i,j); + width = std::max(width, Index(sstr.str().length())); + } + } + std::streamsize old_precision = 0; + if(explicit_precision) old_precision = s.precision(explicit_precision); + s << fmt.matPrefix; + for(Index i = 0; i < m.rows(); ++i) + { + if (i) + s << fmt.rowSpacer; + s << fmt.rowPrefix; + if(width) s.width(width); + s << m.coeff(i, 0); + for(Index j = 1; j < m.cols(); ++j) + { + s << fmt.coeffSeparator; + if (width) s.width(width); + s << m.coeff(i, j); + } + s << fmt.rowSuffix; + if( i < m.rows() - 1) + s << fmt.rowSeparator; + } + s << fmt.matSuffix; + if(explicit_precision) s.precision(old_precision); + return s; +} + +} // end namespace internal + +/** \relates DenseBase + * + * Outputs the matrix, to the given stream. + * + * If you wish to print the matrix with a format different than the default, use DenseBase::format(). + * + * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers. + * If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default parameters. 
+ * + * \sa DenseBase::format() + */ +template +std::ostream & operator << +(std::ostream & s, + const DenseBase & m) +{ + return internal::print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); +} + +} // end namespace Eigen + +#endif // EIGEN_IO_H diff --git a/latan/Eigen/src/Core/Map.h b/latan/Eigen/src/Core/Map.h new file mode 100644 index 0000000..15a1922 --- /dev/null +++ b/latan/Eigen/src/Core/Map.h @@ -0,0 +1,192 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MAP_H +#define EIGEN_MAP_H + +namespace Eigen { + +/** \class Map + * \ingroup Core_Module + * + * \brief A matrix or vector expression mapping an existing array of data. + * + * \tparam PlainObjectType the equivalent matrix type of the mapped data + * \tparam MapOptions specifies whether the pointer is \c #Aligned, or \c #Unaligned. + * The default is \c #Unaligned. + * \tparam StrideType optionally specifies strides. By default, Map assumes the memory layout + * of an ordinary, contiguous array. This can be overridden by specifying strides. + * The type passed here must be a specialization of the Stride template, see examples below. + * + * This class represents a matrix or vector expression mapping an existing array of data. + * It can be used to let Eigen interface without any overhead with non-Eigen data structures, + * such as plain C arrays or structures from other libraries. By default, it assumes that the + * data is laid out contiguously in memory. You can however override this by explicitly specifying + * inner and outer strides. + * + * Here's an example of simply mapping a contiguous array as a \ref TopicStorageOrders "column-major" matrix: + * \include Map_simple.cpp + * Output: \verbinclude Map_simple.out + * + * If you need to map non-contiguous arrays, you can do so by specifying strides: + * + * Here's an example of mapping an array as a vector, specifying an inner stride, that is, the pointer + * increment between two consecutive coefficients. Here, we're specifying the inner stride as a compile-time + * fixed value. + * \include Map_inner_stride.cpp + * Output: \verbinclude Map_inner_stride.out + * + * Here's an example of mapping an array while specifying an outer stride. Here, since we're mapping + * as a column-major matrix, 'outer stride' means the pointer increment between two consecutive columns. + * Here, we're specifying the outer stride as a runtime parameter. Note that here \c OuterStride<> is + * a short version of \c OuterStride because the default template parameter of OuterStride + * is \c Dynamic + * \include Map_outer_stride.cpp + * Output: \verbinclude Map_outer_stride.out + * + * For more details and for an example of specifying both an inner and an outer stride, see class Stride. + * + * \b Tip: to change the array of data mapped by a Map object, you can use the C++ + * placement new syntax: + * + * Example: \include Map_placement_new.cpp + * Output: \verbinclude Map_placement_new.out + * + * This class is the return type of PlainObjectBase::Map() but can also be used directly. 
+ * + * \sa PlainObjectBase::Map(), \ref TopicStorageOrders + */ + +namespace internal { +template +struct traits > + : public traits +{ + typedef traits TraitsBase; + typedef typename PlainObjectType::Index Index; + typedef typename PlainObjectType::Scalar Scalar; + enum { + InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0 + ? int(PlainObjectType::InnerStrideAtCompileTime) + : int(StrideType::InnerStrideAtCompileTime), + OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0 + ? int(PlainObjectType::OuterStrideAtCompileTime) + : int(StrideType::OuterStrideAtCompileTime), + HasNoInnerStride = InnerStrideAtCompileTime == 1, + HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0, + HasNoStride = HasNoInnerStride && HasNoOuterStride, + IsAligned = bool(EIGEN_ALIGN) && ((int(MapOptions)&Aligned)==Aligned), + IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic, + KeepsPacketAccess = bool(HasNoInnerStride) + && ( bool(IsDynamicSize) + || HasNoOuterStride + || ( OuterStrideAtCompileTime!=Dynamic + && ((static_cast(sizeof(Scalar))*OuterStrideAtCompileTime)%16)==0 ) ), + Flags0 = TraitsBase::Flags & (~NestByRefBit), + Flags1 = IsAligned ? (int(Flags0) | AlignedBit) : (int(Flags0) & ~AlignedBit), + Flags2 = (bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime)) + ? int(Flags1) : int(Flags1 & ~LinearAccessBit), + Flags3 = is_lvalue::value ? int(Flags2) : (int(Flags2) & ~LvalueBit), + Flags = KeepsPacketAccess ? int(Flags3) : (int(Flags3) & ~PacketAccessBit) + }; +private: + enum { Options }; // Expressions don't have Options +}; +} + +template class Map + : public MapBase > +{ + public: + + typedef MapBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Map) + + typedef typename Base::PointerType PointerType; +#if EIGEN2_SUPPORT_STAGE <= STAGE30_FULL_EIGEN3_API + typedef const Scalar* PointerArgType; + inline PointerType cast_to_pointer_type(PointerArgType ptr) { return const_cast(ptr); } +#else + typedef PointerType PointerArgType; + inline PointerType cast_to_pointer_type(PointerArgType ptr) { return ptr; } +#endif + + inline Index innerStride() const + { + return StrideType::InnerStrideAtCompileTime != 0 ? m_stride.inner() : 1; + } + + inline Index outerStride() const + { + return StrideType::OuterStrideAtCompileTime != 0 ? m_stride.outer() + : IsVectorAtCompileTime ? this->size() + : int(Flags)&RowMajorBit ? this->cols() + : this->rows(); + } + + /** Constructor in the fixed-size case. + * + * \param data pointer to the array to map + * \param stride optional Stride object, passing the strides. + */ + inline Map(PointerArgType data, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(data)), m_stride(stride) + { + PlainObjectType::Base::_check_template_params(); + } + + /** Constructor in the dynamic-size vector case. + * + * \param data pointer to the array to map + * \param size the size of the vector expression + * \param stride optional Stride object, passing the strides. + */ + inline Map(PointerArgType data, Index size, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(data), size), m_stride(stride) + { + PlainObjectType::Base::_check_template_params(); + } + + /** Constructor in the dynamic-size matrix case. + * + * \param data pointer to the array to map + * \param rows the number of rows of the matrix expression + * \param cols the number of columns of the matrix expression + * \param stride optional Stride object, passing the strides. 
+ */ + inline Map(PointerArgType data, Index rows, Index cols, const StrideType& stride = StrideType()) + : Base(cast_to_pointer_type(data), rows, cols), m_stride(stride) + { + PlainObjectType::Base::_check_template_params(); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) + + protected: + StrideType m_stride; +}; + +template +inline Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> + ::Array(const Scalar *data) +{ + this->_set_noalias(Eigen::Map(data)); +} + +template +inline Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols> + ::Matrix(const Scalar *data) +{ + this->_set_noalias(Eigen::Map(data)); +} + +} // end namespace Eigen + +#endif // EIGEN_MAP_H diff --git a/latan/Eigen/src/Core/MapBase.h b/latan/Eigen/src/Core/MapBase.h new file mode 100644 index 0000000..a388d61 --- /dev/null +++ b/latan/Eigen/src/Core/MapBase.h @@ -0,0 +1,242 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2007-2010 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MAPBASE_H +#define EIGEN_MAPBASE_H + +#define EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) \ + EIGEN_STATIC_ASSERT((int(internal::traits::Flags) & LinearAccessBit) || Derived::IsVectorAtCompileTime, \ + YOU_ARE_TRYING_TO_USE_AN_INDEX_BASED_ACCESSOR_ON_AN_EXPRESSION_THAT_DOES_NOT_SUPPORT_THAT) + +namespace Eigen { + +/** \class MapBase + * \ingroup Core_Module + * + * \brief Base class for Map and Block expression with direct access + * + * \sa class Map, class Block + */ +template class MapBase + : public internal::dense_xpr_base::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + enum { + RowsAtCompileTime = internal::traits::RowsAtCompileTime, + ColsAtCompileTime = internal::traits::ColsAtCompileTime, + SizeAtCompileTime = Base::SizeAtCompileTime + }; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef typename internal::conditional< + bool(internal::is_lvalue::value), + Scalar *, + const Scalar *>::type + PointerType; + + using Base::derived; +// using Base::RowsAtCompileTime; +// using Base::ColsAtCompileTime; +// using Base::SizeAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::IsVectorAtCompileTime; + using Base::Flags; + using Base::IsRowMajor; + + using Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + using Base::lazyAssign; + using Base::eval; + + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + using Base::colStride; + + // bug 217 - compile error on ICC 11.1 + using Base::operator=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + + inline Index rows() const { return m_rows.value(); } + inline Index cols() const { return m_cols.value(); } + + /** Returns a pointer to the first coefficient of the matrix or vector. + * + * \note When addressing this data, make sure to honor the strides returned by innerStride() and outerStride(). 
+ * + * \sa innerStride(), outerStride() + */ + inline const Scalar* data() const { return m_data; } + + inline const Scalar& coeff(Index row, Index col) const + { + return m_data[col * colStride() + row * rowStride()]; + } + + inline const Scalar& coeff(Index index) const + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return m_data[index * innerStride()]; + } + + inline const Scalar& coeffRef(Index row, Index col) const + { + return this->m_data[col * colStride() + row * rowStride()]; + } + + inline const Scalar& coeffRef(Index index) const + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return this->m_data[index * innerStride()]; + } + + template + inline PacketScalar packet(Index row, Index col) const + { + return internal::ploadt + (m_data + (col * colStride() + row * rowStride())); + } + + template + inline PacketScalar packet(Index index) const + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return internal::ploadt(m_data + index * innerStride()); + } + + inline MapBase(PointerType data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) + { + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + checkSanity(); + } + + inline MapBase(PointerType data, Index size) + : m_data(data), + m_rows(RowsAtCompileTime == Dynamic ? size : Index(RowsAtCompileTime)), + m_cols(ColsAtCompileTime == Dynamic ? size : Index(ColsAtCompileTime)) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + eigen_assert(size >= 0); + eigen_assert(data == 0 || SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); + checkSanity(); + } + + inline MapBase(PointerType data, Index rows, Index cols) + : m_data(data), m_rows(rows), m_cols(cols) + { + eigen_assert( (data == 0) + || ( rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols))); + checkSanity(); + } + + protected: + + void checkSanity() const + { + EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(internal::traits::Flags&PacketAccessBit, + internal::inner_stride_at_compile_time::ret==1), + PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1); + eigen_assert(EIGEN_IMPLIES(internal::traits::Flags&AlignedBit, (size_t(m_data) % 16) == 0) + && "data is not aligned"); + } + + PointerType m_data; + const internal::variable_if_dynamic m_rows; + const internal::variable_if_dynamic m_cols; +}; + +template class MapBase + : public MapBase +{ + public: + + typedef MapBase Base; + + typedef typename Base::Scalar Scalar; + typedef typename Base::PacketScalar PacketScalar; + typedef typename Base::Index Index; + typedef typename Base::PointerType PointerType; + + using Base::derived; + using Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + + using Base::innerStride; + using Base::outerStride; + using Base::rowStride; + using Base::colStride; + + typedef typename internal::conditional< + internal::is_lvalue::value, + Scalar, + const Scalar + >::type ScalarWithConstIfNotLvalue; + + inline const Scalar* data() const { return this->m_data; } + inline ScalarWithConstIfNotLvalue* data() { return this->m_data; } // no const-cast here so non-const-correct code will give a compile error + + inline ScalarWithConstIfNotLvalue& coeffRef(Index row, Index col) + { + return this->m_data[col * colStride() + row * rowStride()]; + } + + inline ScalarWithConstIfNotLvalue& coeffRef(Index index) + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + return this->m_data[index * innerStride()]; + } + + template + inline void 
writePacket(Index row, Index col, const PacketScalar& x) + { + internal::pstoret + (this->m_data + (col * colStride() + row * rowStride()), x); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS(Derived) + internal::pstoret + (this->m_data + index * innerStride(), x); + } + + explicit inline MapBase(PointerType data) : Base(data) {} + inline MapBase(PointerType data, Index size) : Base(data, size) {} + inline MapBase(PointerType data, Index rows, Index cols) : Base(data, rows, cols) {} + + Derived& operator=(const MapBase& other) + { + Base::Base::operator=(other); + return derived(); + } + + using Base::Base::operator=; +}; + +} // end namespace Eigen + +#endif // EIGEN_MAPBASE_H diff --git a/latan/Eigen/src/Core/MathFunctions.h b/latan/Eigen/src/Core/MathFunctions.h new file mode 100644 index 0000000..05e913f --- /dev/null +++ b/latan/Eigen/src/Core/MathFunctions.h @@ -0,0 +1,842 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATHFUNCTIONS_H +#define EIGEN_MATHFUNCTIONS_H + +namespace Eigen { + +namespace internal { + +/** \internal \struct global_math_functions_filtering_base + * + * What it does: + * Defines a typedef 'type' as follows: + * - if type T has a member typedef Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl, then + * global_math_functions_filtering_base::type is a typedef for it. + * - otherwise, global_math_functions_filtering_base::type is a typedef for T. + * + * How it's used: + * To allow to defined the global math functions (like sin...) in certain cases, like the Array expressions. + * When you do sin(array1+array2), the object array1+array2 has a complicated expression type, all what you want to know + * is that it inherits ArrayBase. So we implement a partial specialization of sin_impl for ArrayBase. + * So we must make sure to use sin_impl > and not sin_impl, otherwise our partial specialization + * won't be used. How does sin know that? That's exactly what global_math_functions_filtering_base tells it. + * + * How it's implemented: + * SFINAE in the style of enable_if. Highly susceptible of breaking compilers. With GCC, it sure does work, but if you replace + * the typename dummy by an integer template parameter, it doesn't work anymore! 
+ */ + +template +struct global_math_functions_filtering_base +{ + typedef T type; +}; + +template struct always_void { typedef void type; }; + +template +struct global_math_functions_filtering_base + ::type + > +{ + typedef typename T::Eigen_BaseClassForSpecializationOfGlobalMathFuncImpl type; +}; + +#define EIGEN_MATHFUNC_IMPL(func, scalar) func##_impl::type> +#define EIGEN_MATHFUNC_RETVAL(func, scalar) typename func##_retval::type>::type + + +/**************************************************************************** +* Implementation of real * +****************************************************************************/ + +template +struct real_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar run(const Scalar& x) + { + return x; + } +}; + +template +struct real_impl > +{ + static inline RealScalar run(const std::complex& x) + { + using std::real; + return real(x); + } +}; + +template +struct real_retval +{ + typedef typename NumTraits::Real type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(real, Scalar) real(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(real, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of imag * +****************************************************************************/ + +template +struct imag_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar run(const Scalar&) + { + return RealScalar(0); + } +}; + +template +struct imag_impl > +{ + static inline RealScalar run(const std::complex& x) + { + using std::imag; + return imag(x); + } +}; + +template +struct imag_retval +{ + typedef typename NumTraits::Real type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(imag, Scalar) imag(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(imag, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of real_ref * +****************************************************************************/ + +template +struct real_ref_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar& run(Scalar& x) + { + return reinterpret_cast(&x)[0]; + } + static inline const RealScalar& run(const Scalar& x) + { + return reinterpret_cast(&x)[0]; + } +}; + +template +struct real_ref_retval +{ + typedef typename NumTraits::Real & type; +}; + +template +inline typename add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) >::type real_ref(const Scalar& x) +{ + return real_ref_impl::run(x); +} + +template +inline EIGEN_MATHFUNC_RETVAL(real_ref, Scalar) real_ref(Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(real_ref, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of imag_ref * +****************************************************************************/ + +template +struct imag_ref_default_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar& run(Scalar& x) + { + return reinterpret_cast(&x)[1]; + } + static inline const RealScalar& run(const Scalar& x) + { + return reinterpret_cast(&x)[1]; + } +}; + +template +struct imag_ref_default_impl +{ + static inline Scalar run(Scalar&) + { + return Scalar(0); + } + static inline const Scalar run(const Scalar&) + { + return Scalar(0); + } +}; + +template +struct imag_ref_impl : imag_ref_default_impl::IsComplex> {}; + +template +struct imag_ref_retval +{ + typedef typename NumTraits::Real & type; +}; + +template +inline typename 
add_const_on_value_type< EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) >::type imag_ref(const Scalar& x) +{ + return imag_ref_impl::run(x); +} + +template +inline EIGEN_MATHFUNC_RETVAL(imag_ref, Scalar) imag_ref(Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(imag_ref, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of conj * +****************************************************************************/ + +template +struct conj_impl +{ + static inline Scalar run(const Scalar& x) + { + return x; + } +}; + +template +struct conj_impl > +{ + static inline std::complex run(const std::complex& x) + { + using std::conj; + return conj(x); + } +}; + +template +struct conj_retval +{ + typedef Scalar type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(conj, Scalar) conj(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(conj, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of abs * +****************************************************************************/ + +template +struct abs_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar run(const Scalar& x) + { + using std::abs; + return abs(x); + } +}; + +template +struct abs_retval +{ + typedef typename NumTraits::Real type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(abs, Scalar) abs(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(abs, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of abs2 * +****************************************************************************/ + +template +struct abs2_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar run(const Scalar& x) + { + return x*x; + } +}; + +template +struct abs2_impl > +{ + static inline RealScalar run(const std::complex& x) + { + return real(x)*real(x) + imag(x)*imag(x); + } +}; + +template +struct abs2_retval +{ + typedef typename NumTraits::Real type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(abs2, Scalar) abs2(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(abs2, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of norm1 * +****************************************************************************/ + +template +struct norm1_default_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar run(const Scalar& x) + { + return abs(real(x)) + abs(imag(x)); + } +}; + +template +struct norm1_default_impl +{ + static inline Scalar run(const Scalar& x) + { + return abs(x); + } +}; + +template +struct norm1_impl : norm1_default_impl::IsComplex> {}; + +template +struct norm1_retval +{ + typedef typename NumTraits::Real type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(norm1, Scalar) norm1(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(norm1, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of hypot * +****************************************************************************/ + +template +struct hypot_impl +{ + typedef typename NumTraits::Real RealScalar; + static inline RealScalar run(const Scalar& x, const Scalar& y) + { + using std::max; + using std::min; + RealScalar _x = abs(x); + RealScalar _y = abs(y); + RealScalar p = (max)(_x, _y); + RealScalar q = (min)(_x, _y); + RealScalar qp = q/p; + return p * sqrt(RealScalar(1) + qp*qp); + } +}; + +template +struct 
hypot_retval +{ + typedef typename NumTraits::Real type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(hypot, Scalar) hypot(const Scalar& x, const Scalar& y) +{ + return EIGEN_MATHFUNC_IMPL(hypot, Scalar)::run(x, y); +} + +/**************************************************************************** +* Implementation of cast * +****************************************************************************/ + +template +struct cast_impl +{ + static inline NewType run(const OldType& x) + { + return static_cast(x); + } +}; + +// here, for once, we're plainly returning NewType: we don't want cast to do weird things. + +template +inline NewType cast(const OldType& x) +{ + return cast_impl::run(x); +} + +/**************************************************************************** +* Implementation of sqrt * +****************************************************************************/ + +template +struct sqrt_default_impl +{ + static inline Scalar run(const Scalar& x) + { + using std::sqrt; + return sqrt(x); + } +}; + +template +struct sqrt_default_impl +{ + static inline Scalar run(const Scalar&) + { +#ifdef EIGEN2_SUPPORT + eigen_assert(!NumTraits::IsInteger); +#else + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) +#endif + return Scalar(0); + } +}; + +template +struct sqrt_impl : sqrt_default_impl::IsInteger> {}; + +template +struct sqrt_retval +{ + typedef Scalar type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(sqrt, Scalar) sqrt(const Scalar& x) +{ + return EIGEN_MATHFUNC_IMPL(sqrt, Scalar)::run(x); +} + +/**************************************************************************** +* Implementation of standard unary real functions (exp, log, sin, cos, ... * +****************************************************************************/ + +// This macro instanciate all the necessary template mechanism which is common to all unary real functions. 
+#define EIGEN_MATHFUNC_STANDARD_REAL_UNARY(NAME) \ + template struct NAME##_default_impl { \ + static inline Scalar run(const Scalar& x) { using std::NAME; return NAME(x); } \ + }; \ + template struct NAME##_default_impl { \ + static inline Scalar run(const Scalar&) { \ + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) \ + return Scalar(0); \ + } \ + }; \ + template struct NAME##_impl \ + : NAME##_default_impl::IsInteger> \ + {}; \ + template struct NAME##_retval { typedef Scalar type; }; \ + template \ + inline EIGEN_MATHFUNC_RETVAL(NAME, Scalar) NAME(const Scalar& x) { \ + return EIGEN_MATHFUNC_IMPL(NAME, Scalar)::run(x); \ + } + +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(exp) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(log) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(sin) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(cos) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(tan) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(asin) +EIGEN_MATHFUNC_STANDARD_REAL_UNARY(acos) + +/**************************************************************************** +* Implementation of atan2 * +****************************************************************************/ + +template +struct atan2_default_impl +{ + typedef Scalar retval; + static inline Scalar run(const Scalar& x, const Scalar& y) + { + using std::atan2; + return atan2(x, y); + } +}; + +template +struct atan2_default_impl +{ + static inline Scalar run(const Scalar&, const Scalar&) + { + EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar) + return Scalar(0); + } +}; + +template +struct atan2_impl : atan2_default_impl::IsInteger> {}; + +template +struct atan2_retval +{ + typedef Scalar type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(atan2, Scalar) atan2(const Scalar& x, const Scalar& y) +{ + return EIGEN_MATHFUNC_IMPL(atan2, Scalar)::run(x, y); +} + +/**************************************************************************** +* Implementation of pow * +****************************************************************************/ + +template +struct pow_default_impl +{ + typedef Scalar retval; + static inline Scalar run(const Scalar& x, const Scalar& y) + { + using std::pow; + return pow(x, y); + } +}; + +template +struct pow_default_impl +{ + static inline Scalar run(Scalar x, Scalar y) + { + Scalar res(1); + eigen_assert(!NumTraits::IsSigned || y >= 0); + if(y & 1) res *= x; + y >>= 1; + while(y) + { + x *= x; + if(y&1) res *= x; + y >>= 1; + } + return res; + } +}; + +template +struct pow_impl : pow_default_impl::IsInteger> {}; + +template +struct pow_retval +{ + typedef Scalar type; +}; + +template +inline EIGEN_MATHFUNC_RETVAL(pow, Scalar) pow(const Scalar& x, const Scalar& y) +{ + return EIGEN_MATHFUNC_IMPL(pow, Scalar)::run(x, y); +} + +/**************************************************************************** +* Implementation of random * +****************************************************************************/ + +template +struct random_default_impl {}; + +template +struct random_impl : random_default_impl::IsComplex, NumTraits::IsInteger> {}; + +template +struct random_retval +{ + typedef Scalar type; +}; + +template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y); +template inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(); + +template +struct random_default_impl +{ + static inline Scalar run(const Scalar& x, const Scalar& y) + { + return x + (y-x) * Scalar(std::rand()) / Scalar(RAND_MAX); + } + static inline Scalar run() + { + return run(Scalar(NumTraits::IsSigned ? 
-1 : 0), Scalar(1)); + } +}; + +enum { + floor_log2_terminate, + floor_log2_move_up, + floor_log2_move_down, + floor_log2_bogus +}; + +template struct floor_log2_selector +{ + enum { middle = (lower + upper) / 2, + value = (upper <= lower + 1) ? int(floor_log2_terminate) + : (n < (1 << middle)) ? int(floor_log2_move_down) + : (n==0) ? int(floor_log2_bogus) + : int(floor_log2_move_up) + }; +}; + +template::value> +struct floor_log2 {}; + +template +struct floor_log2 +{ + enum { value = floor_log2::middle>::value }; +}; + +template +struct floor_log2 +{ + enum { value = floor_log2::middle, upper>::value }; +}; + +template +struct floor_log2 +{ + enum { value = (n >= ((unsigned int)(1) << (lower+1))) ? lower+1 : lower }; +}; + +template +struct floor_log2 +{ + // no value, error at compile time +}; + +template +struct random_default_impl +{ + typedef typename NumTraits::NonInteger NonInteger; + + static inline Scalar run(const Scalar& x, const Scalar& y) + { + return x + Scalar((NonInteger(y)-x+1) * std::rand() / (RAND_MAX + NonInteger(1))); + } + + static inline Scalar run() + { +#ifdef EIGEN_MAKING_DOCS + return run(Scalar(NumTraits::IsSigned ? -10 : 0), Scalar(10)); +#else + enum { rand_bits = floor_log2<(unsigned int)(RAND_MAX)+1>::value, + scalar_bits = sizeof(Scalar) * CHAR_BIT, + shift = EIGEN_PLAIN_ENUM_MAX(0, int(rand_bits) - int(scalar_bits)) + }; + Scalar x = Scalar(std::rand() >> shift); + Scalar offset = NumTraits::IsSigned ? Scalar(1 << (rand_bits-1)) : Scalar(0); + return x - offset; +#endif + } +}; + +template +struct random_default_impl +{ + static inline Scalar run(const Scalar& x, const Scalar& y) + { + return Scalar(random(real(x), real(y)), + random(imag(x), imag(y))); + } + static inline Scalar run() + { + typedef typename NumTraits::Real RealScalar; + return Scalar(random(), random()); + } +}; + +template +inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random(const Scalar& x, const Scalar& y) +{ + return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(x, y); +} + +template +inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random() +{ + return EIGEN_MATHFUNC_IMPL(random, Scalar)::run(); +} + +/**************************************************************************** +* Implementation of fuzzy comparisons * +****************************************************************************/ + +template +struct scalar_fuzzy_default_impl {}; + +template +struct scalar_fuzzy_default_impl +{ + typedef typename NumTraits::Real RealScalar; + template + static inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) + { + return abs(x) <= abs(y) * prec; + } + static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) + { + using std::min; + return abs(x - y) <= (min)(abs(x), abs(y)) * prec; + } + static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar& prec) + { + return x <= y || isApprox(x, y, prec); + } +}; + +template +struct scalar_fuzzy_default_impl +{ + typedef typename NumTraits::Real RealScalar; + template + static inline bool isMuchSmallerThan(const Scalar& x, const Scalar&, const RealScalar&) + { + return x == Scalar(0); + } + static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar&) + { + return x == y; + } + static inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, const RealScalar&) + { + return x <= y; + } +}; + +template +struct scalar_fuzzy_default_impl +{ + typedef typename NumTraits::Real RealScalar; + template + static inline bool 
isMuchSmallerThan(const Scalar& x, const OtherScalar& y, const RealScalar& prec) + { + return abs2(x) <= abs2(y) * prec * prec; + } + static inline bool isApprox(const Scalar& x, const Scalar& y, const RealScalar& prec) + { + using std::min; + return abs2(x - y) <= (min)(abs2(x), abs2(y)) * prec * prec; + } +}; + +template +struct scalar_fuzzy_impl : scalar_fuzzy_default_impl::IsComplex, NumTraits::IsInteger> {}; + +template +inline bool isMuchSmallerThan(const Scalar& x, const OtherScalar& y, + typename NumTraits::Real precision = NumTraits::dummy_precision()) +{ + return scalar_fuzzy_impl::template isMuchSmallerThan(x, y, precision); +} + +template +inline bool isApprox(const Scalar& x, const Scalar& y, + typename NumTraits::Real precision = NumTraits::dummy_precision()) +{ + return scalar_fuzzy_impl::isApprox(x, y, precision); +} + +template +inline bool isApproxOrLessThan(const Scalar& x, const Scalar& y, + typename NumTraits::Real precision = NumTraits::dummy_precision()) +{ + return scalar_fuzzy_impl::isApproxOrLessThan(x, y, precision); +} + +/****************************************** +*** The special case of the bool type *** +******************************************/ + +template<> struct random_impl +{ + static inline bool run() + { + return random(0,1)==0 ? false : true; + } +}; + +template<> struct scalar_fuzzy_impl +{ + typedef bool RealScalar; + + template + static inline bool isMuchSmallerThan(const bool& x, const bool&, const bool&) + { + return !x; + } + + static inline bool isApprox(bool x, bool y, bool) + { + return x == y; + } + + static inline bool isApproxOrLessThan(const bool& x, const bool& y, const bool&) + { + return (!x) || y; + } + +}; + +/**************************************************************************** +* Special functions * +****************************************************************************/ + +// std::isfinite is non standard, so let's define our own version, +// even though it is not very efficient. +template bool (isfinite)(const T& x) +{ + return x::highest() && x>NumTraits::lowest(); +} + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_MATHFUNCTIONS_H diff --git a/latan/Eigen/src/Core/Matrix.h b/latan/Eigen/src/Core/Matrix.h new file mode 100644 index 0000000..99160b5 --- /dev/null +++ b/latan/Eigen/src/Core/Matrix.h @@ -0,0 +1,405 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// Copyright (C) 2008-2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_MATRIX_H +#define EIGEN_MATRIX_H + +namespace Eigen { + +/** \class Matrix + * \ingroup Core_Module + * + * \brief The matrix class, also used for vectors and row-vectors + * + * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen. + * Vectors are matrices with one column, and row-vectors are matrices with one row. + * + * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note"). + * + * The first three template parameters are required: + * \tparam _Scalar \anchor matrix_tparam_scalar Numeric type, e.g. float, double, int or std::complex. + * User defined sclar types are supported as well (see \ref user_defined_scalars "here"). 
+ * \tparam _Rows Number of rows, or \b Dynamic + * \tparam _Cols Number of columns, or \b Dynamic + * + * The remaining template parameters are optional -- in most cases you don't have to worry about them. + * \tparam _Options \anchor matrix_tparam_options A combination of either \b #RowMajor or \b #ColMajor, and of either + * \b #AutoAlign or \b #DontAlign. + * The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required + * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size. + * \tparam _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note"). + * \tparam _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note"). + * + * Eigen provides a number of typedefs covering the usual cases. Here are some examples: + * + * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix) + * \li \c Vector4f is a vector of 4 floats (\c Matrix) + * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix) + * + * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix) + * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix) + * + * \li \c Matrix2Xf is a partially fixed-size (dynamic-size) matrix of floats (\c Matrix) + * \li \c MatrixX3d is a partially dynamic-size (fixed-size) matrix of double (\c Matrix) + * + * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs. + * + * You can access elements of vectors and matrices using normal subscripting: + * + * \code + * Eigen::VectorXd v(10); + * v[0] = 0.1; + * v[1] = 0.2; + * v(0) = 0.3; + * v(1) = 0.4; + * + * Eigen::MatrixXi m(10, 10); + * m(0, 1) = 1; + * m(0, 2) = 2; + * m(0, 3) = 3; + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIX_PLUGIN. + * + * Some notes: + * + *
+ *
\anchor dense Dense versus sparse:
+ *
This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module. + * + * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored, in an ordinary contiguous array. + * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.
+ * + *
\anchor fixedsize Fixed-size versus dynamic-size:
+ *
Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates the array + * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up + * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time. + * + * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime + * variables, and the array of coefficients is allocated dynamically on the heap. + * + * Note that \em dense matrices, be they Fixed-size or Dynamic-size, do not expand dynamically in the sense of a std::map. + * If you want this behavior, see the Sparse module.
+ * + *
\anchor maxrows _MaxRows and _MaxCols:
+ *
In most cases, one just leaves these parameters to the default values. + * These parameters are the maximum numbers of rows and columns that the matrix may have. They are useful in cases + * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they cannot + * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols + * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic.
+ *
+ * + * \see MatrixBase for the majority of the API methods for matrices, \ref TopicClassHierarchy, + * \ref TopicStorageOrders + */ + +namespace internal { +template +struct traits > +{ + typedef _Scalar Scalar; + typedef Dense StorageKind; + typedef DenseIndex Index; + typedef MatrixXpr XprKind; + enum { + RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _MaxRows, + MaxColsAtCompileTime = _MaxCols, + Flags = compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, + CoeffReadCost = NumTraits::ReadCost, + Options = _Options, + InnerStrideAtCompileTime = 1, + OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime + }; +}; +} + +template +class Matrix + : public PlainObjectBase > +{ + public: + + /** \brief Base class typedef. + * \sa PlainObjectBase + */ + typedef PlainObjectBase Base; + + enum { Options = _Options }; + + EIGEN_DENSE_PUBLIC_INTERFACE(Matrix) + + typedef typename Base::PlainObject PlainObject; + + using Base::base; + using Base::coeffRef; + + /** + * \brief Assigns matrices to each other. + * + * \note This is a special case of the templated operator=. Its purpose is + * to prevent a default operator= from hiding the templated operator=. + * + * \callgraph + */ + EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other) + { + return Base::_set(other); + } + + /** \internal + * \brief Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_STRONG_INLINE Matrix& operator=(const MatrixBase& other) + { + return Base::_set(other); + } + + /* Here, doxygen failed to copy the brief information when using \copydoc */ + + /** + * \brief Copies the generic expression \a other into *this. + * \copydetails DenseBase::operator=(const EigenBase &other) + */ + template + EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase &other) + { + return Base::operator=(other); + } + + template + EIGEN_STRONG_INLINE Matrix& operator=(const ReturnByValue& func) + { + return Base::operator=(func); + } + + /** \brief Default constructor. + * + * For fixed-size matrices, does nothing. + * + * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix + * is called a null matrix. This constructor is the unique way to create null matrices: resizing + * a matrix to 0 is not supported. + * + * \sa resize(Index,Index) + */ + EIGEN_STRONG_INLINE explicit Matrix() : Base() + { + Base::_check_template_params(); + EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } + + // FIXME is it still needed + Matrix(internal::constructor_without_unaligned_array_assert) + : Base(internal::constructor_without_unaligned_array_assert()) + { Base::_check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED } + + /** \brief Constructs a vector or row-vector with given dimension. \only_for_vectors + * + * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, + * it is redundant to pass the dimension here, so it makes more sense to use the default + * constructor Matrix() instead. 
+ */ + EIGEN_STRONG_INLINE explicit Matrix(Index dim) + : Base(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix) + eigen_assert(dim >= 0); + eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); + EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_STRONG_INLINE Matrix(const T0& x, const T1& y) + { + Base::_check_template_params(); + Base::template _init2(x, y); + } + #else + /** \brief Constructs an uninitialized matrix with \a rows rows and \a cols columns. + * + * This is useful for dynamic-size matrices. For fixed-size matrices, + * it is redundant to pass these parameters, so one should use the default constructor + * Matrix() instead. */ + Matrix(Index rows, Index cols); + /** \brief Constructs an initialized 2D vector with given coefficients */ + Matrix(const Scalar& x, const Scalar& y); + #endif + + /** \brief Constructs an initialized 3D vector with given coefficients */ + EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + } + /** \brief Constructs an initialized 4D vector with given coefficients */ + EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) + { + Base::_check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + m_storage.data()[3] = w; + } + + explicit Matrix(const Scalar *data); + + /** \brief Constructor copying the value of the expression \a other */ + template + EIGEN_STRONG_INLINE Matrix(const MatrixBase& other) + : Base(other.rows() * other.cols(), other.rows(), other.cols()) + { + // This test resides here, to bring the error messages closer to the user. Normally, these checks + // are performed deeply within the library, thus causing long and scary error traces. + EIGEN_STATIC_ASSERT((internal::is_same::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + Base::_check_template_params(); + Base::_set_noalias(other); + } + /** \brief Copy constructor */ + EIGEN_STRONG_INLINE Matrix(const Matrix& other) + : Base(other.rows() * other.cols(), other.rows(), other.cols()) + { + Base::_check_template_params(); + Base::_set_noalias(other); + } + /** \brief Copy constructor with in-place evaluation */ + template + EIGEN_STRONG_INLINE Matrix(const ReturnByValue& other) + { + Base::_check_template_params(); + Base::resize(other.rows(), other.cols()); + other.evalTo(*this); + } + + /** \brief Copy constructor for generic expressions. + * \sa MatrixBase::operator=(const EigenBase&) + */ + template + EIGEN_STRONG_INLINE Matrix(const EigenBase &other) + : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) + { + Base::_check_template_params(); + Base::resize(other.rows(), other.cols()); + // FIXME/CHECK: isn't *this = other.derived() more efficient. it allows to + // go for pure _set() implementations, right? + *this = other; + } + + /** \internal + * \brief Override MatrixBase::swap() since for dynamic-sized matrices + * of same type it is enough to swap the data pointers. 
+ */ + template + void swap(MatrixBase const & other) + { this->_swap(other.derived()); } + + inline Index innerStride() const { return 1; } + inline Index outerStride() const { return this->innerSize(); } + + /////////// Geometry module /////////// + + template + explicit Matrix(const RotationBase& r); + template + Matrix& operator=(const RotationBase& r); + + #ifdef EIGEN2_SUPPORT + template + explicit Matrix(const eigen2_RotationBase& r); + template + Matrix& operator=(const eigen2_RotationBase& r); + #endif + + // allow to extend Matrix outside Eigen + #ifdef EIGEN_MATRIX_PLUGIN + #include EIGEN_MATRIX_PLUGIN + #endif + + protected: + template + friend struct internal::conservative_resize_like_impl; + + using Base::m_storage; +}; + +/** \defgroup matrixtypedefs Global matrix typedefs + * + * \ingroup Core_Module + * + * Eigen defines several typedef shortcuts for most common matrix and vector types. + * + * The general patterns are the following: + * + * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, + * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd + * for complex double. + * + * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats. + * + * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is + * a fixed-size vector of 4 complex floats. + * + * \sa class Matrix + */ + +#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Matrix##SizeSuffix##TypeSuffix; \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Vector##SizeSuffix##TypeSuffix; \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, Size) \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Matrix##Size##X##TypeSuffix; \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Matrix##X##Size##TypeSuffix; + +#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) \ +EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 2) \ +EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 3) \ +EIGEN_MAKE_FIXED_TYPEDEFS(Type, TypeSuffix, 4) + +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cf) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cd) + +#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES +#undef EIGEN_MAKE_TYPEDEFS +#undef EIGEN_MAKE_FIXED_TYPEDEFS + +} // end namespace Eigen + +#endif // EIGEN_MATRIX_H diff --git a/latan/Eigen/src/Core/MatrixBase.h b/latan/Eigen/src/Core/MatrixBase.h new file mode 100644 index 0000000..36ea2ce --- /dev/null +++ b/latan/Eigen/src/Core/MatrixBase.h @@ -0,0 +1,511 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2009 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
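(Illustration only, not part of the imported sources.) The EIGEN_MAKE_TYPEDEFS macros at the end of Matrix.h above generate the familiar Matrix3d / VectorXd / RowVector3i aliases as plain Matrix instantiations. A minimal usage sketch of fixed-size versus dynamic-size types, assuming the usual <Eigen/Dense> umbrella header of this vendored tree is on the include path:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::Matrix3d a;              // fixed-size 3x3 matrix of double, stored inline
    a << 1, 2, 3,
         4, 5, 6,
         7, 8, 9;

    Eigen::VectorXd v(3);           // dynamic-size vector of double, allocated on the heap
    v << 1, 0, -1;

    Eigen::RowVector3i r(1, 2, 3);  // fixed-size row-vector of int

    std::cout << a * v << std::endl;   // 3x3 times 3x1 yields a 3x1 vector
    std::cout << r.sum() << std::endl; // row-vectors expose the same reductions
    return 0;
}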
+ +#ifndef EIGEN_MATRIXBASE_H +#define EIGEN_MATRIXBASE_H + +namespace Eigen { + +/** \class MatrixBase + * \ingroup Core_Module + * + * \brief Base class for all dense matrices, vectors, and expressions + * + * This class is the base that is inherited by all matrix, vector, and related expression + * types. Most of the Eigen API is contained in this class, and its base classes. Other important + * classes for the Eigen API are Matrix, and VectorwiseOp. + * + * Note that some methods are defined in other modules such as the \ref LU_Module LU module + * for all functions related to matrix inversions. + * + * \tparam Derived is the derived type, e.g. a matrix type, or an expression, etc. + * + * When writing a function taking Eigen objects as argument, if you want your function + * to take as argument any matrix, vector, or expression, just let it take a + * MatrixBase argument. As an example, here is a function printFirstRow which, given + * a matrix, vector, or expression \a x, prints the first row of \a x. + * + * \code + template + void printFirstRow(const Eigen::MatrixBase& x) + { + cout << x.row(0) << endl; + } + * \endcode + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_MATRIXBASE_PLUGIN. + * + * \sa \ref TopicClassHierarchy + */ +template class MatrixBase + : public DenseBase +{ + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + typedef MatrixBase StorageBaseType; + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + + typedef DenseBase Base; + using Base::RowsAtCompileTime; + using Base::ColsAtCompileTime; + using Base::SizeAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::IsVectorAtCompileTime; + using Base::Flags; + using Base::CoeffReadCost; + + using Base::derived; + using Base::const_cast_derived; + using Base::rows; + using Base::cols; + using Base::size; + using Base::coeff; + using Base::coeffRef; + using Base::lazyAssign; + using Base::eval; + using Base::operator+=; + using Base::operator-=; + using Base::operator*=; + using Base::operator/=; + + typedef typename Base::CoeffReturnType CoeffReturnType; + typedef typename Base::ConstTransposeReturnType ConstTransposeReturnType; + typedef typename Base::RowXpr RowXpr; + typedef typename Base::ColXpr ColXpr; +#endif // not EIGEN_PARSED_BY_DOXYGEN + + + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** type of the equivalent square matrix */ + typedef Matrix SquareMatrixType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** \returns the size of the main diagonal, which is min(rows(),cols()). + * \sa rows(), cols(), SizeAtCompileTime. */ + inline Index diagonalSize() const { return (std::min)(rows(),cols()); } + + /** \brief The plain matrix type corresponding to this expression. + * + * This is not necessarily exactly the return type of eval(). In the case of plain matrices, + * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed + * that the return type of eval() is either PlainObject or const PlainObject&. + */ + typedef Matrix::Scalar, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime, + AutoAlign | (internal::traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), + internal::traits::MaxRowsAtCompileTime, + internal::traits::MaxColsAtCompileTime + > PlainObject; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp,Derived> ConstantReturnType; + /** \internal the return type of MatrixBase::adjoint() */ + typedef typename internal::conditional::IsComplex, + CwiseUnaryOp, ConstTransposeReturnType>, + ConstTransposeReturnType + >::type AdjointReturnType; + /** \internal Return type of eigenvalues() */ + typedef Matrix, internal::traits::ColsAtCompileTime, 1, ColMajor> EigenvaluesReturnType; + /** \internal the return type of identity */ + typedef CwiseNullaryOp,Derived> IdentityReturnType; + /** \internal the return type of unit vectors */ + typedef Block, SquareMatrixType>, + internal::traits::RowsAtCompileTime, + internal::traits::ColsAtCompileTime> BasisReturnType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + +#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::MatrixBase +# include "../plugins/CommonCwiseUnaryOps.h" +# include "../plugins/CommonCwiseBinaryOps.h" +# include "../plugins/MatrixCwiseUnaryOps.h" +# include "../plugins/MatrixCwiseBinaryOps.h" +# ifdef EIGEN_MATRIXBASE_PLUGIN +# include EIGEN_MATRIXBASE_PLUGIN +# endif +#undef EIGEN_CURRENT_STORAGE_BASE_CLASS + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + Derived& operator=(const MatrixBase& other); + + // We cannot inherit here via Base::operator= since it is causing + // trouble with MSVC. + + template + Derived& operator=(const DenseBase& other); + + template + Derived& operator=(const EigenBase& other); + + template + Derived& operator=(const ReturnByValue& other); + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + Derived& lazyAssign(const ProductBase& other); +#endif // not EIGEN_PARSED_BY_DOXYGEN + + template + Derived& operator+=(const MatrixBase& other); + template + Derived& operator-=(const MatrixBase& other); + + template + const typename ProductReturnType::Type + operator*(const MatrixBase &other) const; + + template + const typename LazyProductReturnType::Type + lazyProduct(const MatrixBase &other) const; + + template + Derived& operator*=(const EigenBase& other); + + template + void applyOnTheLeft(const EigenBase& other); + + template + void applyOnTheRight(const EigenBase& other); + + template + const DiagonalProduct + operator*(const DiagonalBase &diagonal) const; + + template + typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType + dot(const MatrixBase& other) const; + + #ifdef EIGEN2_SUPPORT + template + Scalar eigen2_dot(const MatrixBase& other) const; + #endif + + RealScalar squaredNorm() const; + RealScalar norm() const; + RealScalar stableNorm() const; + RealScalar blueNorm() const; + RealScalar hypotNorm() const; + const PlainObject normalized() const; + void normalize(); + + const AdjointReturnType adjoint() const; + void adjointInPlace(); + + typedef Diagonal DiagonalReturnType; + DiagonalReturnType diagonal(); + typedef const Diagonal ConstDiagonalReturnType; + const ConstDiagonalReturnType diagonal() const; + + template struct DiagonalIndexReturnType { typedef Diagonal Type; }; + template struct ConstDiagonalIndexReturnType { typedef const Diagonal Type; }; + + template typename DiagonalIndexReturnType::Type diagonal(); + template typename ConstDiagonalIndexReturnType::Type diagonal() const; + + // Note: The 
"MatrixBase::" prefixes are added to help MSVC9 to match these declarations with the later implementations. + // On the other hand they confuse MSVC8... + #if (defined _MSC_VER) && (_MSC_VER >= 1500) // 2008 or later + typename MatrixBase::template DiagonalIndexReturnType::Type diagonal(Index index); + typename MatrixBase::template ConstDiagonalIndexReturnType::Type diagonal(Index index) const; + #else + typename DiagonalIndexReturnType::Type diagonal(Index index); + typename ConstDiagonalIndexReturnType::Type diagonal(Index index) const; + #endif + + #ifdef EIGEN2_SUPPORT + template typename internal::eigen2_part_return_type::type part(); + template const typename internal::eigen2_part_return_type::type part() const; + + // huuuge hack. make Eigen2's matrix.part() work in eigen3. Problem: Diagonal is now a class template instead + // of an integer constant. Solution: overload the part() method template wrt template parameters list. + template class U> + const DiagonalWrapper part() const + { return diagonal().asDiagonal(); } + #endif // EIGEN2_SUPPORT + + template struct TriangularViewReturnType { typedef TriangularView Type; }; + template struct ConstTriangularViewReturnType { typedef const TriangularView Type; }; + + template typename TriangularViewReturnType::Type triangularView(); + template typename ConstTriangularViewReturnType::Type triangularView() const; + + template struct SelfAdjointViewReturnType { typedef SelfAdjointView Type; }; + template struct ConstSelfAdjointViewReturnType { typedef const SelfAdjointView Type; }; + + template typename SelfAdjointViewReturnType::Type selfadjointView(); + template typename ConstSelfAdjointViewReturnType::Type selfadjointView() const; + + const SparseView sparseView(const Scalar& m_reference = Scalar(0), + typename NumTraits::Real m_epsilon = NumTraits::dummy_precision()) const; + static const IdentityReturnType Identity(); + static const IdentityReturnType Identity(Index rows, Index cols); + static const BasisReturnType Unit(Index size, Index i); + static const BasisReturnType Unit(Index i); + static const BasisReturnType UnitX(); + static const BasisReturnType UnitY(); + static const BasisReturnType UnitZ(); + static const BasisReturnType UnitW(); + + const DiagonalWrapper asDiagonal() const; + const PermutationWrapper asPermutation() const; + + Derived& setIdentity(); + Derived& setIdentity(Index rows, Index cols); + + bool isIdentity(RealScalar prec = NumTraits::dummy_precision()) const; + bool isDiagonal(RealScalar prec = NumTraits::dummy_precision()) const; + + bool isUpperTriangular(RealScalar prec = NumTraits::dummy_precision()) const; + bool isLowerTriangular(RealScalar prec = NumTraits::dummy_precision()) const; + + template + bool isOrthogonal(const MatrixBase& other, + RealScalar prec = NumTraits::dummy_precision()) const; + bool isUnitary(RealScalar prec = NumTraits::dummy_precision()) const; + + /** \returns true if each coefficients of \c *this and \a other are all exactly equal. + * \warning When using floating point scalar values you probably should rather use a + * fuzzy comparison such as isApprox() + * \sa isApprox(), operator!= */ + template + inline bool operator==(const MatrixBase& other) const + { return cwiseEqual(other).all(); } + + /** \returns true if at least one pair of coefficients of \c *this and \a other are not exactly equal to each other. 
+ * \warning When using floating point scalar values you probably should rather use a + * fuzzy comparison such as isApprox() + * \sa isApprox(), operator== */ + template + inline bool operator!=(const MatrixBase& other) const + { return cwiseNotEqual(other).any(); } + + NoAlias noalias(); + + inline const ForceAlignedAccess forceAlignedAccess() const; + inline ForceAlignedAccess forceAlignedAccess(); + template inline typename internal::add_const_on_value_type,Derived&>::type>::type forceAlignedAccessIf() const; + template inline typename internal::conditional,Derived&>::type forceAlignedAccessIf(); + + Scalar trace() const; + +/////////// Array module /////////// + + template RealScalar lpNorm() const; + + MatrixBase& matrix() { return *this; } + const MatrixBase& matrix() const { return *this; } + + /** \returns an \link ArrayBase Array \endlink expression of this matrix + * \sa ArrayBase::matrix() */ + ArrayWrapper array() { return derived(); } + const ArrayWrapper array() const { return derived(); } + +/////////// LU module /////////// + + const FullPivLU fullPivLu() const; + const PartialPivLU partialPivLu() const; + + #if EIGEN2_SUPPORT_STAGE < STAGE20_RESOLVE_API_CONFLICTS + const LU lu() const; + #endif + + #ifdef EIGEN2_SUPPORT + const LU eigen2_lu() const; + #endif + + #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS + const PartialPivLU lu() const; + #endif + + #ifdef EIGEN2_SUPPORT + template + void computeInverse(MatrixBase *result) const { + *result = this->inverse(); + } + #endif + + const internal::inverse_impl inverse() const; + template + void computeInverseAndDetWithCheck( + ResultType& inverse, + typename ResultType::Scalar& determinant, + bool& invertible, + const RealScalar& absDeterminantThreshold = NumTraits::dummy_precision() + ) const; + template + void computeInverseWithCheck( + ResultType& inverse, + bool& invertible, + const RealScalar& absDeterminantThreshold = NumTraits::dummy_precision() + ) const; + Scalar determinant() const; + +/////////// Cholesky module /////////// + + const LLT llt() const; + const LDLT ldlt() const; + +/////////// QR module /////////// + + const HouseholderQR householderQr() const; + const ColPivHouseholderQR colPivHouseholderQr() const; + const FullPivHouseholderQR fullPivHouseholderQr() const; + + #ifdef EIGEN2_SUPPORT + const QR qr() const; + #endif + + EigenvaluesReturnType eigenvalues() const; + RealScalar operatorNorm() const; + +/////////// SVD module /////////// + + JacobiSVD jacobiSvd(unsigned int computationOptions = 0) const; + + #ifdef EIGEN2_SUPPORT + SVD svd() const; + #endif + +/////////// Geometry module /////////// + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /// \internal helper struct to form the return type of the cross product + template struct cross_product_return_type { + typedef typename internal::scalar_product_traits::Scalar,typename internal::traits::Scalar>::ReturnType Scalar; + typedef Matrix type; + }; + #endif // EIGEN_PARSED_BY_DOXYGEN + template + typename cross_product_return_type::type + cross(const MatrixBase& other) const; + template + PlainObject cross3(const MatrixBase& other) const; + PlainObject unitOrthogonal(void) const; + Matrix eulerAngles(Index a0, Index a1, Index a2) const; + + #if EIGEN2_SUPPORT_STAGE > STAGE20_RESOLVE_API_CONFLICTS + ScalarMultipleReturnType operator*(const UniformScaling& s) const; + // put this as separate enum value to work around possible GCC 4.3 bug (?) 
+ enum { HomogeneousReturnTypeDirection = ColsAtCompileTime==1?Vertical:Horizontal }; + typedef Homogeneous HomogeneousReturnType; + HomogeneousReturnType homogeneous() const; + #endif + + enum { + SizeMinusOne = SizeAtCompileTime==Dynamic ? Dynamic : SizeAtCompileTime-1 + }; + typedef Block::ColsAtCompileTime==1 ? SizeMinusOne : 1, + internal::traits::ColsAtCompileTime==1 ? 1 : SizeMinusOne> ConstStartMinusOne; + typedef CwiseUnaryOp::Scalar>, + const ConstStartMinusOne > HNormalizedReturnType; + + const HNormalizedReturnType hnormalized() const; + +////////// Householder module /////////// + + void makeHouseholderInPlace(Scalar& tau, RealScalar& beta); + template + void makeHouseholder(EssentialPart& essential, + Scalar& tau, RealScalar& beta) const; + template + void applyHouseholderOnTheLeft(const EssentialPart& essential, + const Scalar& tau, + Scalar* workspace); + template + void applyHouseholderOnTheRight(const EssentialPart& essential, + const Scalar& tau, + Scalar* workspace); + +///////// Jacobi module ///////// + + template + void applyOnTheLeft(Index p, Index q, const JacobiRotation& j); + template + void applyOnTheRight(Index p, Index q, const JacobiRotation& j); + +///////// MatrixFunctions module ///////// + + typedef typename internal::stem_function::type StemFunction; + const MatrixExponentialReturnValue exp() const; + const MatrixFunctionReturnValue matrixFunction(StemFunction f) const; + const MatrixFunctionReturnValue cosh() const; + const MatrixFunctionReturnValue sinh() const; + const MatrixFunctionReturnValue cos() const; + const MatrixFunctionReturnValue sin() const; + const MatrixSquareRootReturnValue sqrt() const; + const MatrixLogarithmReturnValue log() const; + +#ifdef EIGEN2_SUPPORT + template + Derived& operator+=(const Flagged, 0, + EvalBeforeAssigningBit>& other); + + template + Derived& operator-=(const Flagged, 0, + EvalBeforeAssigningBit>& other); + + /** \deprecated because .lazy() is deprecated + * Overloaded for cache friendly product evaluation */ + template + Derived& lazyAssign(const Flagged& other) + { return lazyAssign(other._expression()); } + + template + const Flagged marked() const; + const Flagged lazy() const; + + inline const Cwise cwise() const; + inline Cwise cwise(); + + VectorBlock start(Index size); + const VectorBlock start(Index size) const; + VectorBlock end(Index size); + const VectorBlock end(Index size) const; + template VectorBlock start(); + template const VectorBlock start() const; + template VectorBlock end(); + template const VectorBlock end() const; + + Minor minor(Index row, Index col); + const Minor minor(Index row, Index col) const; +#endif + + protected: + MatrixBase() : Base() {} + + private: + explicit MatrixBase(int); + MatrixBase(int,int); + template explicit MatrixBase(const MatrixBase&); + protected: + // mixing arrays and matrices is not legal + template Derived& operator+=(const ArrayBase& ) + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} + // mixing arrays and matrices is not legal + template Derived& operator-=(const ArrayBase& ) + {EIGEN_STATIC_ASSERT(std::ptrdiff_t(sizeof(typename OtherDerived::Scalar))==-1,YOU_CANNOT_MIX_ARRAYS_AND_MATRICES); return *this;} +}; + +} // end namespace Eigen + +#endif // EIGEN_MATRIXBASE_H diff --git a/latan/Eigen/src/Core/NestByValue.h b/latan/Eigen/src/Core/NestByValue.h new file mode 100644 index 0000000..a893b17 --- /dev/null +++ b/latan/Eigen/src/Core/NestByValue.h @@ -0,0 +1,111 @@ +// 
This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NESTBYVALUE_H +#define EIGEN_NESTBYVALUE_H + +namespace Eigen { + +/** \class NestByValue + * \ingroup Core_Module + * + * \brief Expression which must be nested by value + * + * \param ExpressionType the type of the object of which we are requiring nesting-by-value + * + * This class is the return type of MatrixBase::nestByValue() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::nestByValue() + */ + +namespace internal { +template +struct traits > : public traits +{}; +} + +template class NestByValue + : public internal::dense_xpr_base< NestByValue >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(NestByValue) + + inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {} + + inline Index rows() const { return m_expression.rows(); } + inline Index cols() const { return m_expression.cols(); } + inline Index outerStride() const { return m_expression.outerStride(); } + inline Index innerStride() const { return m_expression.innerStride(); } + + inline const CoeffReturnType coeff(Index row, Index col) const + { + return m_expression.coeff(row, col); + } + + inline Scalar& coeffRef(Index row, Index col) + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + inline const CoeffReturnType coeff(Index index) const + { + return m_expression.coeff(index); + } + + inline Scalar& coeffRef(Index index) + { + return m_expression.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(Index row, Index col) const + { + return m_expression.template packet(row, col); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(Index index) const + { + return m_expression.template packet(index); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(index, x); + } + + operator const ExpressionType&() const { return m_expression; } + + protected: + const ExpressionType m_expression; +}; + +/** \returns an expression of the temporary version of *this. + */ +template +inline const NestByValue +DenseBase::nestByValue() const +{ + return NestByValue(derived()); +} + +} // end namespace Eigen + +#endif // EIGEN_NESTBYVALUE_H diff --git a/latan/Eigen/src/Core/NoAlias.h b/latan/Eigen/src/Core/NoAlias.h new file mode 100644 index 0000000..ecb3fa2 --- /dev/null +++ b/latan/Eigen/src/Core/NoAlias.h @@ -0,0 +1,125 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
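(Illustration only, not part of the imported sources.) NestByValue, defined in the file just above, is mostly encountered through DenseBase::nestByValue(): it makes an enclosing expression hold its own copy of a sub-expression instead of a reference to it. A minimal sketch, again assuming the <Eigen/Dense> umbrella header of this vendored tree:

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
    Eigen::MatrixXd b = Eigen::MatrixXd::Random(3, 3);

    // (a + b) is a lightweight expression; the NestByValue wrapper returned by
    // nestByValue() stores its own copy of that expression, so the transpose
    // expression built on top of the wrapper owns the (a + b) node it refers to.
    Eigen::MatrixXd c = (a + b).nestByValue().transpose();

    std::cout << c << std::endl;
    return 0;
}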
+ +#ifndef EIGEN_NOALIAS_H +#define EIGEN_NOALIAS_H + +namespace Eigen { + +/** \class NoAlias + * \ingroup Core_Module + * + * \brief Pseudo expression providing an operator = assuming no aliasing + * + * \param ExpressionType the type of the object on which to do the lazy assignment + * + * This class represents an expression with special assignment operators + * assuming no aliasing between the target expression and the source expression. + * More precisely it alloas to bypass the EvalBeforeAssignBit flag of the source expression. + * It is the return type of MatrixBase::noalias() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::noalias() + */ +template class StorageBase> +class NoAlias +{ + typedef typename ExpressionType::Scalar Scalar; + public: + NoAlias(ExpressionType& expression) : m_expression(expression) {} + + /** Behaves like MatrixBase::lazyAssign(other) + * \sa MatrixBase::lazyAssign() */ + template + EIGEN_STRONG_INLINE ExpressionType& operator=(const StorageBase& other) + { return internal::assign_selector::run(m_expression,other.derived()); } + + /** \sa MatrixBase::operator+= */ + template + EIGEN_STRONG_INLINE ExpressionType& operator+=(const StorageBase& other) + { + typedef SelfCwiseBinaryOp, ExpressionType, OtherDerived> SelfAdder; + SelfAdder tmp(m_expression); + typedef typename internal::nested::type OtherDerivedNested; + typedef typename internal::remove_all::type _OtherDerivedNested; + internal::assign_selector::run(tmp,OtherDerivedNested(other.derived())); + return m_expression; + } + + /** \sa MatrixBase::operator-= */ + template + EIGEN_STRONG_INLINE ExpressionType& operator-=(const StorageBase& other) + { + typedef SelfCwiseBinaryOp, ExpressionType, OtherDerived> SelfAdder; + SelfAdder tmp(m_expression); + typedef typename internal::nested::type OtherDerivedNested; + typedef typename internal::remove_all::type _OtherDerivedNested; + internal::assign_selector::run(tmp,OtherDerivedNested(other.derived())); + return m_expression; + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + EIGEN_STRONG_INLINE ExpressionType& operator+=(const ProductBase& other) + { other.derived().addTo(m_expression); return m_expression; } + + template + EIGEN_STRONG_INLINE ExpressionType& operator-=(const ProductBase& other) + { other.derived().subTo(m_expression); return m_expression; } + + template + EIGEN_STRONG_INLINE ExpressionType& operator+=(const CoeffBasedProduct& other) + { return m_expression.derived() += CoeffBasedProduct(other.lhs(), other.rhs()); } + + template + EIGEN_STRONG_INLINE ExpressionType& operator-=(const CoeffBasedProduct& other) + { return m_expression.derived() -= CoeffBasedProduct(other.lhs(), other.rhs()); } +#endif + + protected: + ExpressionType& m_expression; +}; + +/** \returns a pseudo expression of \c *this with an operator= assuming + * no aliasing between \c *this and the source expression. + * + * More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag. + * Currently, even though several expressions may alias, only product + * expressions have this flag. Therefore, noalias() is only usefull when + * the source expression contains a matrix product. 
+ * + * Here are some examples where noalias is usefull: + * \code + * D.noalias() = A * B; + * D.noalias() += A.transpose() * B; + * D.noalias() -= 2 * A * B.adjoint(); + * \endcode + * + * On the other hand the following example will lead to a \b wrong result: + * \code + * A.noalias() = A * B; + * \endcode + * because the result matrix A is also an operand of the matrix product. Therefore, + * there is no alternative than evaluating A * B in a temporary, that is the default + * behavior when you write: + * \code + * A = A * B; + * \endcode + * + * \sa class NoAlias + */ +template +NoAlias MatrixBase::noalias() +{ + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_NOALIAS_H diff --git a/latan/Eigen/src/Core/NumTraits.h b/latan/Eigen/src/Core/NumTraits.h new file mode 100644 index 0000000..c94ef02 --- /dev/null +++ b/latan/Eigen/src/Core/NumTraits.h @@ -0,0 +1,147 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_NUMTRAITS_H +#define EIGEN_NUMTRAITS_H + +namespace Eigen { + +/** \class NumTraits + * \ingroup Core_Module + * + * \brief Holds information about the various numeric (i.e. scalar) types allowed by Eigen. + * + * \param T the numeric type at hand + * + * This class stores enums, typedefs and static methods giving information about a numeric type. + * + * The provided data consists of: + * \li A typedef \a Real, giving the "real part" type of \a T. If \a T is already real, + * then \a Real is just a typedef to \a T. If \a T is \c std::complex then \a Real + * is a typedef to \a U. + * \li A typedef \a NonInteger, giving the type that should be used for operations producing non-integral values, + * such as quotients, square roots, etc. If \a T is a floating-point type, then this typedef just gives + * \a T again. Note however that many Eigen functions such as internal::sqrt simply refuse to + * take integers. Outside of a few cases, Eigen doesn't do automatic type promotion. Thus, this typedef is + * only intended as a helper for code that needs to explicitly promote types. + * \li A typedef \a Nested giving the type to use to nest a value inside of the expression tree. If you don't know what + * this means, just use \a T here. + * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex + * type, and to 0 otherwise. + * \li An enum value \a IsInteger. It is equal to \c 1 if \a T is an integer type such as \c int, + * and to \c 0 otherwise. + * \li Enum values ReadCost, AddCost and MulCost representing a rough estimate of the number of CPU cycles needed + * to by move / add / mul instructions respectively, assuming the data is already stored in CPU registers. + * Stay vague here. No need to do architecture-specific stuff. + * \li An enum value \a IsSigned. It is equal to \c 1 if \a T is a signed type and to 0 if \a T is unsigned. + * \li An enum value \a RequireInitialization. It is equal to \c 1 if the constructor of the numeric type \a T must + * be called, and to 0 if it is safe not to call it. Default is 0 if \a T is an arithmetic type, and 1 otherwise. + * \li An epsilon() function which, unlike std::numeric_limits::epsilon(), returns a \a Real instead of a \a T. 
+ * \li A dummy_precision() function returning a weak epsilon value. It is mainly used as a default + * value by the fuzzy comparison operators. + * \li highest() and lowest() functions returning the highest and lowest possible values respectively. + */ + +template struct GenericNumTraits +{ + enum { + IsInteger = std::numeric_limits::is_integer, + IsSigned = std::numeric_limits::is_signed, + IsComplex = 0, + RequireInitialization = internal::is_arithmetic::value ? 0 : 1, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; + + typedef T Real; + typedef typename internal::conditional< + IsInteger, + typename internal::conditional::type, + T + >::type NonInteger; + typedef T Nested; + + static inline Real epsilon() { return std::numeric_limits::epsilon(); } + static inline Real dummy_precision() + { + // make sure to override this for floating-point types + return Real(0); + } + static inline T highest() { return (std::numeric_limits::max)(); } + static inline T lowest() { return IsInteger ? (std::numeric_limits::min)() : (-(std::numeric_limits::max)()); } + +#ifdef EIGEN2_SUPPORT + enum { + HasFloatingPoint = !IsInteger + }; + typedef NonInteger FloatingPoint; +#endif +}; + +template struct NumTraits : GenericNumTraits +{}; + +template<> struct NumTraits + : GenericNumTraits +{ + static inline float dummy_precision() { return 1e-5f; } +}; + +template<> struct NumTraits : GenericNumTraits +{ + static inline double dummy_precision() { return 1e-12; } +}; + +template<> struct NumTraits + : GenericNumTraits +{ + static inline long double dummy_precision() { return 1e-15l; } +}; + +template struct NumTraits > + : GenericNumTraits > +{ + typedef _Real Real; + enum { + IsComplex = 1, + RequireInitialization = NumTraits<_Real>::RequireInitialization, + ReadCost = 2 * NumTraits<_Real>::ReadCost, + AddCost = 2 * NumTraits::AddCost, + MulCost = 4 * NumTraits::MulCost + 2 * NumTraits::AddCost + }; + + static inline Real epsilon() { return NumTraits::epsilon(); } + static inline Real dummy_precision() { return NumTraits::dummy_precision(); } +}; + +template +struct NumTraits > +{ + typedef Array ArrayType; + typedef typename NumTraits::Real RealScalar; + typedef Array Real; + typedef typename NumTraits::NonInteger NonIntegerScalar; + typedef Array NonInteger; + typedef ArrayType & Nested; + + enum { + IsComplex = NumTraits::IsComplex, + IsInteger = NumTraits::IsInteger, + IsSigned = NumTraits::IsSigned, + RequireInitialization = 1, + ReadCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::ReadCost, + AddCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::AddCost, + MulCost = ArrayType::SizeAtCompileTime==Dynamic ? Dynamic : ArrayType::SizeAtCompileTime * NumTraits::MulCost + }; +}; + +} // end namespace Eigen + +#endif // EIGEN_NUMTRAITS_H diff --git a/latan/Eigen/src/Core/PermutationMatrix.h b/latan/Eigen/src/Core/PermutationMatrix.h new file mode 100644 index 0000000..60a05c8 --- /dev/null +++ b/latan/Eigen/src/Core/PermutationMatrix.h @@ -0,0 +1,687 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Benoit Jacob +// Copyright (C) 2009-2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
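The NumTraits machinery defined in NumTraits.h above is what generic code uses to query scalar properties. A short usage sketch, assuming only the members declared above:

#include <Eigen/Core>
#include <complex>
#include <iostream>

// Sketch: assumes only the NumTraits members declared above.
void numtraits_demo()
{
  std::cout << Eigen::NumTraits<double>::epsilon() << "\n";         // std::numeric_limits<double>::epsilon()
  std::cout << Eigen::NumTraits<double>::dummy_precision() << "\n"; // 1e-12, used by the fuzzy comparisons
  std::cout << Eigen::NumTraits<int>::IsInteger << "\n";            // 1
  std::cout << Eigen::NumTraits<int>::lowest() << "\n";             // std::numeric_limits<int>::min()

  // For complex scalars, Real gives the underlying real type (here: float).
  typedef Eigen::NumTraits<std::complex<float> >::Real Real;
  Real eps = Eigen::NumTraits<std::complex<float> >::epsilon();
  std::cout << eps << "\n";
}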
+ +#ifndef EIGEN_PERMUTATIONMATRIX_H +#define EIGEN_PERMUTATIONMATRIX_H + +namespace Eigen { + +template class PermutedImpl; + +/** \class PermutationBase + * \ingroup Core_Module + * + * \brief Base class for permutations + * + * \param Derived the derived class + * + * This class is the base class for all expressions representing a permutation matrix, + * internally stored as a vector of integers. + * The convention followed here is that if \f$ \sigma \f$ is a permutation, the corresponding permutation matrix + * \f$ P_\sigma \f$ is such that if \f$ (e_1,\ldots,e_p) \f$ is the canonical basis, we have: + * \f[ P_\sigma(e_i) = e_{\sigma(i)}. \f] + * This convention ensures that for any two permutations \f$ \sigma, \tau \f$, we have: + * \f[ P_{\sigma\circ\tau} = P_\sigma P_\tau. \f] + * + * Permutation matrices are square and invertible. + * + * Notice that in addition to the member functions and operators listed here, there also are non-member + * operator* to multiply any kind of permutation object with any kind of matrix expression (MatrixBase) + * on either side. + * + * \sa class PermutationMatrix, class PermutationWrapper + */ + +namespace internal { + +template +struct permut_matrix_product_retval; +template +struct permut_sparsematrix_product_retval; +enum PermPermProduct_t {PermPermProduct}; + +} // end namespace internal + +template +class PermutationBase : public EigenBase +{ + typedef internal::traits Traits; + typedef EigenBase Base; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + enum { + Flags = Traits::Flags, + CoeffReadCost = Traits::CoeffReadCost, + RowsAtCompileTime = Traits::RowsAtCompileTime, + ColsAtCompileTime = Traits::ColsAtCompileTime, + MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Traits::MaxColsAtCompileTime + }; + typedef typename Traits::Scalar Scalar; + typedef typename Traits::Index Index; + typedef Matrix + DenseMatrixType; + typedef PermutationMatrix + PlainPermutationType; + using Base::derived; + #endif + + /** Copies the other permutation into *this */ + template + Derived& operator=(const PermutationBase& other) + { + indices() = other.indices(); + return derived(); + } + + /** Assignment from the Transpositions \a tr */ + template + Derived& operator=(const TranspositionsBase& tr) + { + setIdentity(tr.size()); + for(Index k=size()-1; k>=0; --k) + applyTranspositionOnTheRight(k,tr.coeff(k)); + return derived(); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + Derived& operator=(const PermutationBase& other) + { + indices() = other.indices(); + return derived(); + } + #endif + + /** \returns the number of rows */ + inline Index rows() const { return Index(indices().size()); } + + /** \returns the number of columns */ + inline Index cols() const { return Index(indices().size()); } + + /** \returns the size of a side of the respective square matrix, i.e., the number of indices */ + inline Index size() const { return Index(indices().size()); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + void evalTo(MatrixBase& other) const + { + other.setZero(); + for (int i=0; i=0 && j>=0 && i=0 && j>=0 && i inverse() const + { return derived(); } + /** \returns the tranpose permutation matrix. 
+ * + * \note \note_try_to_help_rvo + */ + inline Transpose transpose() const + { return derived(); } + + /**** multiplication helpers to hopefully get RVO ****/ + + +#ifndef EIGEN_PARSED_BY_DOXYGEN + protected: + template + void assignTranspose(const PermutationBase& other) + { + for (int i=0; i + void assignProduct(const Lhs& lhs, const Rhs& rhs) + { + eigen_assert(lhs.cols() == rhs.rows()); + for (int i=0; i + inline PlainPermutationType operator*(const PermutationBase& other) const + { return PlainPermutationType(internal::PermPermProduct, derived(), other.derived()); } + + /** \returns the product of a permutation with another inverse permutation. + * + * \note \note_try_to_help_rvo + */ + template + inline PlainPermutationType operator*(const Transpose >& other) const + { return PlainPermutationType(internal::PermPermProduct, *this, other.eval()); } + + /** \returns the product of an inverse permutation with another permutation. + * + * \note \note_try_to_help_rvo + */ + template friend + inline PlainPermutationType operator*(const Transpose >& other, const PermutationBase& perm) + { return PlainPermutationType(internal::PermPermProduct, other.eval(), perm); } + + protected: + +}; + +/** \class PermutationMatrix + * \ingroup Core_Module + * + * \brief Permutation matrix + * + * \param SizeAtCompileTime the number of rows/cols, or Dynamic + * \param MaxSizeAtCompileTime the maximum number of rows/cols, or Dynamic. This optional parameter defaults to SizeAtCompileTime. Most of the time, you should not have to specify it. + * \param IndexType the interger type of the indices + * + * This class represents a permutation matrix, internally stored as a vector of integers. + * + * \sa class PermutationBase, class PermutationWrapper, class DiagonalMatrix + */ + +namespace internal { +template +struct traits > + : traits > +{ + typedef IndexType Index; + typedef Matrix IndicesType; +}; +} + +template +class PermutationMatrix : public PermutationBase > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + #endif + + inline PermutationMatrix() + {} + + /** Constructs an uninitialized permutation matrix of given size. + */ + inline PermutationMatrix(int size) : m_indices(size) + {} + + /** Copy constructor. */ + template + inline PermutationMatrix(const PermutationBase& other) + : m_indices(other.indices()) {} + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** Standard copy constructor. Defined only to prevent a default copy constructor + * from hiding the other templated constructor */ + inline PermutationMatrix(const PermutationMatrix& other) : m_indices(other.indices()) {} + #endif + + /** Generic constructor from expression of the indices. The indices + * array has the meaning that the permutations sends each integer i to indices[i]. + * + * \warning It is your responsibility to check that the indices array that you passes actually + * describes a permutation, i.e., each value between 0 and n-1 occurs exactly once, where n is the + * array's size. 
+ */ + template + explicit inline PermutationMatrix(const MatrixBase& indices) : m_indices(indices) + {} + + /** Convert the Transpositions \a tr to a permutation matrix */ + template + explicit PermutationMatrix(const TranspositionsBase& tr) + : m_indices(tr.size()) + { + *this = tr; + } + + /** Copies the other permutation into *this */ + template + PermutationMatrix& operator=(const PermutationBase& other) + { + m_indices = other.indices(); + return *this; + } + + /** Assignment from the Transpositions \a tr */ + template + PermutationMatrix& operator=(const TranspositionsBase& tr) + { + return Base::operator=(tr.derived()); + } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + PermutationMatrix& operator=(const PermutationMatrix& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + /** \returns a reference to the stored array representing the permutation. */ + IndicesType& indices() { return m_indices; } + + + /**** multiplication helpers to hopefully get RVO ****/ + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + PermutationMatrix(const Transpose >& other) + : m_indices(other.nestedPermutation().size()) + { + for (int i=0; i + PermutationMatrix(internal::PermPermProduct_t, const Lhs& lhs, const Rhs& rhs) + : m_indices(lhs.indices().size()) + { + Base::assignProduct(lhs,rhs); + } +#endif + + protected: + + IndicesType m_indices; +}; + + +namespace internal { +template +struct traits,_PacketAccess> > + : traits > +{ + typedef IndexType Index; + typedef Map, _PacketAccess> IndicesType; +}; +} + +template +class Map,_PacketAccess> + : public PermutationBase,_PacketAccess> > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + typedef typename IndicesType::Scalar Index; + #endif + + inline Map(const Index* indices) + : m_indices(indices) + {} + + inline Map(const Index* indices, Index size) + : m_indices(indices,size) + {} + + /** Copies the other permutation into *this */ + template + Map& operator=(const PermutationBase& other) + { return Base::operator=(other.derived()); } + + /** Assignment from the Transpositions \a tr */ + template + Map& operator=(const TranspositionsBase& tr) + { return Base::operator=(tr.derived()); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + Map& operator=(const Map& other) + { + m_indices = other.m_indices; + return *this; + } + #endif + + /** const version of indices(). */ + const IndicesType& indices() const { return m_indices; } + /** \returns a reference to the stored array representing the permutation. */ + IndicesType& indices() { return m_indices; } + + protected: + + IndicesType m_indices; +}; + +/** \class PermutationWrapper + * \ingroup Core_Module + * + * \brief Class to view a vector of integers as a permutation matrix + * + * \param _IndicesType the type of the vector of integer (can be any compatible expression) + * + * This class allows to view any vector expression of integers as a permutation matrix. 
+ * + * \sa class PermutationBase, class PermutationMatrix + */ + +struct PermutationStorage {}; + +template class TranspositionsWrapper; +namespace internal { +template +struct traits > +{ + typedef PermutationStorage StorageKind; + typedef typename _IndicesType::Scalar Scalar; + typedef typename _IndicesType::Scalar Index; + typedef _IndicesType IndicesType; + enum { + RowsAtCompileTime = _IndicesType::SizeAtCompileTime, + ColsAtCompileTime = _IndicesType::SizeAtCompileTime, + MaxRowsAtCompileTime = IndicesType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = IndicesType::MaxColsAtCompileTime, + Flags = 0, + CoeffReadCost = _IndicesType::CoeffReadCost + }; +}; +} + +template +class PermutationWrapper : public PermutationBase > +{ + typedef PermutationBase Base; + typedef internal::traits Traits; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef typename Traits::IndicesType IndicesType; + #endif + + inline PermutationWrapper(const IndicesType& indices) + : m_indices(indices) + {} + + /** const version of indices(). */ + const typename internal::remove_all::type& + indices() const { return m_indices; } + + protected: + + typename IndicesType::Nested m_indices; +}; + +/** \returns the matrix with the permutation applied to the columns. + */ +template +inline const internal::permut_matrix_product_retval +operator*(const MatrixBase& matrix, + const PermutationBase &permutation) +{ + return internal::permut_matrix_product_retval + + (permutation.derived(), matrix.derived()); +} + +/** \returns the matrix with the permutation applied to the rows. + */ +template +inline const internal::permut_matrix_product_retval + +operator*(const PermutationBase &permutation, + const MatrixBase& matrix) +{ + return internal::permut_matrix_product_retval + + (permutation.derived(), matrix.derived()); +} + +namespace internal { + +template +struct traits > +{ + typedef typename MatrixType::PlainObject ReturnType; +}; + +template +struct permut_matrix_product_retval + : public ReturnByValue > +{ + typedef typename remove_all::type MatrixTypeNestedCleaned; + + permut_matrix_product_retval(const PermutationType& perm, const MatrixType& matrix) + : m_permutation(perm), m_matrix(matrix) + {} + + inline int rows() const { return m_matrix.rows(); } + inline int cols() const { return m_matrix.cols(); } + + template inline void evalTo(Dest& dst) const + { + const int n = Side==OnTheLeft ? rows() : cols(); + + if(is_same::value && extract_data(dst) == extract_data(m_matrix)) + { + // apply the permutation inplace + Matrix mask(m_permutation.size()); + mask.fill(false); + int r = 0; + while(r < m_permutation.size()) + { + // search for the next seed + while(r=m_permutation.size()) + break; + // we got one, let's follow it until we are back to the seed + int k0 = r++; + int kPrev = k0; + mask.coeffRef(k0) = true; + for(int k=m_permutation.indices().coeff(k0); k!=k0; k=m_permutation.indices().coeff(k)) + { + Block(dst, k) + .swap(Block + (dst,((Side==OnTheLeft) ^ Transposed) ? k0 : kPrev)); + + mask.coeffRef(k) = true; + kPrev = k; + } + } + } + else + { + for(int i = 0; i < n; ++i) + { + Block + (dst, ((Side==OnTheLeft) ^ Transposed) ? m_permutation.indices().coeff(i) : i) + + = + + Block + (m_matrix, ((Side==OnTheRight) ^ Transposed) ? 
m_permutation.indices().coeff(i) : i); + } + } + } + + protected: + const PermutationType& m_permutation; + typename MatrixType::Nested m_matrix; +}; + +/* Template partial specialization for transposed/inverse permutations */ + +template +struct traits > > + : traits +{}; + +} // end namespace internal + +template +class Transpose > + : public EigenBase > > +{ + typedef Derived PermutationType; + typedef typename PermutationType::IndicesType IndicesType; + typedef typename PermutationType::PlainPermutationType PlainPermutationType; + public: + + #ifndef EIGEN_PARSED_BY_DOXYGEN + typedef internal::traits Traits; + typedef typename Derived::DenseMatrixType DenseMatrixType; + enum { + Flags = Traits::Flags, + CoeffReadCost = Traits::CoeffReadCost, + RowsAtCompileTime = Traits::RowsAtCompileTime, + ColsAtCompileTime = Traits::ColsAtCompileTime, + MaxRowsAtCompileTime = Traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Traits::MaxColsAtCompileTime + }; + typedef typename Traits::Scalar Scalar; + #endif + + Transpose(const PermutationType& p) : m_permutation(p) {} + + inline int rows() const { return m_permutation.rows(); } + inline int cols() const { return m_permutation.cols(); } + + #ifndef EIGEN_PARSED_BY_DOXYGEN + template + void evalTo(MatrixBase& other) const + { + other.setZero(); + for (int i=0; i friend + inline const internal::permut_matrix_product_retval + operator*(const MatrixBase& matrix, const Transpose& trPerm) + { + return internal::permut_matrix_product_retval(trPerm.m_permutation, matrix.derived()); + } + + /** \returns the matrix with the inverse permutation applied to the rows. + */ + template + inline const internal::permut_matrix_product_retval + operator*(const MatrixBase& matrix) const + { + return internal::permut_matrix_product_retval(m_permutation, matrix.derived()); + } + + const PermutationType& nestedPermutation() const { return m_permutation; } + + protected: + const PermutationType& m_permutation; +}; + +template +const PermutationWrapper MatrixBase::asPermutation() const +{ + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_PERMUTATIONMATRIX_H diff --git a/latan/Eigen/src/Core/PlainObjectBase.h b/latan/Eigen/src/Core/PlainObjectBase.h new file mode 100644 index 0000000..cbe9e3b --- /dev/null +++ b/latan/Eigen/src/Core/PlainObjectBase.h @@ -0,0 +1,768 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_DENSESTORAGEBASE_H +#define EIGEN_DENSESTORAGEBASE_H + +#ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO +# define EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED for(int i=0;i +EIGEN_ALWAYS_INLINE void check_rows_cols_for_overflow(Index rows, Index cols) +{ + // http://hg.mozilla.org/mozilla-central/file/6c8a909977d3/xpcom/ds/CheckedInt.h#l242 + // we assume Index is signed + Index max_index = (size_t(1) << (8 * sizeof(Index) - 1)) - 1; // assume Index is signed + bool error = (rows < 0 || cols < 0) ? true + : (rows == 0 || cols == 0) ? 
false + : (rows > max_index / cols); + if (error) + throw_std_bad_alloc(); +} + +template struct conservative_resize_like_impl; + +template struct matrix_swap_impl; + +} // end namespace internal + +/** \class PlainObjectBase + * \brief %Dense storage base class for matrices and arrays. + * + * This class can be extended with the help of the plugin mechanism described on the page + * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_PLAINOBJECTBASE_PLUGIN. + * + * \sa \ref TopicClassHierarchy + */ +#ifdef EIGEN_PARSED_BY_DOXYGEN +namespace internal { + +// this is a warkaround to doxygen not being able to understand the inheritence logic +// when it is hidden by the dense_xpr_base helper struct. +template struct dense_xpr_base_dispatcher_for_doxygen;// : public MatrixBase {}; +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template +struct dense_xpr_base_dispatcher_for_doxygen > + : public MatrixBase > {}; +/** This class is just a workaround for Doxygen and it does not not actually exist. */ +template +struct dense_xpr_base_dispatcher_for_doxygen > + : public ArrayBase > {}; + +} // namespace internal + +template +class PlainObjectBase : public internal::dense_xpr_base_dispatcher_for_doxygen +#else +template +class PlainObjectBase : public internal::dense_xpr_base::type +#endif +{ + public: + enum { Options = internal::traits::Options }; + typedef typename internal::dense_xpr_base::type Base; + + typedef typename internal::traits::StorageKind StorageKind; + typedef typename internal::traits::Index Index; + typedef typename internal::traits::Scalar Scalar; + typedef typename internal::packet_traits::type PacketScalar; + typedef typename NumTraits::Real RealScalar; + typedef Derived DenseType; + + using Base::RowsAtCompileTime; + using Base::ColsAtCompileTime; + using Base::SizeAtCompileTime; + using Base::MaxRowsAtCompileTime; + using Base::MaxColsAtCompileTime; + using Base::MaxSizeAtCompileTime; + using Base::IsVectorAtCompileTime; + using Base::Flags; + + template friend class Eigen::Map; + friend class Eigen::Map; + typedef Eigen::Map MapType; + friend class Eigen::Map; + typedef const Eigen::Map ConstMapType; + friend class Eigen::Map; + typedef Eigen::Map AlignedMapType; + friend class Eigen::Map; + typedef const Eigen::Map ConstAlignedMapType; + template struct StridedMapType { typedef Eigen::Map type; }; + template struct StridedConstMapType { typedef Eigen::Map type; }; + template struct StridedAlignedMapType { typedef Eigen::Map type; }; + template struct StridedConstAlignedMapType { typedef Eigen::Map type; }; + + protected: + DenseStorage m_storage; + + public: + enum { NeedsToAlign = SizeAtCompileTime != Dynamic && (internal::traits::Flags & AlignedBit) != 0 }; + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + + Base& base() { return *static_cast(this); } + const Base& base() const { return *static_cast(this); } + + EIGEN_STRONG_INLINE Index rows() const { return m_storage.rows(); } + EIGEN_STRONG_INLINE Index cols() const { return m_storage.cols(); } + + EIGEN_STRONG_INLINE const Scalar& coeff(Index row, Index col) const + { + if(Flags & RowMajorBit) + return m_storage.data()[col + row * m_storage.cols()]; + else // column-major + return m_storage.data()[row + col * m_storage.rows()]; + } + + EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const + { + return m_storage.data()[index]; + } + + EIGEN_STRONG_INLINE Scalar& coeffRef(Index row, Index col) + { + if(Flags & RowMajorBit) + return 
m_storage.data()[col + row * m_storage.cols()]; + else // column-major + return m_storage.data()[row + col * m_storage.rows()]; + } + + EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) + { + return m_storage.data()[index]; + } + + EIGEN_STRONG_INLINE const Scalar& coeffRef(Index row, Index col) const + { + if(Flags & RowMajorBit) + return m_storage.data()[col + row * m_storage.cols()]; + else // column-major + return m_storage.data()[row + col * m_storage.rows()]; + } + + EIGEN_STRONG_INLINE const Scalar& coeffRef(Index index) const + { + return m_storage.data()[index]; + } + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketScalar packet(Index row, Index col) const + { + return internal::ploadt + (m_storage.data() + (Flags & RowMajorBit + ? col + row * m_storage.cols() + : row + col * m_storage.rows())); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE PacketScalar packet(Index index) const + { + return internal::ploadt(m_storage.data() + index); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE void writePacket(Index row, Index col, const PacketScalar& x) + { + internal::pstoret + (m_storage.data() + (Flags & RowMajorBit + ? col + row * m_storage.cols() + : row + col * m_storage.rows()), x); + } + + /** \internal */ + template + EIGEN_STRONG_INLINE void writePacket(Index index, const PacketScalar& x) + { + internal::pstoret(m_storage.data() + index, x); + } + + /** \returns a const pointer to the data array of this matrix */ + EIGEN_STRONG_INLINE const Scalar *data() const + { return m_storage.data(); } + + /** \returns a pointer to the data array of this matrix */ + EIGEN_STRONG_INLINE Scalar *data() + { return m_storage.data(); } + + /** Resizes \c *this to a \a rows x \a cols matrix. + * + * This method is intended for dynamic-size matrices, although it is legal to call it on any + * matrix as long as fixed dimensions are left unchanged. If you only want to change the number + * of rows and/or of columns, you can use resize(NoChange_t, Index), resize(Index, NoChange_t). + * + * If the current number of coefficients of \c *this exactly matches the + * product \a rows * \a cols, then no memory allocation is performed and + * the current values are left unchanged. In all other cases, including + * shrinking, the data is reallocated and all previous values are lost. + * + * Example: \include Matrix_resize_int_int.cpp + * Output: \verbinclude Matrix_resize_int_int.out + * + * \sa resize(Index) for vectors, resize(NoChange_t, Index), resize(Index, NoChange_t) + */ + EIGEN_STRONG_INLINE void resize(Index rows, Index cols) + { + #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO + internal::check_rows_cols_for_overflow(rows, cols); + Index size = rows*cols; + bool size_changed = size != this->size(); + m_storage.resize(size, rows, cols); + if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + #else + internal::check_rows_cols_for_overflow(rows, cols); + m_storage.resize(rows*cols, rows, cols); + #endif + } + + /** Resizes \c *this to a vector of length \a size + * + * \only_for_vectors. This method does not work for + * partially dynamic matrices when the static dimension is anything other + * than 1. For example it will not work with Matrix. 
+ * + * Example: \include Matrix_resize_int.cpp + * Output: \verbinclude Matrix_resize_int.out + * + * \sa resize(Index,Index), resize(NoChange_t, Index), resize(Index, NoChange_t) + */ + inline void resize(Index size) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(PlainObjectBase) + eigen_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); + #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO + bool size_changed = size != this->size(); + #endif + if(RowsAtCompileTime == 1) + m_storage.resize(size, 1, size); + else + m_storage.resize(size, size, 1); + #ifdef EIGEN_INITIALIZE_MATRICES_BY_ZERO + if(size_changed) EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + #endif + } + + /** Resizes the matrix, changing only the number of columns. For the parameter of type NoChange_t, just pass the special value \c NoChange + * as in the example below. + * + * Example: \include Matrix_resize_NoChange_int.cpp + * Output: \verbinclude Matrix_resize_NoChange_int.out + * + * \sa resize(Index,Index) + */ + inline void resize(NoChange_t, Index cols) + { + resize(rows(), cols); + } + + /** Resizes the matrix, changing only the number of rows. For the parameter of type NoChange_t, just pass the special value \c NoChange + * as in the example below. + * + * Example: \include Matrix_resize_int_NoChange.cpp + * Output: \verbinclude Matrix_resize_int_NoChange.out + * + * \sa resize(Index,Index) + */ + inline void resize(Index rows, NoChange_t) + { + resize(rows, cols()); + } + + /** Resizes \c *this to have the same dimensions as \a other. + * Takes care of doing all the checking that's needed. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_STRONG_INLINE void resizeLike(const EigenBase& _other) + { + const OtherDerived& other = _other.derived(); + internal::check_rows_cols_for_overflow(other.rows(), other.cols()); + const Index othersize = other.rows()*other.cols(); + if(RowsAtCompileTime == 1) + { + eigen_assert(other.rows() == 1 || other.cols() == 1); + resize(1, othersize); + } + else if(ColsAtCompileTime == 1) + { + eigen_assert(other.rows() == 1 || other.cols() == 1); + resize(othersize, 1); + } + else resize(other.rows(), other.cols()); + } + + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. + * + * The method is intended for matrices of dynamic size. If you only want to change the number + * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or + * conservativeResize(Index, NoChange_t). + * + * Matrices are resized relative to the top-left element. In case values need to be + * appended to the matrix they will be uninitialized. + */ + EIGEN_STRONG_INLINE void conservativeResize(Index rows, Index cols) + { + internal::conservative_resize_like_impl::run(*this, rows, cols); + } + + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. + * + * As opposed to conservativeResize(Index rows, Index cols), this version leaves + * the number of columns unchanged. + * + * In case the matrix is growing, new rows will be uninitialized. + */ + EIGEN_STRONG_INLINE void conservativeResize(Index rows, NoChange_t) + { + // Note: see the comment in conservativeResize(Index,Index) + conservativeResize(rows, cols()); + } + + /** Resizes the matrix to \a rows x \a cols while leaving old values untouched. 
+ * + * As opposed to conservativeResize(Index rows, Index cols), this version leaves + * the number of rows unchanged. + * + * In case the matrix is growing, new columns will be uninitialized. + */ + EIGEN_STRONG_INLINE void conservativeResize(NoChange_t, Index cols) + { + // Note: see the comment in conservativeResize(Index,Index) + conservativeResize(rows(), cols); + } + + /** Resizes the vector to \a size while retaining old values. + * + * \only_for_vectors. This method does not work for + * partially dynamic matrices when the static dimension is anything other + * than 1. For example it will not work with Matrix. + * + * When values are appended, they will be uninitialized. + */ + EIGEN_STRONG_INLINE void conservativeResize(Index size) + { + internal::conservative_resize_like_impl::run(*this, size); + } + + /** Resizes the matrix to \a rows x \a cols of \c other, while leaving old values untouched. + * + * The method is intended for matrices of dynamic size. If you only want to change the number + * of rows and/or of columns, you can use conservativeResize(NoChange_t, Index) or + * conservativeResize(Index, NoChange_t). + * + * Matrices are resized relative to the top-left element. In case values need to be + * appended to the matrix they will copied from \c other. + */ + template + EIGEN_STRONG_INLINE void conservativeResizeLike(const DenseBase& other) + { + internal::conservative_resize_like_impl::run(*this, other); + } + + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + EIGEN_STRONG_INLINE Derived& operator=(const PlainObjectBase& other) + { + return _set(other); + } + + /** \sa MatrixBase::lazyAssign() */ + template + EIGEN_STRONG_INLINE Derived& lazyAssign(const DenseBase& other) + { + _resize_to_match(other); + return Base::lazyAssign(other.derived()); + } + + template + EIGEN_STRONG_INLINE Derived& operator=(const ReturnByValue& func) + { + resize(func.rows(), func.cols()); + return Base::operator=(func); + } + + EIGEN_STRONG_INLINE explicit PlainObjectBase() : m_storage() + { +// _check_template_params(); +// EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + // FIXME is it still needed ? + /** \internal */ + PlainObjectBase(internal::constructor_without_unaligned_array_assert) + : m_storage(internal::constructor_without_unaligned_array_assert()) + { +// _check_template_params(); EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } +#endif + + EIGEN_STRONG_INLINE PlainObjectBase(Index size, Index rows, Index cols) + : m_storage(size, rows, cols) + { +// _check_template_params(); +// EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } + + /** \copydoc MatrixBase::operator=(const EigenBase&) + */ + template + EIGEN_STRONG_INLINE Derived& operator=(const EigenBase &other) + { + _resize_to_match(other); + Base::operator=(other.derived()); + return this->derived(); + } + + /** \sa MatrixBase::operator=(const EigenBase&) */ + template + EIGEN_STRONG_INLINE PlainObjectBase(const EigenBase &other) + : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols()) + { + _check_template_params(); + internal::check_rows_cols_for_overflow(other.derived().rows(), other.derived().cols()); + Base::operator=(other.derived()); + } + + /** \name Map + * These are convenience functions returning Map objects. 
The Map() static functions return unaligned Map objects, + * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned + * \a data pointers. + * + * \see class Map + */ + //@{ + static inline ConstMapType Map(const Scalar* data) + { return ConstMapType(data); } + static inline MapType Map(Scalar* data) + { return MapType(data); } + static inline ConstMapType Map(const Scalar* data, Index size) + { return ConstMapType(data, size); } + static inline MapType Map(Scalar* data, Index size) + { return MapType(data, size); } + static inline ConstMapType Map(const Scalar* data, Index rows, Index cols) + { return ConstMapType(data, rows, cols); } + static inline MapType Map(Scalar* data, Index rows, Index cols) + { return MapType(data, rows, cols); } + + static inline ConstAlignedMapType MapAligned(const Scalar* data) + { return ConstAlignedMapType(data); } + static inline AlignedMapType MapAligned(Scalar* data) + { return AlignedMapType(data); } + static inline ConstAlignedMapType MapAligned(const Scalar* data, Index size) + { return ConstAlignedMapType(data, size); } + static inline AlignedMapType MapAligned(Scalar* data, Index size) + { return AlignedMapType(data, size); } + static inline ConstAlignedMapType MapAligned(const Scalar* data, Index rows, Index cols) + { return ConstAlignedMapType(data, rows, cols); } + static inline AlignedMapType MapAligned(Scalar* data, Index rows, Index cols) + { return AlignedMapType(data, rows, cols); } + + template + static inline typename StridedConstMapType >::type Map(const Scalar* data, const Stride& stride) + { return typename StridedConstMapType >::type(data, stride); } + template + static inline typename StridedMapType >::type Map(Scalar* data, const Stride& stride) + { return typename StridedMapType >::type(data, stride); } + template + static inline typename StridedConstMapType >::type Map(const Scalar* data, Index size, const Stride& stride) + { return typename StridedConstMapType >::type(data, size, stride); } + template + static inline typename StridedMapType >::type Map(Scalar* data, Index size, const Stride& stride) + { return typename StridedMapType >::type(data, size, stride); } + template + static inline typename StridedConstMapType >::type Map(const Scalar* data, Index rows, Index cols, const Stride& stride) + { return typename StridedConstMapType >::type(data, rows, cols, stride); } + template + static inline typename StridedMapType >::type Map(Scalar* data, Index rows, Index cols, const Stride& stride) + { return typename StridedMapType >::type(data, rows, cols, stride); } + + template + static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, const Stride& stride) + { return typename StridedConstAlignedMapType >::type(data, stride); } + template + static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, const Stride& stride) + { return typename StridedAlignedMapType >::type(data, stride); } + template + static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index size, const Stride& stride) + { return typename StridedConstAlignedMapType >::type(data, size, stride); } + template + static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index size, const Stride& stride) + { return typename StridedAlignedMapType >::type(data, size, stride); } + template + static inline typename StridedConstAlignedMapType >::type MapAligned(const Scalar* data, Index rows, Index cols, const Stride& 
stride) + { return typename StridedConstAlignedMapType >::type(data, rows, cols, stride); } + template + static inline typename StridedAlignedMapType >::type MapAligned(Scalar* data, Index rows, Index cols, const Stride& stride) + { return typename StridedAlignedMapType >::type(data, rows, cols, stride); } + //@} + + using Base::setConstant; + Derived& setConstant(Index size, const Scalar& value); + Derived& setConstant(Index rows, Index cols, const Scalar& value); + + using Base::setZero; + Derived& setZero(Index size); + Derived& setZero(Index rows, Index cols); + + using Base::setOnes; + Derived& setOnes(Index size); + Derived& setOnes(Index rows, Index cols); + + using Base::setRandom; + Derived& setRandom(Index size); + Derived& setRandom(Index rows, Index cols); + + #ifdef EIGEN_PLAINOBJECTBASE_PLUGIN + #include EIGEN_PLAINOBJECTBASE_PLUGIN + #endif + + protected: + /** \internal Resizes *this in preparation for assigning \a other to it. + * Takes care of doing all the checking that's needed. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_STRONG_INLINE void _resize_to_match(const EigenBase& other) + { + #ifdef EIGEN_NO_AUTOMATIC_RESIZING + eigen_assert((this->size()==0 || (IsVectorAtCompileTime ? (this->size() == other.size()) + : (rows() == other.rows() && cols() == other.cols()))) + && "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined"); + EIGEN_ONLY_USED_FOR_DEBUG(other); + #else + resizeLike(other); + #endif + } + + /** + * \brief Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + * + * \sa operator=(const MatrixBase&), _set_noalias() + * + * \internal + */ + template + EIGEN_STRONG_INLINE Derived& _set(const DenseBase& other) + { + _set_selector(other.derived(), typename internal::conditional(int(OtherDerived::Flags) & EvalBeforeAssigningBit), internal::true_type, internal::false_type>::type()); + return this->derived(); + } + + template + EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::true_type&) { _set_noalias(other.eval()); } + + template + EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const internal::false_type&) { _set_noalias(other); } + + /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which + * is the case when creating a new matrix) so one can enforce lazy evaluation. + * + * \sa operator=(const MatrixBase&), _set() + */ + template + EIGEN_STRONG_INLINE Derived& _set_noalias(const DenseBase& other) + { + // I don't think we need this resize call since the lazyAssign will anyways resize + // and lazyAssign will be called by the assign selector. + //_resize_to_match(other); + // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because + // it wouldn't allow to copy a row-vector into a column-vector. 
+ return internal::assign_selector::run(this->derived(), other.derived()); + } + + template + EIGEN_STRONG_INLINE void _init2(Index rows, Index cols, typename internal::enable_if::type* = 0) + { + EIGEN_STATIC_ASSERT(bool(NumTraits::IsInteger) && + bool(NumTraits::IsInteger), + FLOATING_POINT_ARGUMENT_PASSED__INTEGER_WAS_EXPECTED) + eigen_assert(rows >= 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + && cols >= 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); + internal::check_rows_cols_for_overflow(rows, cols); + m_storage.resize(rows*cols,rows,cols); + EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED + } + template + EIGEN_STRONG_INLINE void _init2(const Scalar& x, const Scalar& y, typename internal::enable_if::type* = 0) + { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(PlainObjectBase, 2) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + } + + template + friend struct internal::matrix_swap_impl; + + /** \internal generic implementation of swap for dense storage since for dynamic-sized matrices of same type it is enough to swap the + * data pointers. + */ + template + void _swap(DenseBase const & other) + { + enum { SwapPointers = internal::is_same::value && Base::SizeAtCompileTime==Dynamic }; + internal::matrix_swap_impl::run(this->derived(), other.const_cast_derived()); + } + + public: +#ifndef EIGEN_PARSED_BY_DOXYGEN + static EIGEN_STRONG_INLINE void _check_template_params() + { + EIGEN_STATIC_ASSERT((EIGEN_IMPLIES(MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1, (Options&RowMajor)==RowMajor) + && EIGEN_IMPLIES(MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1, (Options&RowMajor)==0) + && ((RowsAtCompileTime == Dynamic) || (RowsAtCompileTime >= 0)) + && ((ColsAtCompileTime == Dynamic) || (ColsAtCompileTime >= 0)) + && ((MaxRowsAtCompileTime == Dynamic) || (MaxRowsAtCompileTime >= 0)) + && ((MaxColsAtCompileTime == Dynamic) || (MaxColsAtCompileTime >= 0)) + && (MaxRowsAtCompileTime == RowsAtCompileTime || RowsAtCompileTime==Dynamic) + && (MaxColsAtCompileTime == ColsAtCompileTime || ColsAtCompileTime==Dynamic) + && (Options & (DontAlign|RowMajor)) == Options), + INVALID_MATRIX_TEMPLATE_PARAMETERS) + } +#endif + +private: + enum { ThisConstantIsPrivateInPlainObjectBase }; +}; + +template +struct internal::conservative_resize_like_impl +{ + typedef typename Derived::Index Index; + static void run(DenseBase& _this, Index rows, Index cols) + { + if (_this.rows() == rows && _this.cols() == cols) return; + EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) + + if ( ( Derived::IsRowMajor && _this.cols() == cols) || // row-major and we change only the number of rows + (!Derived::IsRowMajor && _this.rows() == rows) ) // column-major and we change only the number of columns + { + internal::check_rows_cols_for_overflow(rows, cols); + _this.derived().m_storage.conservativeResize(rows*cols,rows,cols); + } + else + { + // The storage order does not allow us to use reallocation. + typename Derived::PlainObject tmp(rows,cols); + const Index common_rows = (std::min)(rows, _this.rows()); + const Index common_cols = (std::min)(cols, _this.cols()); + tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); + _this.derived().swap(tmp); + } + } + + static void run(DenseBase& _this, const DenseBase& other) + { + if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; + + // Note: Here is space for improvement. Basically, for conservativeResize(Index,Index), + // neither RowsAtCompileTime or ColsAtCompileTime must be Dynamic. 
If only one of the + // dimensions is dynamic, one could use either conservativeResize(Index rows, NoChange_t) or + // conservativeResize(NoChange_t, Index cols). For these methods new static asserts like + // EIGEN_STATIC_ASSERT_DYNAMIC_ROWS and EIGEN_STATIC_ASSERT_DYNAMIC_COLS would be good. + EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived) + EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived) + + if ( ( Derived::IsRowMajor && _this.cols() == other.cols()) || // row-major and we change only the number of rows + (!Derived::IsRowMajor && _this.rows() == other.rows()) ) // column-major and we change only the number of columns + { + const Index new_rows = other.rows() - _this.rows(); + const Index new_cols = other.cols() - _this.cols(); + _this.derived().m_storage.conservativeResize(other.size(),other.rows(),other.cols()); + if (new_rows>0) + _this.bottomRightCorner(new_rows, other.cols()) = other.bottomRows(new_rows); + else if (new_cols>0) + _this.bottomRightCorner(other.rows(), new_cols) = other.rightCols(new_cols); + } + else + { + // The storage order does not allow us to use reallocation. + typename Derived::PlainObject tmp(other); + const Index common_rows = (std::min)(tmp.rows(), _this.rows()); + const Index common_cols = (std::min)(tmp.cols(), _this.cols()); + tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols); + _this.derived().swap(tmp); + } + } +}; + +namespace internal { + +template +struct conservative_resize_like_impl +{ + typedef typename Derived::Index Index; + static void run(DenseBase& _this, Index size) + { + const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : size; + const Index new_cols = Derived::RowsAtCompileTime==1 ? size : 1; + _this.derived().m_storage.conservativeResize(size,new_rows,new_cols); + } + + static void run(DenseBase& _this, const DenseBase& other) + { + if (_this.rows() == other.rows() && _this.cols() == other.cols()) return; + + const Index num_new_elements = other.size() - _this.size(); + + const Index new_rows = Derived::RowsAtCompileTime==1 ? 1 : other.rows(); + const Index new_cols = Derived::RowsAtCompileTime==1 ? other.cols() : 1; + _this.derived().m_storage.conservativeResize(other.size(),new_rows,new_cols); + + if (num_new_elements > 0) + _this.tail(num_new_elements) = other.tail(num_new_elements); + } +}; + +template +struct matrix_swap_impl +{ + static inline void run(MatrixTypeA& a, MatrixTypeB& b) + { + a.base().swap(b); + } +}; + +template +struct matrix_swap_impl +{ + static inline void run(MatrixTypeA& a, MatrixTypeB& b) + { + static_cast(a).m_storage.swap(static_cast(b).m_storage); + } +}; + +} // end namespace internal + +} // end namespace Eigen + +#endif // EIGEN_DENSESTORAGEBASE_H diff --git a/latan/Eigen/src/Core/Product.h b/latan/Eigen/src/Core/Product.h new file mode 100644 index 0000000..30aa894 --- /dev/null +++ b/latan/Eigen/src/Core/Product.h @@ -0,0 +1,98 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2011 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. 
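The resizing API documented in PlainObjectBase.h above can be exercised as in the following sketch, which assumes only the dynamic-size matrix types and the NoChange tag used in that header:

#include <Eigen/Dense>
using namespace Eigen;

// Sketch: assumes the dynamic-size types and the NoChange tag used above.
void resize_demo()
{
  MatrixXd m(2, 3);
  m.setConstant(1.0);

  m.resize(4, 5);                    // size changes: storage is reallocated, old values are lost
  m.conservativeResize(6, 5);        // grows while keeping the old 4x5 block; new rows are uninitialized
  m.conservativeResize(NoChange, 2); // change only the number of columns

  VectorXd v(10);
  v.setZero();
  v.conservativeResize(20);          // the old 10 entries are kept, the 10 new ones are uninitialized
}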
+ +#ifndef EIGEN_PRODUCT_H +#define EIGEN_PRODUCT_H + +template class Product; +template class ProductImpl; + +/** \class Product + * \ingroup Core_Module + * + * \brief Expression of the product of two arbitrary matrices or vectors + * + * \param Lhs the type of the left-hand side expression + * \param Rhs the type of the right-hand side expression + * + * This class represents an expression of the product of two arbitrary matrices. + * + */ + +namespace internal { +template +struct traits > +{ + typedef MatrixXpr XprKind; + typedef typename remove_all::type LhsCleaned; + typedef typename remove_all::type RhsCleaned; + typedef typename scalar_product_traits::Scalar, typename traits::Scalar>::ReturnType Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + enum { + RowsAtCompileTime = LhsCleaned::RowsAtCompileTime, + ColsAtCompileTime = RhsCleaned::ColsAtCompileTime, + MaxRowsAtCompileTime = LhsCleaned::MaxRowsAtCompileTime, + MaxColsAtCompileTime = RhsCleaned::MaxColsAtCompileTime, + Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0), // TODO should be no storage order + CoeffReadCost = 0 // TODO CoeffReadCost should not be part of the expression traits + }; +}; +} // end namespace internal + + +template +class Product : public ProductImpl::StorageKind, + typename internal::traits::StorageKind>::ret> +{ + public: + + typedef typename ProductImpl< + Lhs, Rhs, + typename internal::promote_storage_type::ret>::Base Base; + EIGEN_GENERIC_PUBLIC_INTERFACE(Product) + + typedef typename Lhs::Nested LhsNested; + typedef typename Rhs::Nested RhsNested; + typedef typename internal::remove_all::type LhsNestedCleaned; + typedef typename internal::remove_all::type RhsNestedCleaned; + + Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs) + { + eigen_assert(lhs.cols() == rhs.rows() + && "invalid matrix product" + && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); + } + + inline Index rows() const { return m_lhs.rows(); } + inline Index cols() const { return m_rhs.cols(); } + + const LhsNestedCleaned& lhs() const { return m_lhs; } + const RhsNestedCleaned& rhs() const { return m_rhs; } + + protected: + + const LhsNested m_lhs; + const RhsNested m_rhs; +}; + +template +class ProductImpl : public internal::dense_xpr_base >::type +{ + typedef Product Derived; + public: + + typedef typename internal::dense_xpr_base >::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) +}; + +#endif // EIGEN_PRODUCT_H diff --git a/latan/Eigen/src/Core/ProductBase.h b/latan/Eigen/src/Core/ProductBase.h new file mode 100644 index 0000000..ec12e5c --- /dev/null +++ b/latan/Eigen/src/Core/ProductBase.h @@ -0,0 +1,278 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +#ifndef EIGEN_PRODUCTBASE_H +#define EIGEN_PRODUCTBASE_H + +namespace Eigen { + +/** \class ProductBase + * \ingroup Core_Module + * + */ + +namespace internal { +template +struct traits > +{ + typedef MatrixXpr XprKind; + typedef typename remove_all<_Lhs>::type Lhs; + typedef typename remove_all<_Rhs>::type Rhs; + typedef typename scalar_product_traits::ReturnType Scalar; + typedef typename promote_storage_type::StorageKind, + typename traits::StorageKind>::ret StorageKind; + typedef typename promote_index_type::Index, + typename traits::Index>::type Index; + enum { + RowsAtCompileTime = traits::RowsAtCompileTime, + ColsAtCompileTime = traits::ColsAtCompileTime, + MaxRowsAtCompileTime = traits::MaxRowsAtCompileTime, + MaxColsAtCompileTime = traits::MaxColsAtCompileTime, + Flags = (MaxRowsAtCompileTime==1 ? RowMajorBit : 0) + | EvalBeforeNestingBit | EvalBeforeAssigningBit | NestByRefBit, + // Note that EvalBeforeNestingBit and NestByRefBit + // are not used in practice because nested is overloaded for products + CoeffReadCost = 0 // FIXME why is it needed ? + }; +}; +} + +#define EIGEN_PRODUCT_PUBLIC_INTERFACE(Derived) \ + typedef ProductBase Base; \ + EIGEN_DENSE_PUBLIC_INTERFACE(Derived) \ + typedef typename Base::LhsNested LhsNested; \ + typedef typename Base::_LhsNested _LhsNested; \ + typedef typename Base::LhsBlasTraits LhsBlasTraits; \ + typedef typename Base::ActualLhsType ActualLhsType; \ + typedef typename Base::_ActualLhsType _ActualLhsType; \ + typedef typename Base::RhsNested RhsNested; \ + typedef typename Base::_RhsNested _RhsNested; \ + typedef typename Base::RhsBlasTraits RhsBlasTraits; \ + typedef typename Base::ActualRhsType ActualRhsType; \ + typedef typename Base::_ActualRhsType _ActualRhsType; \ + using Base::m_lhs; \ + using Base::m_rhs; + +template +class ProductBase : public MatrixBase +{ + public: + typedef MatrixBase Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ProductBase) + + typedef typename Lhs::Nested LhsNested; + typedef typename internal::remove_all::type _LhsNested; + typedef internal::blas_traits<_LhsNested> LhsBlasTraits; + typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; + typedef typename internal::remove_all::type _ActualLhsType; + typedef typename internal::traits::Scalar LhsScalar; + + typedef typename Rhs::Nested RhsNested; + typedef typename internal::remove_all::type _RhsNested; + typedef internal::blas_traits<_RhsNested> RhsBlasTraits; + typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; + typedef typename internal::remove_all::type _ActualRhsType; + typedef typename internal::traits::Scalar RhsScalar; + + // Diagonal of a product: no need to evaluate the arguments because they are going to be evaluated only once + typedef CoeffBasedProduct FullyLazyCoeffBaseProductType; + + public: + + typedef typename Base::PlainObject PlainObject; + + ProductBase(const Lhs& lhs, const Rhs& rhs) + : m_lhs(lhs), m_rhs(rhs) + { + eigen_assert(lhs.cols() == rhs.rows() + && "invalid matrix product" + && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); + } + + inline Index rows() const { return m_lhs.rows(); } + inline Index cols() const { return m_rhs.cols(); } + + template + inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst,Scalar(1)); } + + template + inline void addTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(1)); } + + template + inline void subTo(Dest& dst) const { scaleAndAddTo(dst,Scalar(-1)); } + + template + inline void scaleAndAddTo(Dest& dst,Scalar alpha) 
const { derived().scaleAndAddTo(dst,alpha); } + + const _LhsNested& lhs() const { return m_lhs; } + const _RhsNested& rhs() const { return m_rhs; } + + // Implicit conversion to the nested type (trigger the evaluation of the product) + operator const PlainObject& () const + { + m_result.resize(m_lhs.rows(), m_rhs.cols()); + derived().evalTo(m_result); + return m_result; + } + + const Diagonal diagonal() const + { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } + + template + const Diagonal diagonal() const + { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs); } + + const Diagonal diagonal(Index index) const + { return FullyLazyCoeffBaseProductType(m_lhs, m_rhs).diagonal(index); } + + // restrict coeff accessors to 1x1 expressions. No need to care about mutators here since this isnt a Lvalue expression + typename Base::CoeffReturnType coeff(Index row, Index col) const + { +#ifdef EIGEN2_SUPPORT + return lhs().row(row).cwiseProduct(rhs().col(col).transpose()).sum(); +#else + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + Matrix result = *this; + return result.coeff(row,col); +#endif + } + + typename Base::CoeffReturnType coeff(Index i) const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + Matrix result = *this; + return result.coeff(i); + } + + const Scalar& coeffRef(Index row, Index col) const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeffRef(row,col); + } + + const Scalar& coeffRef(Index i) const + { + EIGEN_STATIC_ASSERT_SIZE_1x1(Derived) + eigen_assert(this->rows() == 1 && this->cols() == 1); + return derived().coeffRef(i); + } + + protected: + + LhsNested m_lhs; + RhsNested m_rhs; + + mutable PlainObject m_result; +}; + +// here we need to overload the nested rule for products +// such that the nested type is a const reference to a plain matrix +namespace internal { +template +struct nested, N, PlainObject> +{ + typedef PlainObject const& type; +}; +} + +template +class ScaledProduct; + +// Note that these two operator* functions are not defined as member +// functions of ProductBase, because, otherwise we would have to +// define all overloads defined in MatrixBase. Furthermore, Using +// "using Base::operator*" would not work with MSVC. 
+// +// Also note that here we accept any compatible scalar types +template +const ScaledProduct +operator*(const ProductBase& prod, typename Derived::Scalar x) +{ return ScaledProduct(prod.derived(), x); } + +template +typename internal::enable_if::value, + const ScaledProduct >::type +operator*(const ProductBase& prod, typename Derived::RealScalar x) +{ return ScaledProduct(prod.derived(), x); } + + +template +const ScaledProduct +operator*(typename Derived::Scalar x,const ProductBase& prod) +{ return ScaledProduct(prod.derived(), x); } + +template +typename internal::enable_if::value, + const ScaledProduct >::type +operator*(typename Derived::RealScalar x,const ProductBase& prod) +{ return ScaledProduct(prod.derived(), x); } + +namespace internal { +template +struct traits > + : traits, + typename NestedProduct::_LhsNested, + typename NestedProduct::_RhsNested> > +{ + typedef typename traits::StorageKind StorageKind; +}; +} + +template +class ScaledProduct + : public ProductBase, + typename NestedProduct::_LhsNested, + typename NestedProduct::_RhsNested> +{ + public: + typedef ProductBase, + typename NestedProduct::_LhsNested, + typename NestedProduct::_RhsNested> Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::PlainObject PlainObject; +// EIGEN_PRODUCT_PUBLIC_INTERFACE(ScaledProduct) + + ScaledProduct(const NestedProduct& prod, Scalar x) + : Base(prod.lhs(),prod.rhs()), m_prod(prod), m_alpha(x) {} + + template + inline void evalTo(Dest& dst) const { dst.setZero(); scaleAndAddTo(dst, Scalar(1)); } + + template + inline void addTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(1)); } + + template + inline void subTo(Dest& dst) const { scaleAndAddTo(dst, Scalar(-1)); } + + template + inline void scaleAndAddTo(Dest& dst,Scalar alpha) const { m_prod.derived().scaleAndAddTo(dst,alpha * m_alpha); } + + const Scalar& alpha() const { return m_alpha; } + + protected: + const NestedProduct& m_prod; + Scalar m_alpha; +}; + +/** \internal + * Overloaded to perform an efficient C = (A*B).lazy() */ +template +template +Derived& MatrixBase::lazyAssign(const ProductBase& other) +{ + other.derived().evalTo(derived()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_PRODUCTBASE_H diff --git a/latan/Eigen/src/Core/Random.h b/latan/Eigen/src/Core/Random.h new file mode 100644 index 0000000..a9f7f43 --- /dev/null +++ b/latan/Eigen/src/Core/Random.h @@ -0,0 +1,152 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_RANDOM_H +#define EIGEN_RANDOM_H + +namespace Eigen { + +namespace internal { + +template struct scalar_random_op { + EIGEN_EMPTY_STRUCT_CTOR(scalar_random_op) + template + inline const Scalar operator() (Index, Index = 0) const { return random(); } +}; + +template +struct functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false, IsRepeatable = false }; }; + +} // end namespace internal + +/** \returns a random matrix expression + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. 
For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used + * instead. + * + * Example: \include MatrixBase_random_int_int.cpp + * Output: \verbinclude MatrixBase_random_int_int.out + * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * + * \sa MatrixBase::setRandom(), MatrixBase::Random(Index), MatrixBase::Random() + */ +template +inline const CwiseNullaryOp::Scalar>, Derived> +DenseBase::Random(Index rows, Index cols) +{ + return NullaryExpr(rows, cols, internal::scalar_random_op()); +} + +/** \returns a random vector expression + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Random() should be used + * instead. + * + * Example: \include MatrixBase_random_int.cpp + * Output: \verbinclude MatrixBase_random_int.out + * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary vector whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * + * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random() + */ +template +inline const CwiseNullaryOp::Scalar>, Derived> +DenseBase::Random(Index size) +{ + return NullaryExpr(size, internal::scalar_random_op()); +} + +/** \returns a fixed-size random matrix or vector expression + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_random.cpp + * Output: \verbinclude MatrixBase_random.out + * + * This expression has the "evaluate before nesting" flag so that it will be evaluated into + * a temporary matrix whenever it is nested in a larger expression. This prevents unexpected + * behavior with expressions involving random matrices. + * + * \sa MatrixBase::setRandom(), MatrixBase::Random(Index,Index), MatrixBase::Random(Index) + */ +template +inline const CwiseNullaryOp::Scalar>, Derived> +DenseBase::Random() +{ + return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, internal::scalar_random_op()); +} + +/** Sets all coefficients in this expression to random values. + * + * Example: \include MatrixBase_setRandom.cpp + * Output: \verbinclude MatrixBase_setRandom.out + * + * \sa class CwiseNullaryOp, setRandom(Index), setRandom(Index,Index) + */ +template +inline Derived& DenseBase::setRandom() +{ + return *this = Random(rows(), cols()); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to random values. + * + * \only_for_vectors + * + * Example: \include Matrix_setRandom_int.cpp + * Output: \verbinclude Matrix_setRandom_int.out + * + * \sa MatrixBase::setRandom(), setRandom(Index,Index), class CwiseNullaryOp, MatrixBase::Random() + */ +template +EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setRandom(Index size) +{ + resize(size); + return setRandom(); +} + +/** Resizes to the given size, and sets all coefficients in this expression to random values. 
+ * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setRandom_int_int.cpp + * Output: \verbinclude Matrix_setRandom_int_int.out + * + * \sa MatrixBase::setRandom(), setRandom(Index), class CwiseNullaryOp, MatrixBase::Random() + */ +template +EIGEN_STRONG_INLINE Derived& +PlainObjectBase::setRandom(Index rows, Index cols) +{ + resize(rows, cols); + return setRandom(); +} + +} // end namespace Eigen + +#endif // EIGEN_RANDOM_H diff --git a/latan/Eigen/src/Core/Redux.h b/latan/Eigen/src/Core/Redux.h new file mode 100644 index 0000000..b7ce7c6 --- /dev/null +++ b/latan/Eigen/src/Core/Redux.h @@ -0,0 +1,406 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_REDUX_H +#define EIGEN_REDUX_H + +namespace Eigen { + +namespace internal { + +// TODO +// * implement other kind of vectorization +// * factorize code + +/*************************************************************************** +* Part 1 : the logic deciding a strategy for vectorization and unrolling +***************************************************************************/ + +template +struct redux_traits +{ +public: + enum { + PacketSize = packet_traits::size, + InnerMaxSize = int(Derived::IsRowMajor) + ? Derived::MaxColsAtCompileTime + : Derived::MaxRowsAtCompileTime + }; + + enum { + MightVectorize = (int(Derived::Flags)&ActualPacketAccessBit) + && (functor_traits::PacketAccess), + MayLinearVectorize = MightVectorize && (int(Derived::Flags)&LinearAccessBit), + MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize + }; + +public: + enum { + Traversal = int(MayLinearVectorize) ? int(LinearVectorizedTraversal) + : int(MaySliceVectorize) ? int(SliceVectorizedTraversal) + : int(DefaultTraversal) + }; + +public: + enum { + Cost = ( Derived::SizeAtCompileTime == Dynamic + || Derived::CoeffReadCost == Dynamic + || (Derived::SizeAtCompileTime!=1 && functor_traits::Cost == Dynamic) + ) ? Dynamic + : Derived::SizeAtCompileTime * Derived::CoeffReadCost + + (Derived::SizeAtCompileTime-1) * functor_traits::Cost, + UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Traversal) == int(DefaultTraversal) ? 1 : int(PacketSize)) + }; + +public: + enum { + Unrolling = Cost != Dynamic && Cost <= UnrollingLimit + ? 
CompleteUnrolling + : NoUnrolling + }; +}; + +/*************************************************************************** +* Part 2 : unrollers +***************************************************************************/ + +/*** no vectorization ***/ + +template +struct redux_novec_unroller +{ + enum { + HalfLength = Length/2 + }; + + typedef typename Derived::Scalar Scalar; + + static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func& func) + { + return func(redux_novec_unroller::run(mat,func), + redux_novec_unroller::run(mat,func)); + } +}; + +template +struct redux_novec_unroller +{ + enum { + outer = Start / Derived::InnerSizeAtCompileTime, + inner = Start % Derived::InnerSizeAtCompileTime + }; + + typedef typename Derived::Scalar Scalar; + + static EIGEN_STRONG_INLINE Scalar run(const Derived &mat, const Func&) + { + return mat.coeffByOuterInner(outer, inner); + } +}; + +// This is actually dead code and will never be called. It is required +// to prevent false warnings regarding failed inlining though +// for 0 length run() will never be called at all. +template +struct redux_novec_unroller +{ + typedef typename Derived::Scalar Scalar; + static EIGEN_STRONG_INLINE Scalar run(const Derived&, const Func&) { return Scalar(); } +}; + +/*** vectorization ***/ + +template +struct redux_vec_unroller +{ + enum { + PacketSize = packet_traits::size, + HalfLength = Length/2 + }; + + typedef typename Derived::Scalar Scalar; + typedef typename packet_traits::type PacketScalar; + + static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func& func) + { + return func.packetOp( + redux_vec_unroller::run(mat,func), + redux_vec_unroller::run(mat,func) ); + } +}; + +template +struct redux_vec_unroller +{ + enum { + index = Start * packet_traits::size, + outer = index / int(Derived::InnerSizeAtCompileTime), + inner = index % int(Derived::InnerSizeAtCompileTime), + alignment = (Derived::Flags & AlignedBit) ? 
Aligned : Unaligned + }; + + typedef typename Derived::Scalar Scalar; + typedef typename packet_traits::type PacketScalar; + + static EIGEN_STRONG_INLINE PacketScalar run(const Derived &mat, const Func&) + { + return mat.template packetByOuterInner(outer, inner); + } +}; + +/*************************************************************************** +* Part 3 : implementation of all cases +***************************************************************************/ + +template::Traversal, + int Unrolling = redux_traits::Unrolling +> +struct redux_impl; + +template +struct redux_impl +{ + typedef typename Derived::Scalar Scalar; + typedef typename Derived::Index Index; + static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func) + { + eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); + Scalar res; + res = mat.coeffByOuterInner(0, 0); + for(Index i = 1; i < mat.innerSize(); ++i) + res = func(res, mat.coeffByOuterInner(0, i)); + for(Index i = 1; i < mat.outerSize(); ++i) + for(Index j = 0; j < mat.innerSize(); ++j) + res = func(res, mat.coeffByOuterInner(i, j)); + return res; + } +}; + +template +struct redux_impl + : public redux_novec_unroller +{}; + +template +struct redux_impl +{ + typedef typename Derived::Scalar Scalar; + typedef typename packet_traits::type PacketScalar; + typedef typename Derived::Index Index; + + static Scalar run(const Derived& mat, const Func& func) + { + const Index size = mat.size(); + eigen_assert(size && "you are using an empty matrix"); + const Index packetSize = packet_traits::size; + const Index alignedStart = internal::first_aligned(mat); + enum { + alignment = bool(Derived::Flags & DirectAccessBit) || bool(Derived::Flags & AlignedBit) + ? Aligned : Unaligned + }; + const Index alignedSize2 = ((size-alignedStart)/(2*packetSize))*(2*packetSize); + const Index alignedSize = ((size-alignedStart)/(packetSize))*(packetSize); + const Index alignedEnd2 = alignedStart + alignedSize2; + const Index alignedEnd = alignedStart + alignedSize; + Scalar res; + if(alignedSize) + { + PacketScalar packet_res0 = mat.template packet(alignedStart); + if(alignedSize>packetSize) // we have at least two packets to partly unroll the loop + { + PacketScalar packet_res1 = mat.template packet(alignedStart+packetSize); + for(Index index = alignedStart + 2*packetSize; index < alignedEnd2; index += 2*packetSize) + { + packet_res0 = func.packetOp(packet_res0, mat.template packet(index)); + packet_res1 = func.packetOp(packet_res1, mat.template packet(index+packetSize)); + } + + packet_res0 = func.packetOp(packet_res0,packet_res1); + if(alignedEnd>alignedEnd2) + packet_res0 = func.packetOp(packet_res0, mat.template packet(alignedEnd2)); + } + res = func.predux(packet_res0); + + for(Index index = 0; index < alignedStart; ++index) + res = func(res,mat.coeff(index)); + + for(Index index = alignedEnd; index < size; ++index) + res = func(res,mat.coeff(index)); + } + else // too small to vectorize anything. + // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. 
+ { + res = mat.coeff(0); + for(Index index = 1; index < size; ++index) + res = func(res,mat.coeff(index)); + } + + return res; + } +}; + +template +struct redux_impl +{ + typedef typename Derived::Scalar Scalar; + typedef typename packet_traits::type PacketScalar; + typedef typename Derived::Index Index; + + static Scalar run(const Derived& mat, const Func& func) + { + eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); + const Index innerSize = mat.innerSize(); + const Index outerSize = mat.outerSize(); + enum { + packetSize = packet_traits::size + }; + const Index packetedInnerSize = ((innerSize)/packetSize)*packetSize; + Scalar res; + if(packetedInnerSize) + { + PacketScalar packet_res = mat.template packet(0,0); + for(Index j=0; j(j,i)); + + res = func.predux(packet_res); + for(Index j=0; j::run(mat, func); + } + + return res; + } +}; + +template +struct redux_impl +{ + typedef typename Derived::Scalar Scalar; + typedef typename packet_traits::type PacketScalar; + enum { + PacketSize = packet_traits::size, + Size = Derived::SizeAtCompileTime, + VectorizedSize = (Size / PacketSize) * PacketSize + }; + static EIGEN_STRONG_INLINE Scalar run(const Derived& mat, const Func& func) + { + eigen_assert(mat.rows()>0 && mat.cols()>0 && "you are using an empty matrix"); + Scalar res = func.predux(redux_vec_unroller::run(mat,func)); + if (VectorizedSize != Size) + res = func(res,redux_novec_unroller::run(mat,func)); + return res; + } +}; + +} // end namespace internal + +/*************************************************************************** +* Part 4 : public API +***************************************************************************/ + + +/** \returns the result of a full redux operation on the whole matrix or vector using \a func + * + * The template parameter \a BinaryOp is the type of the functor \a func which must be + * an associative operator. Both current STL and TR1 functor styles are handled. 
+ * + * \sa DenseBase::sum(), DenseBase::minCoeff(), DenseBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise() + */ +template +template +EIGEN_STRONG_INLINE typename internal::result_of::Scalar)>::type +DenseBase::redux(const Func& func) const +{ + typedef typename internal::remove_all::type ThisNested; + return internal::redux_impl + ::run(derived(), func); +} + +/** \returns the minimum of all coefficients of *this + */ +template +EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::minCoeff() const +{ + return this->redux(Eigen::internal::scalar_min_op()); +} + +/** \returns the maximum of all coefficients of *this + */ +template +EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::maxCoeff() const +{ + return this->redux(Eigen::internal::scalar_max_op()); +} + +/** \returns the sum of all coefficients of *this + * + * \sa trace(), prod(), mean() + */ +template +EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::sum() const +{ + if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) + return Scalar(0); + return this->redux(Eigen::internal::scalar_sum_op()); +} + +/** \returns the mean of all coefficients of *this +* +* \sa trace(), prod(), sum() +*/ +template +EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::mean() const +{ + return Scalar(this->redux(Eigen::internal::scalar_sum_op())) / Scalar(this->size()); +} + +/** \returns the product of all coefficients of *this + * + * Example: \include MatrixBase_prod.cpp + * Output: \verbinclude MatrixBase_prod.out + * + * \sa sum(), mean(), trace() + */ +template +EIGEN_STRONG_INLINE typename internal::traits::Scalar +DenseBase::prod() const +{ + if(SizeAtCompileTime==0 || (SizeAtCompileTime==Dynamic && size()==0)) + return Scalar(1); + return this->redux(Eigen::internal::scalar_product_op()); +} + +/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal. + * + * \c *this can be any matrix, not necessarily square. + * + * \sa diagonal(), sum() + */ +template +EIGEN_STRONG_INLINE typename internal::traits::Scalar +MatrixBase::trace() const +{ + return derived().diagonal().sum(); +} + +} // end namespace Eigen + +#endif // EIGEN_REDUX_H diff --git a/latan/Eigen/src/Core/Replicate.h b/latan/Eigen/src/Core/Replicate.h new file mode 100644 index 0000000..b61fdc2 --- /dev/null +++ b/latan/Eigen/src/Core/Replicate.h @@ -0,0 +1,177 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_REPLICATE_H +#define EIGEN_REPLICATE_H + +namespace Eigen { + +/** + * \class Replicate + * \ingroup Core_Module + * + * \brief Expression of the multiple replication of a matrix or vector + * + * \param MatrixType the type of the object we are replicating + * + * This class represents an expression of the multiple replication of a matrix or vector. + * It is the return type of DenseBase::replicate() and most of the time + * this is the only way it is used. 
+ * + * \sa DenseBase::replicate() + */ + +namespace internal { +template +struct traits > + : traits +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + enum { + Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor + }; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; + enum { + RowsAtCompileTime = RowFactor==Dynamic || int(MatrixType::RowsAtCompileTime)==Dynamic + ? Dynamic + : RowFactor * MatrixType::RowsAtCompileTime, + ColsAtCompileTime = ColFactor==Dynamic || int(MatrixType::ColsAtCompileTime)==Dynamic + ? Dynamic + : ColFactor * MatrixType::ColsAtCompileTime, + //FIXME we don't propagate the max sizes !!! + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + IsRowMajor = MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1 ? 1 + : MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1 ? 0 + : (MatrixType::Flags & RowMajorBit) ? 1 : 0, + Flags = (_MatrixTypeNested::Flags & HereditaryBits & ~RowMajorBit) | (IsRowMajor ? RowMajorBit : 0), + CoeffReadCost = _MatrixTypeNested::CoeffReadCost + }; +}; +} + +template class Replicate + : public internal::dense_xpr_base< Replicate >::type +{ + typedef typename internal::traits::MatrixTypeNested MatrixTypeNested; + typedef typename internal::traits::_MatrixTypeNested _MatrixTypeNested; + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Replicate) + + template + inline explicit Replicate(const OriginalMatrixType& matrix) + : m_matrix(matrix), m_rowFactor(RowFactor), m_colFactor(ColFactor) + { + EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), + THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) + eigen_assert(RowFactor!=Dynamic && ColFactor!=Dynamic); + } + + template + inline Replicate(const OriginalMatrixType& matrix, Index rowFactor, Index colFactor) + : m_matrix(matrix), m_rowFactor(rowFactor), m_colFactor(colFactor) + { + EIGEN_STATIC_ASSERT((internal::is_same::type,OriginalMatrixType>::value), + THE_MATRIX_OR_EXPRESSION_THAT_YOU_PASSED_DOES_NOT_HAVE_THE_EXPECTED_TYPE) + } + + inline Index rows() const { return m_matrix.rows() * m_rowFactor.value(); } + inline Index cols() const { return m_matrix.cols() * m_colFactor.value(); } + + inline Scalar coeff(Index row, Index col) const + { + // try to avoid using modulo; this is a pure optimization strategy + const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 + : RowFactor==1 ? row + : row%m_matrix.rows(); + const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 + : ColFactor==1 ? col + : col%m_matrix.cols(); + + return m_matrix.coeff(actual_row, actual_col); + } + template + inline PacketScalar packet(Index row, Index col) const + { + const Index actual_row = internal::traits::RowsAtCompileTime==1 ? 0 + : RowFactor==1 ? row + : row%m_matrix.rows(); + const Index actual_col = internal::traits::ColsAtCompileTime==1 ? 0 + : ColFactor==1 ? 
col + : col%m_matrix.cols(); + + return m_matrix.template packet(actual_row, actual_col); + } + + const _MatrixTypeNested& nestedExpression() const + { + return m_matrix; + } + + protected: + MatrixTypeNested m_matrix; + const internal::variable_if_dynamic m_rowFactor; + const internal::variable_if_dynamic m_colFactor; +}; + +/** + * \return an expression of the replication of \c *this + * + * Example: \include MatrixBase_replicate.cpp + * Output: \verbinclude MatrixBase_replicate.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(Index,Index), class Replicate + */ +template +template +inline const Replicate +DenseBase::replicate() const +{ + return Replicate(derived()); +} + +/** + * \return an expression of the replication of \c *this + * + * Example: \include MatrixBase_replicate_int_int.cpp + * Output: \verbinclude MatrixBase_replicate_int_int.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate + */ +template +inline const Replicate +DenseBase::replicate(Index rowFactor,Index colFactor) const +{ + return Replicate(derived(),rowFactor,colFactor); +} + +/** + * \return an expression of the replication of each column (or row) of \c *this + * + * Example: \include DirectionWise_replicate_int.cpp + * Output: \verbinclude DirectionWise_replicate_int.out + * + * \sa VectorwiseOp::replicate(), DenseBase::replicate(), class Replicate + */ +template +const typename VectorwiseOp::ReplicateReturnType +VectorwiseOp::replicate(Index factor) const +{ + return typename VectorwiseOp::ReplicateReturnType + (_expression(),Direction==Vertical?factor:1,Direction==Horizontal?factor:1); +} + +} // end namespace Eigen + +#endif // EIGEN_REPLICATE_H diff --git a/latan/Eigen/src/Core/ReturnByValue.h b/latan/Eigen/src/Core/ReturnByValue.h new file mode 100644 index 0000000..613912f --- /dev/null +++ b/latan/Eigen/src/Core/ReturnByValue.h @@ -0,0 +1,88 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009-2010 Gael Guennebaud +// Copyright (C) 2009-2010 Benoit Jacob +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_RETURNBYVALUE_H +#define EIGEN_RETURNBYVALUE_H + +namespace Eigen { + +/** \class ReturnByValue + * \ingroup Core_Module + * + */ + +namespace internal { + +template +struct traits > + : public traits::ReturnType> +{ + enum { + // We're disabling the DirectAccess because e.g. the constructor of + // the Block-with-DirectAccess expression requires to have a coeffRef method. + // Also, we don't want to have to implement the stride stuff. + Flags = (traits::ReturnType>::Flags + | EvalBeforeNestingBit) & ~DirectAccessBit + }; +}; + +/* The ReturnByValue object doesn't even have a coeff() method. + * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix. + * So internal::nested always gives the plain return matrix type. + * + * FIXME: I don't understand why we need this specialization: isn't this taken care of by the EvalBeforeNestingBit ?? 
+ */ +template +struct nested, n, PlainObject> +{ + typedef typename traits::ReturnType type; +}; + +} // end namespace internal + +template class ReturnByValue + : public internal::dense_xpr_base< ReturnByValue >::type +{ + public: + typedef typename internal::traits::ReturnType ReturnType; + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue) + + template + inline void evalTo(Dest& dst) const + { static_cast(this)->evalTo(dst); } + inline Index rows() const { return static_cast(this)->rows(); } + inline Index cols() const { return static_cast(this)->cols(); } + +#ifndef EIGEN_PARSED_BY_DOXYGEN +#define Unusable YOU_ARE_TRYING_TO_ACCESS_A_SINGLE_COEFFICIENT_IN_A_SPECIAL_EXPRESSION_WHERE_THAT_IS_NOT_ALLOWED_BECAUSE_THAT_WOULD_BE_INEFFICIENT + class Unusable{ + Unusable(const Unusable&) {} + Unusable& operator=(const Unusable&) {return *this;} + }; + const Unusable& coeff(Index) const { return *reinterpret_cast(this); } + const Unusable& coeff(Index,Index) const { return *reinterpret_cast(this); } + Unusable& coeffRef(Index) { return *reinterpret_cast(this); } + Unusable& coeffRef(Index,Index) { return *reinterpret_cast(this); } +#endif +}; + +template +template +Derived& DenseBase::operator=(const ReturnByValue& other) +{ + other.evalTo(derived()); + return derived(); +} + +} // end namespace Eigen + +#endif // EIGEN_RETURNBYVALUE_H diff --git a/latan/Eigen/src/Core/Reverse.h b/latan/Eigen/src/Core/Reverse.h new file mode 100644 index 0000000..e30ae3d --- /dev/null +++ b/latan/Eigen/src/Core/Reverse.h @@ -0,0 +1,224 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2009 Ricard Marxer +// Copyright (C) 2009-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_REVERSE_H +#define EIGEN_REVERSE_H + +namespace Eigen { + +/** \class Reverse + * \ingroup Core_Module + * + * \brief Expression of the reverse of a vector or matrix + * + * \param MatrixType the type of the object of which we are taking the reverse + * + * This class represents an expression of the reverse of a vector. + * It is the return type of MatrixBase::reverse() and VectorwiseOp::reverse() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::reverse(), VectorwiseOp::reverse() + */ + +namespace internal { + +template +struct traits > + : traits +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename traits::StorageKind StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename nested::type MatrixTypeNested; + typedef typename remove_reference::type _MatrixTypeNested; + enum { + RowsAtCompileTime = MatrixType::RowsAtCompileTime, + ColsAtCompileTime = MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, + + // let's enable LinearAccess only with vectorization because of the product overhead + LinearAccess = ( (Direction==BothDirections) && (int(_MatrixTypeNested::Flags)&PacketAccessBit) ) + ? 
LinearAccessBit : 0, + + Flags = int(_MatrixTypeNested::Flags) & (HereditaryBits | LvalueBit | PacketAccessBit | LinearAccess), + + CoeffReadCost = _MatrixTypeNested::CoeffReadCost + }; +}; + +template struct reverse_packet_cond +{ + static inline PacketScalar run(const PacketScalar& x) { return preverse(x); } +}; + +template struct reverse_packet_cond +{ + static inline PacketScalar run(const PacketScalar& x) { return x; } +}; + +} // end namespace internal + +template class Reverse + : public internal::dense_xpr_base< Reverse >::type +{ + public: + + typedef typename internal::dense_xpr_base::type Base; + EIGEN_DENSE_PUBLIC_INTERFACE(Reverse) + using Base::IsRowMajor; + + // next line is necessary because otherwise const version of operator() + // is hidden by non-const version defined in this file + using Base::operator(); + + protected: + enum { + PacketSize = internal::packet_traits::size, + IsColMajor = !IsRowMajor, + ReverseRow = (Direction == Vertical) || (Direction == BothDirections), + ReverseCol = (Direction == Horizontal) || (Direction == BothDirections), + OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1, + OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1, + ReversePacket = (Direction == BothDirections) + || ((Direction == Vertical) && IsColMajor) + || ((Direction == Horizontal) && IsRowMajor) + }; + typedef internal::reverse_packet_cond reverse_packet; + public: + + inline Reverse(const MatrixType& matrix) : m_matrix(matrix) { } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Reverse) + + inline Index rows() const { return m_matrix.rows(); } + inline Index cols() const { return m_matrix.cols(); } + + inline Index innerStride() const + { + return -m_matrix.innerStride(); + } + + inline Scalar& operator()(Index row, Index col) + { + eigen_assert(row >= 0 && row < rows() && col >= 0 && col < cols()); + return coeffRef(row, col); + } + + inline Scalar& coeffRef(Index row, Index col) + { + return m_matrix.const_cast_derived().coeffRef(ReverseRow ? m_matrix.rows() - row - 1 : row, + ReverseCol ? m_matrix.cols() - col - 1 : col); + } + + inline CoeffReturnType coeff(Index row, Index col) const + { + return m_matrix.coeff(ReverseRow ? m_matrix.rows() - row - 1 : row, + ReverseCol ? m_matrix.cols() - col - 1 : col); + } + + inline CoeffReturnType coeff(Index index) const + { + return m_matrix.coeff(m_matrix.size() - index - 1); + } + + inline Scalar& coeffRef(Index index) + { + return m_matrix.const_cast_derived().coeffRef(m_matrix.size() - index - 1); + } + + inline Scalar& operator()(Index index) + { + eigen_assert(index >= 0 && index < m_matrix.size()); + return coeffRef(index); + } + + template + inline const PacketScalar packet(Index row, Index col) const + { + return reverse_packet::run(m_matrix.template packet( + ReverseRow ? m_matrix.rows() - row - OffsetRow : row, + ReverseCol ? m_matrix.cols() - col - OffsetCol : col)); + } + + template + inline void writePacket(Index row, Index col, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket( + ReverseRow ? m_matrix.rows() - row - OffsetRow : row, + ReverseCol ? 
m_matrix.cols() - col - OffsetCol : col, + reverse_packet::run(x)); + } + + template + inline const PacketScalar packet(Index index) const + { + return internal::preverse(m_matrix.template packet( m_matrix.size() - index - PacketSize )); + } + + template + inline void writePacket(Index index, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket(m_matrix.size() - index - PacketSize, internal::preverse(x)); + } + + const typename internal::remove_all::type& + nestedExpression() const + { + return m_matrix; + } + + protected: + typename MatrixType::Nested m_matrix; +}; + +/** \returns an expression of the reverse of *this. + * + * Example: \include MatrixBase_reverse.cpp + * Output: \verbinclude MatrixBase_reverse.out + * + */ +template +inline typename DenseBase::ReverseReturnType +DenseBase::reverse() +{ + return derived(); +} + +/** This is the const version of reverse(). */ +template +inline const typename DenseBase::ConstReverseReturnType +DenseBase::reverse() const +{ + return derived(); +} + +/** This is the "in place" version of reverse: it reverses \c *this. + * + * In most cases it is probably better to simply use the reversed expression + * of a matrix. However, when reversing the matrix data itself is really needed, + * then this "in-place" version is probably the right choice because it provides + * the following additional features: + * - less error prone: doing the same operation with .reverse() requires special care: + * \code m = m.reverse().eval(); \endcode + * - this API allows to avoid creating a temporary (the current implementation creates a temporary, but that could be avoided using swap) + * - it allows future optimizations (cache friendliness, etc.) + * + * \sa reverse() */ +template +inline void DenseBase::reverseInPlace() +{ + derived() = derived().reverse().eval(); +} + +} // end namespace Eigen + +#endif // EIGEN_REVERSE_H diff --git a/latan/Eigen/src/Core/Select.h b/latan/Eigen/src/Core/Select.h new file mode 100644 index 0000000..2bf6e91 --- /dev/null +++ b/latan/Eigen/src/Core/Select.h @@ -0,0 +1,162 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008-2010 Gael Guennebaud +// +// This Source Code Form is subject to the terms of the Mozilla +// Public License v. 2.0. If a copy of the MPL was not distributed +// with this file, You can obtain one at http://mozilla.org/MPL/2.0/. + +#ifndef EIGEN_SELECT_H +#define EIGEN_SELECT_H + +namespace Eigen { + +/** \class Select + * \ingroup Core_Module + * + * \brief Expression of a coefficient wise version of the C++ ternary operator ?: + * + * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix + * \param ThenMatrixType the type of the \em then expression + * \param ElseMatrixType the type of the \em else expression + * + * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:. + * It is the return type of DenseBase::select() and most of the time this is the only way it is used. 
+ * + * \sa DenseBase::select(const DenseBase&, const DenseBase&) const + */ + +namespace internal { +template +struct traits > + : traits +{ + typedef typename traits::Scalar Scalar; + typedef Dense StorageKind; + typedef typename traits::XprKind XprKind; + typedef typename ConditionMatrixType::Nested ConditionMatrixNested; + typedef typename ThenMatrixType::Nested ThenMatrixNested; + typedef typename ElseMatrixType::Nested ElseMatrixNested; + enum { + RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime, + ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, + Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits, + CoeffReadCost = traits::type>::CoeffReadCost + + EIGEN_SIZE_MAX(traits::type>::CoeffReadCost, + traits::type>::CoeffReadCost) + }; +}; +} + +template +class Select : internal::no_assignment_operator, + public internal::dense_xpr_base< Select >::type +{ + public: + + typedef typename internal::dense_xpr_base