authorArnur Nigmetov <a.nigmetov@gmail.com>2016-06-06 10:50:37 +0200
committerArnur Nigmetov <a.nigmetov@gmail.com>2016-06-06 10:50:37 +0200
commitad17f9570a5f0a35cde44cc206255e889821a5ca (patch)
tree6cb08c80206106a6b1d2ac605bf0b673eaed1d95
parent0a997312d06972b8eef9f1de21fb4d827b47eca7 (diff)
Add actual source from previous repos
-rw-r--r--geom_bottleneck/.gitignore37
-rw-r--r--geom_bottleneck/COPYING674
-rw-r--r--geom_bottleneck/COPYING.LESSER165
-rw-r--r--geom_bottleneck/README98
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/ANN.h906
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/ANNperf.h225
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/ANNx.h127
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/bd_tree.h102
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/kd_fix_rad_search.h46
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/kd_pr_search.h51
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/kd_search.h50
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/kd_split.h123
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/kd_tree.h253
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/kd_util.h126
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/pr_queue.h127
-rw-r--r--geom_bottleneck/bottleneck/include/ANN/pr_queue_k.h120
-rw-r--r--geom_bottleneck/bottleneck/include/basic_defs_bt.h188
-rw-r--r--geom_bottleneck/bottleneck/include/bottleneck.h100
-rw-r--r--geom_bottleneck/bottleneck/include/bound_match.h80
-rw-r--r--geom_bottleneck/bottleneck/include/def_debug.h29
-rw-r--r--geom_bottleneck/bottleneck/include/neighb_oracle.h91
-rw-r--r--geom_bottleneck/bottleneck/lib/dummy1
-rw-r--r--geom_bottleneck/bottleneck/src/ann/ANN.cpp230
-rw-r--r--geom_bottleneck/bottleneck/src/ann/bd_fix_rad_search.cpp64
-rw-r--r--geom_bottleneck/bottleneck/src/ann/bd_pr_search.cpp66
-rw-r--r--geom_bottleneck/bottleneck/src/ann/bd_search.cpp64
-rw-r--r--geom_bottleneck/bottleneck/src/ann/bd_tree.cpp419
-rw-r--r--geom_bottleneck/bottleneck/src/ann/kd_dump.cpp447
-rw-r--r--geom_bottleneck/bottleneck/src/ann/kd_fix_rad_search.cpp185
-rw-r--r--geom_bottleneck/bottleneck/src/ann/kd_pr_search.cpp221
-rw-r--r--geom_bottleneck/bottleneck/src/ann/kd_search.cpp298
-rw-r--r--geom_bottleneck/bottleneck/src/ann/kd_split.cpp632
-rw-r--r--geom_bottleneck/bottleneck/src/ann/kd_tree.cpp560
-rw-r--r--geom_bottleneck/bottleneck/src/ann/kd_util.cpp441
-rw-r--r--geom_bottleneck/bottleneck/src/basic_defs.cpp230
-rw-r--r--geom_bottleneck/bottleneck/src/bottleneck.cpp555
-rw-r--r--geom_bottleneck/bottleneck/src/bound_match.cpp529
-rw-r--r--geom_bottleneck/bottleneck/src/brute.cpp110
-rw-r--r--geom_bottleneck/bottleneck/src/neighb_oracle.cpp278
-rw-r--r--geom_bottleneck/example/bottleneck_dist.cpp55
-rw-r--r--geom_matching/.gitignore32
-rw-r--r--geom_matching/README93
-rw-r--r--geom_matching/wasserstein/example/wasserstein_dist.cpp89
-rw-r--r--geom_matching/wasserstein/include/auction_oracle.h305
-rw-r--r--geom_matching/wasserstein/include/auction_runner_gs.h122
-rw-r--r--geom_matching/wasserstein/include/auction_runner_jac.h97
-rw-r--r--geom_matching/wasserstein/include/basic_defs_ws.h114
-rw-r--r--geom_matching/wasserstein/include/def_debug.h36
-rw-r--r--geom_matching/wasserstein/include/dnn/geometry/euclidean-fixed.h190
-rw-r--r--geom_matching/wasserstein/include/dnn/local/kd-tree.h90
-rw-r--r--geom_matching/wasserstein/include/dnn/local/kd-tree.hpp303
-rw-r--r--geom_matching/wasserstein/include/dnn/local/search-functors.h89
-rw-r--r--geom_matching/wasserstein/include/dnn/parallel/tbb.h220
-rw-r--r--geom_matching/wasserstein/include/dnn/parallel/utils.h94
-rw-r--r--geom_matching/wasserstein/include/dnn/utils.h41
-rw-r--r--geom_matching/wasserstein/include/wasserstein.h110
-rw-r--r--geom_matching/wasserstein/src/auction_oracle.cpp1310
-rw-r--r--geom_matching/wasserstein/src/auction_runner_gs.cpp341
-rw-r--r--geom_matching/wasserstein/src/auction_runner_jac.cpp365
-rw-r--r--geom_matching/wasserstein/src/basic_defs.cpp138
-rw-r--r--geom_matching/wasserstein/src/wasserstein.cpp121
61 files changed, 13403 insertions, 0 deletions
diff --git a/geom_bottleneck/.gitignore b/geom_bottleneck/.gitignore
new file mode 100644
index 0000000..f8d5a1e
--- /dev/null
+++ b/geom_bottleneck/.gitignore
@@ -0,0 +1,37 @@
+/*.cfg
+/build
+*.gitattributes
+*.opensdf
+*.sdf
+*.suo
+*.vcxproj
+*.filters
+*.log
+*.tlog
+*.lastbuildstate
+*.obj
+*.idb
+*.pdb
+*.exe
+*.ilk
+*.user
+*.out
+*.output
+*.pyc
+*.*~*
+*.swp
+*.nfs*
+*.txt
+*.pdf
+*.o
+*.d
+*.dll
+*.lib
+*.exe
+makeout
+/MS_Win32
+/bin
+bottleneck/build/
+ann/lib
+ann/bin
+ann/lib/dummy
diff --git a/geom_bottleneck/COPYING b/geom_bottleneck/COPYING
new file mode 100644
index 0000000..94a9ed0
--- /dev/null
+++ b/geom_bottleneck/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/geom_bottleneck/COPYING.LESSER b/geom_bottleneck/COPYING.LESSER
new file mode 100644
index 0000000..65c5ca8
--- /dev/null
+++ b/geom_bottleneck/COPYING.LESSER
@@ -0,0 +1,165 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/geom_bottleneck/README b/geom_bottleneck/README
new file mode 100644
index 0000000..8dda148
--- /dev/null
+++ b/geom_bottleneck/README
@@ -0,0 +1,98 @@
+Accompanying paper: M. Kerber, D. Morozov, A. Nigmetov. Geometry Helps To Compare Persistence Diagrams (ALENEX 2016, http://www.geometrie.tugraz.at/nigmetov/geom_dist.pdf)
+
+Bug reports can be sent to "nigmetov EMAIL SIGN tugraz DOT at".
+
+# Dependencies
+
+The program uses the ANN library (http://www.cs.umd.edu/~mount/ANN/),
+modified to support deletion of points from k-d trees.
+The modified version is contained in "bottleneck/src/ann" and "bottleneck/include/ANN"
+directories; there is no need to build ANN separately or to include ANN's headers.
+
+Your compiler must support C++11.
+
+# Usage:
+
+1. To use the standalone command-line utility bottleneck_dist:
+
+bottleneck_dist file1 file2 [relative_error]
+
+If the relative error is not supplied, the exact distance is computed.
+file1 and file2 must contain persistence diagrams in plain text format
+(one point per line, empty lines are ignored, comments can be made with #):
+
+# this is what your input can look like
+x_1 y_1 # two real numbers per line
+...
+# empty lines or comments are ignored
+x_n y_n
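+
+For example, assuming two input files diagramA.txt and diagramB.txt
+(hypothetical names), the exact distance and a 1% approximation are obtained with:
+
+bottleneck_dist diagramA.txt diagramB.txt
+bottleneck_dist diagramA.txt diagramB.txt 0.01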
+
+2. To use from your code:
+
+#include "bottleneck.h"
+
+// All classes and functions are in the geom_bt namespace
+// (including the modified ANN classes).
+
+std::vector<std::pair<double, double>> diagram1, diagram2;
+// any container class that supports range-for loops will do.
+// A pair represents a single point,
+// first component = x-coordinate,
+// second component = y-coordinate.
+// ...
+// load your diagrams into diagram1, diagram2 (off-diagonal points).
+// If your data is in plain text format, you can use the readDiagramPointSet function:
+
+if (!geom_bt::readDiagramPointSet("diagram1.txt", diagram1)) {
+    // something went wrong: the function returns true only if it successfully read the file
+}
+
+// OK: diagram1.txt was read.
+// ...
+// to get exact distance:
+double btDist = geom_bt::bottleneckDistExact(diagram1, diagram2);
+// to get 1% approximation
+double btDistApprox = geom_bt::bottleneckDistApprox(diagram1, diagram2, 0.01);
+// ..............................................................................
+// if the diagrams will be used many times, you may want to avoid copying them
+// to DiagramPointSet (which is done internally in each call to
+// bottleneckDistExact/bottleneckDistApprox) and do it yourself once.
+// Constructor takes two iterators:
+geom_bt::DiagramPointSet ds1(diagram1.begin(), diagram1.end());
+geom_bt::DiagramPointSet ds2(diagram2.begin(), diagram2.end());
+btDist = geom_bt::bottleneckDistExact(ds1, ds2);
+btDistApprox = geom_bt::bottleneckDistApprox(ds1, ds2, 0.01);
+
+Necessary projections (diagonal points) will be added inside the bottleneckDistApprox
+function.
+
+See also code in example/bottleneck_dist.cpp.
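+
+A minimal self-contained sketch assembled from the calls above (the file names
+and the 0.01 relative error are placeholders, error handling is kept minimal):
+
+#include <iostream>
+#include <vector>
+#include <utility>
+#include "bottleneck.h"
+
+int main()
+{
+    std::vector<std::pair<double, double>> diagram1, diagram2;
+    // read the two diagrams from plain text files
+    if (!geom_bt::readDiagramPointSet("diagram1.txt", diagram1) ||
+        !geom_bt::readDiagramPointSet("diagram2.txt", diagram2)) {
+        std::cerr << "Could not read input diagrams" << std::endl;
+        return 1;
+    }
+    // exact distance and a 1% approximation
+    double btDist = geom_bt::bottleneckDistExact(diagram1, diagram2);
+    double btDistApprox = geom_bt::bottleneckDistApprox(diagram1, diagram2, 0.01);
+    std::cout << "exact: " << btDist << ", approx: " << btDistApprox << std::endl;
+    return 0;
+}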
+
+# Remarks:
+
+1) If bottleneckDistApprox is called with epsilon = 0.0, it will never return.
+2) Empty diagrams are not considered an error.
+3) The modifications made to the ANN code are valid only for 2-dimensional k-d trees.
+Do not use the modified ANN files from the project folder anywhere else.
+4) You can switch to the non-geometric version by using another typedef in
+bottleneck/include/neighb_oracle.h.
+
+# License
+
+The program is distributed under the GNU Lesser General Public License (LGPL).
+
+# Building
+
+The CMakeLists.txt in the root directory builds the library (contained in the
+bottleneck/ directory) and the command-line utility (in the example/ directory)
+that computes the distance between two diagrams stored in plain text files.
+
+On Linux/Mac:
+
+mkdir build
+cd build
+cmake ..
+make
+
+On Windows (checked with Visual Studio 2015, Community edition),
+use cmake-gui to create the solution in the build directory and build it with Visual Studio.
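+
+Alternatively (an untested sketch using the stock CMake generator for VS 2015),
+the solution can be generated from the command line:
+
+mkdir build
+cd build
+cmake -G "Visual Studio 14 2015" ..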
diff --git a/geom_bottleneck/bottleneck/include/ANN/ANN.h b/geom_bottleneck/bottleneck/include/ANN/ANN.h
new file mode 100644
index 0000000..cd48d8e
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/ANN.h
@@ -0,0 +1,906 @@
+//----------------------------------------------------------------------
+// File: ANN.h
+// Programmer: Sunil Arya and David Mount
+// Description: Basic include file for approximate nearest
+// neighbor searching.
+// Last modified: 01/27/10 (Version 1.1.2)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2010 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+// Added copyright and revision information
+// Added ANNcoordPrec for coordinate precision.
+// Added methods theDim, nPoints, maxPoints, thePoints to ANNpointSet.
+// Cleaned up C++ structure for modern compilers
+// Revision 1.1 05/03/05
+// Added fixed-radius k-NN searching
+// Revision 1.1.2 01/27/10
+// Fixed minor compilation bugs for new versions of gcc
+// --------------------------------------------------------------------
+// 2015 - modified by A. Nigmetov to support deletion of points
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// ANN - approximate nearest neighbor searching
+// ANN is a library for approximate nearest neighbor searching,
+// based on the use of standard and priority search in kd-trees
+// and balanced box-decomposition (bbd) trees. Here are some
+// references to the main algorithmic techniques used here:
+//
+// kd-trees:
+// Friedman, Bentley, and Finkel, ``An algorithm for finding
+// best matches in logarithmic expected time,'' ACM
+// Transactions on Mathematical Software, 3(3):209-226, 1977.
+//
+// Priority search in kd-trees:
+// Arya and Mount, ``Algorithms for fast vector quantization,''
+// Proc. of DCC '93: Data Compression Conference, eds. J. A.
+// Storer and M. Cohn, IEEE Press, 1993, 381-390.
+//
+// Approximate nearest neighbor search and bbd-trees:
+// Arya, Mount, Netanyahu, Silverman, and Wu, ``An optimal
+// algorithm for approximate nearest neighbor searching,''
+// 5th Ann. ACM-SIAM Symposium on Discrete Algorithms,
+// 1994, 573-582.
+//----------------------------------------------------------------------
+
+#ifndef ANN_H
+#define ANN_H
+
+// A. Nigmetov: ANN code is integrated into bottleneck library,
+// CMake will take care of correct __declspec, no need to define DLL_API
+#define DLL_API
+//#ifdef WIN32
+ //----------------------------------------------------------------------
+ // For Microsoft Visual C++, externally accessible symbols must be
+ // explicitly indicated with DLL_API, which is somewhat like "extern."
+ //
+ // The following ifdef block is the standard way of creating macros
+ // which make exporting from a DLL simpler. All files within this DLL
+ // are compiled with the DLL_EXPORTS preprocessor symbol defined on the
+ // command line. In contrast, projects that use (or import) the DLL
+ // objects do not define the DLL_EXPORTS symbol. This way any other
+ // project whose source files include this file see DLL_API functions as
+ // being imported from a DLL, wheras this DLL sees symbols defined with
+ // this macro as being exported.
+ //----------------------------------------------------------------------
+ //#ifdef DLL_EXPORTS
+ // #define DLL_API __declspec(dllexport)
+ //#else
+ //#define DLL_API __declspec(dllimport)
+ //#endif
+ //----------------------------------------------------------------------
+ // DLL_API is ignored for all other systems
+ //----------------------------------------------------------------------
+//#else
+ //#define DLL_API
+//#endif
+
+//----------------------------------------------------------------------
+// basic includes
+//----------------------------------------------------------------------
+
+#include <cstdlib> // standard lib includes
+#include <cmath> // math includes
+#include <iostream> // I/O streams
+#include <cstring> // C-style strings
+#include <vector>
+#include <assert.h>
+
+//----------------------------------------------------------------------
+// Limits
+// There are a number of places where we use the maximum double value as
+// default initializers (and others may be used, depending on the
+// data/distance representation). These can usually be found in limits.h
+// (as LONG_MAX, INT_MAX) or in float.h (as DBL_MAX, FLT_MAX).
+//
+// Not all systems have these files. If you are using such a system,
+// you should set the preprocessor symbol ANN_NO_LIMITS_H when
+// compiling, and modify the statements below to generate the
+// appropriate value. For practical purposes, this does not need to be
+// the maximum double value. It is sufficient that it be at least as
+// large as the maximum squared distance between any two
+// points.
+//----------------------------------------------------------------------
+#ifdef ANN_NO_LIMITS_H // limits.h unavailable
+ #include <cvalues> // replacement for limits.h
+ const double ANN_DBL_MAX = MAXDOUBLE; // insert maximum double
+#else
+ #include <climits>
+ #include <cfloat>
+ const double ANN_DBL_MAX = DBL_MAX;
+#endif
+
+#define ANNversion "1.1.2" // ANN version and information
+#define ANNversionCmt ""
+#define ANNcopyright "David M. Mount and Sunil Arya"
+#define ANNlatestRev "Jan 27, 2010"
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// ANNbool
+// This is a simple boolean type. Although ANSI C++ is supposed
+// to support the type bool, some compilers do not have it.
+//----------------------------------------------------------------------
+
+
+enum ANNbool {ANNfalse = 0, ANNtrue = 1}; // ANN boolean type (non ANSI C++)
+
+//----------------------------------------------------------------------
+// ANNcoord, ANNdist
+// ANNcoord and ANNdist are the types used for representing
+// point coordinates and distances. They can be modified by the
+// user, with some care. It is assumed that they are both numeric
+// types, and that ANNdist is generally of an equal or higher type
+// than ANNcoord. A variable of type ANNdist should be large
+// enough to store the sum of squared components of a variable
+// of type ANNcoord for the number of dimensions needed in the
+// application. For example, the following combinations are
+// legal:
+//
+// ANNcoord ANNdist
+// --------- -------------------------------
+// short short, int, long, float, double
+// int int, long, float, double
+// long long, float, double
+// float float, double
+// double double
+//
+// It is the user's responsibility to make sure that overflow does
+// not occur in distance calculation.
+//----------------------------------------------------------------------
+
+typedef double ANNcoord; // coordinate data type
+typedef double ANNdist; // distance data type
+
+//----------------------------------------------------------------------
+// ANNidx
+// ANNidx is a point index. When the data structure is built, the
+// points are given as an array. Nearest neighbor results are
+// returned as an integer index into this array. To make it
+// clearer when this is happening, we define the integer type
+// ANNidx. Indexing starts from 0.
+//
+// For fixed-radius near neighbor searching, it is possible that
+// there are not k nearest neighbors within the search radius. To
+// indicate this, the algorithm returns ANN_NULL_IDX as its result.
+// It should be distinguishable from any valid array index.
+//----------------------------------------------------------------------
+
+typedef int ANNidx; // point index
+const ANNidx ANN_NULL_IDX = -1; // a NULL point index
+
+//----------------------------------------------------------------------
+// Infinite distance:
+// The code assumes that there is an "infinite distance" which it
+// uses to initialize distances before performing nearest neighbor
+// searches. It should be as large as or larger than any legitimate
+// nearest neighbor distance.
+//
+// On most systems, these should be found in the standard include
+// file <limits.h> or possibly <float.h>. If you do not have these
+// files, some suggested values are listed below, assuming 64-bit
+// long, 32-bit int and 16-bit short.
+//
+// ANNdist ANN_DIST_INF Values (see <limits.h> or <float.h>)
+// ------- ------------ ------------------------------------
+// double DBL_MAX 1.79769313486231570e+308
+// float FLT_MAX 3.40282346638528860e+38
+// long LONG_MAX 0x7fffffffffffffff
+// int INT_MAX 0x7fffffff
+// short SHRT_MAX 0x7fff
+//----------------------------------------------------------------------
+
+const ANNdist ANN_DIST_INF = ANN_DBL_MAX;
+
+//----------------------------------------------------------------------
+// Significant digits for tree dumps:
+// When floating point coordinates are used, the routine that dumps
+// a tree needs to know roughly how many significant digits there
+// are in an ANNcoord, so it can output points to full precision.
+// This is defined to be ANNcoordPrec. On most systems these
+// values can be found in the standard include files <limits.h> or
+// <float.h>. For integer types, the value is essentially ignored.
+//
+// ANNcoord ANNcoordPrec Values (see <limits.h> or <float.h>)
+// -------- ------------ ------------------------------------
+// double DBL_DIG 15
+// float FLT_DIG 6
+// long doesn't matter 19
+// int doesn't matter 10
+// short doesn't matter 5
+//----------------------------------------------------------------------
+
+#ifdef DBL_DIG // number of sig. digits in ANNcoord
+ const int ANNcoordPrec = DBL_DIG;
+#else
+ const int ANNcoordPrec = 15; // default precision
+#endif
+
+//----------------------------------------------------------------------
+// Self match?
+// In some applications, the nearest neighbor of a point is not
+// allowed to be the point itself. This occurs, for example, when
+// computing all nearest neighbors in a set. By setting the
+// parameter ANN_ALLOW_SELF_MATCH to ANNfalse, the nearest neighbor
+// is the closest point whose distance from the query point is
+// strictly positive.
+//----------------------------------------------------------------------
+
+const ANNbool ANN_ALLOW_SELF_MATCH = ANNtrue;
+
+//----------------------------------------------------------------------
+// Norms and metrics:
+// ANN supports any Minkowski norm for defining distance. In
+// particular, for any p >= 1, the L_p Minkowski norm defines the
+// length of a d-vector (v0, v1, ..., v(d-1)) to be
+//
+// (|v0|^p + |v1|^p + ... + |v(d-1)|^p)^(1/p),
+//
+// (where ^ denotes exponentiation, and |.| denotes absolute
+// value). The distance between two points is defined to be the
+// norm of the vector joining them. Some common distance metrics
+// include
+//
+// Euclidean metric p = 2
+// Manhattan metric p = 1
+// Max metric p = infinity
+//
+// In the case of the max metric, the norm is computed by taking
+// the maxima of the absolute values of the components. ANN is
+// highly "coordinate-based" and does not support general distance
+// functions (e.g. those obeying just the triangle inequality). It
+// also does not support distance functions based on
+// inner-products.
+//
+// For the purpose of computing nearest neighbors, it is not
+// necessary to compute the final power (1/p). Thus the only
+// component that is used by the program is |v(i)|^p.
+//
+// ANN parameterizes the distance computation through the following
+// macros. (Macros are used rather than procedures for
+// efficiency.) Recall that the distance between two points is
+// given by the length of the vector joining them, and the length
+// or norm of a vector v is given by formula:
+//
+// |v| = ROOT(POW(v0) # POW(v1) # ... # POW(v(d-1)))
+//
+// where ROOT, POW are unary functions and # is an associative and
+// commutative binary operator mapping the following types:
+//
+// ** POW: ANNcoord --> ANNdist
+// ** #: ANNdist x ANNdist --> ANNdist
+// ** ROOT: ANNdist (>0) --> double
+//
+// For early termination in distance calculation (partial distance
+// calculation) we assume that POW and # together are monotonically
+// increasing on sequences of arguments, meaning that for all
+// v0..vk and y:
+//
+// POW(v0) #...# POW(vk) <= (POW(v0) #...# POW(vk)) # POW(y).
+//
+// Incremental Distance Calculation:
+// The program uses an optimized method of computing distances for
+// kd-trees and bd-trees, called incremental distance calculation.
+// It is used when distances are to be updated when only a single
+// coordinate of a point has been changed. In order to use this,
+// we assume that there is an incremental update function DIFF(x,y)
+// for #, such that if:
+//
+// s = x0 # ... # xi # ... # xk
+//
+// then if s' is equal to s but with xi replaced by y, that is,
+//
+// s' = x0 # ... # y # ... # xk
+//
+// then the length of s' can be computed by:
+//
+// |s'| = |s| # DIFF(xi,y).
+//
+// Thus, if # is + then DIFF(xi,y) is (y-xi). For the L_infinity
+// norm we make use of the fact that in the program this function
+// is only invoked when y > xi, and hence DIFF(xi,y)=y.
+//
+// Finally, for approximate nearest neighbor queries we assume
+// that POW and ROOT are related such that
+//
+// v*ROOT(x) = ROOT(POW(v)*x)
+//
+// Here are the values for the various Minkowski norms:
+//
+// L_p: p even: p odd:
+// ------------------------- ------------------------
+// POW(v) = v^p POW(v) = |v|^p
+// ROOT(x) = x^(1/p) ROOT(x) = x^(1/p)
+// # = + # = +
+// DIFF(x,y) = y - x DIFF(x,y) = y - x
+//
+// L_inf:
+// POW(v) = |v|
+// ROOT(x) = x
+// # = max
+// DIFF(x,y) = y
+//
+// In this modified copy the L_infinity (max) norm is enabled. To
+// change the norm, uncomment the appropriate set of macros below.
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// Use the following for the Euclidean norm
+//----------------------------------------------------------------------
+//#define ANN_POW(v) ((v)*(v))
+//#define ANN_ROOT(x) sqrt(x)
+//#define ANN_SUM(x,y) ((x) + (y))
+//#define ANN_DIFF(x,y) ((y) - (x))
+
+//----------------------------------------------------------------------
+// Use the following for the L_1 (Manhattan) norm
+//----------------------------------------------------------------------
+// #define ANN_POW(v) fabs(v)
+// #define ANN_ROOT(x) (x)
+// #define ANN_SUM(x,y) ((x) + (y))
+// #define ANN_DIFF(x,y) ((y) - (x))
+
+//----------------------------------------------------------------------
+// Use the following for a general L_p norm
+//----------------------------------------------------------------------
+// #define ANN_POW(v) pow(fabs(v),p)
+// #define ANN_ROOT(x) pow(fabs(x),1/p)
+// #define ANN_SUM(x,y) ((x) + (y))
+// #define ANN_DIFF(x,y) ((y) - (x))
+
+//----------------------------------------------------------------------
+// Use the following for the L_infinity (Max) norm
+//----------------------------------------------------------------------
+#define ANN_POW(v) fabs(v)
+#define ANN_ROOT(x) (x)
+#define ANN_SUM(x,y) ((x) > (y) ? (x) : (y))
+#define ANN_DIFF(x,y) (y)
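+
+//----------------------------------------------------------------------
+// Illustrative sketch (not part of the original ANN header): with the
+// max-norm macros above, a distance is accumulated one coordinate at
+// a time, e.g. for two points p and q of dimension dim:
+//
+// ANNdist d = 0;
+// for (int i = 0; i < dim; i++)
+// d = ANN_SUM(d, ANN_POW(p[i] - q[i])); // running max of |p[i]-q[i]|
+// // ANN_ROOT(d) would undo POW; for the max norm it is the identity.
+//----------------------------------------------------------------------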
+
+//----------------------------------------------------------------------
+// Array types
+// The following array types are of basic interest. A point is
+// just a dimensionless array of coordinates, a point array is a
+// dimensionless array of points. A distance array is a
+// dimensionless array of distances and an index array is a
+// dimensionless array of point indices. The latter two are used
+// when returning the results of k-nearest neighbor queries.
+//----------------------------------------------------------------------
+
+typedef ANNcoord* ANNpoint; // a point
+typedef ANNpoint* ANNpointArray; // an array of points
+typedef ANNdist* ANNdistArray; // an array of distances
+typedef ANNidx* ANNidxArray; // an array of point indices
+
+//----------------------------------------------------------------------
+// Basic point and array utilities:
+// The following procedures are useful supplements to ANN's nearest
+// neighbor capabilities.
+//
+// annDist():
+// Computes the (squared) distance between a pair of points.
+// Note that this routine is not used internally by ANN for
+// computing distance calculations. For reasons of efficiency
+// this is done using incremental distance calculation. Thus,
+// this routine cannot be modified as a method of changing the
+// metric.
+//
+// Because points (somewhat like strings in C) are stored as
+// pointers, creating and destroying copies of
+// points may require storage allocation. These procedures do
+// this.
+//
+// annAllocPt() and annDeallocPt():
+// Allocate and deallocate storage for a single point, and
+// return a pointer to it. The argument to AllocPt() is
+// used to initialize all components.
+//
+// annAllocPts() and annDeallocPts():
+// Allocate and deallocate an array of points as well as a
+// place to store their coordinates, and initializes the
+// points to point to their respective coordinates. It
+// allocates point storage in a contiguous block large
+// enough to store all the points. The coordinate values
+// themselves are not initialized.
+//
+// annCopyPt():
+// Creates a copy of a given point, allocating space for
+// the new point. It returns a pointer to the newly
+// allocated copy.
+//----------------------------------------------------------------------
+
+DLL_API ANNdist annDist(
+ int dim, // dimension of space
+ ANNpoint p, // points
+ ANNpoint q);
+
+DLL_API ANNpoint annAllocPt(
+ int dim, // dimension
+ ANNcoord c = 0); // coordinate value (all equal)
+
+DLL_API ANNpointArray annAllocPts(
+ int n, // number of points
+ int dim); // dimension
+
+DLL_API void annDeallocPt(
+ ANNpoint &p); // deallocate 1 point
+
+DLL_API void annDeallocPts(
+ ANNpointArray &pa); // point array
+
+DLL_API ANNpoint annCopyPt(
+ int dim, // dimension
+ ANNpoint source); // point to copy
+
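+//----------------------------------------------------------------------
+// Usage sketch (illustration only, not part of the original header):
+// allocating two 2-dimensional points, measuring their distance with
+// the macros above, and releasing the storage again.
+//
+// ANNpoint p = annAllocPt(2); // components initialized to 0
+// ANNpoint q = annAllocPt(2, 1.0); // components initialized to 1
+// ANNdist d = annDist(2, p, q); // max-norm distance: 1
+// annDeallocPt(p);
+// annDeallocPt(q);
+//----------------------------------------------------------------------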
+
+//----------------------------------------------------------------------
+// Orthogonal (axis aligned) rectangle
+// Orthogonal rectangles are represented by two points, one
+// for the lower left corner (min coordinates) and the other
+// for the upper right corner (max coordinates).
+//
+// The constructor initializes from either a pair of coordinates,
+// pair of points, or another rectangle. Note that all constructors
+// allocate new point storage. The destructor deallocates this
+// storage.
+//
+// BEWARE: Orthogonal rectangles should be passed ONLY BY REFERENCE.
+// (C++'s default copy constructor will not allocate new point
+// storage, then on return the destructor frees storage, and then
+// you get into big trouble in the calling procedure.)
+//----------------------------------------------------------------------
+
+class DLL_API ANNorthRect {
+public:
+ ANNpoint lo; // rectangle lower bounds
+ ANNpoint hi; // rectangle upper bounds
+//
+ ANNorthRect( // basic constructor
+ int dd, // dimension of space
+ ANNcoord l=0, // default is empty
+ ANNcoord h=0)
+ { lo = annAllocPt(dd, l); hi = annAllocPt(dd, h); }
+
+ ANNorthRect( // (almost a) copy constructor
+ int dd, // dimension
+ const ANNorthRect &r) // rectangle to copy
+ { lo = annCopyPt(dd, r.lo); hi = annCopyPt(dd, r.hi); }
+
+ ANNorthRect( // construct from points
+ int dd, // dimension
+ ANNpoint l, // low point
+ ANNpoint h) // high point
+ { lo = annCopyPt(dd, l); hi = annCopyPt(dd, h); }
+
+ ~ANNorthRect() // destructor
+ { annDeallocPt(lo); annDeallocPt(hi); }
+
+ ANNbool inside(const int dim, ANNpoint p) const;// is point p inside rectangle?
+ bool contains(const int dim, const ANNorthRect& r) const;
+ bool intersects(const int dim, const ANNorthRect& r) const;
+};
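+
+//----------------------------------------------------------------------
+// Illustration (not part of the original header): building a unit
+// square in the plane and testing containment, keeping the BEWARE
+// note above in mind (pass rectangles by reference only).
+//
+// ANNorthRect unit(2, 0.0, 1.0); // [0,1] x [0,1]
+// ANNpoint p = annAllocPt(2, 0.5); // the point (0.5, 0.5)
+// ANNbool ins = unit.inside(2, p); // ANNtrue
+// annDeallocPt(p);
+//----------------------------------------------------------------------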
+
+
+//----------------------------------------------------------------------
+//Overall structure: ANN supports a number of different data structures
+//for approximate and exact nearest neighbor searching. These are:
+//
+// ANNbruteForce A simple brute-force search structure.
+// ANNkd_tree A kd-tree search structure.
+// ANNbd_tree A bd-tree search structure (a kd-tree with shrink
+// capabilities).
+//
+// At a minimum, each of these data structures supports k-nearest
+// neighbor queries. The nearest neighbor query, annkSearch,
+// returns an integer identifier and the distance to the nearest
+// neighbor(s) and annRangeSearch returns the nearest points that
+// lie within a given query ball.
+//
+// Each structure is built by invoking the appropriate constructor
+// and passing it (at a minimum) the array of points, the total
+// number of points and the dimension of the space. Each structure
+// is also assumed to support a destructor and member functions
+// that return basic information about the point set.
+//
+// Note that the array of points is not copied by the data
+// structure (for reasons of space efficiency), and it is assumed
+// to be constant throughout the lifetime of the search structure.
+//
+// The search algorithm, annkSearch, is given the query point (q),
+// and the desired number of nearest neighbors to report (k), and
+// the error bound (eps) (whose default value is 0, implying exact
+// nearest neighbors). It returns two arrays which are assumed to
+// contain at least k elements: one (nn_idx) contains the indices
+// (within the point array) of the nearest neighbors and the other
+// (dd) contains the squared distances to these nearest neighbors.
+//
+// The search algorithm, annkFRSearch, is a fixed-radius kNN
+// search. In addition to a query point, it is given a (squared)
+// radius bound. (This is done for consistency, because the search
+// returns distances as squared quantities.) It does two things.
+// First, it computes the k nearest neighbors within the radius
+// bound, and second, it returns the total number of points lying
+// within the radius bound. It is permitted to set k = 0, in which
+// case it effectively answers a range counting query. If the
+// error bound epsilon is positive, then the search is approximate
+// in the sense that it is free to ignore any point that lies
+// outside a ball of radius r/(1+epsilon), where r is the given
+// (unsquared) radius bound.
+//
+// The generic object from which all the search structures are
+// derived is given below. It is a virtual object, and is useless
+// by itself.
+//----------------------------------------------------------------------
+
+class DLL_API ANNpointSet {
+public:
+ virtual ~ANNpointSet() {} // virtual destructor
+
+ virtual void annkSearch( // approx k near neighbor search
+ ANNpoint q, // query point
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor array (modified)
+ ANNdistArray dd, // dist to near neighbors (modified)
+ double eps=0.0 // error bound
+ ) = 0; // pure virtual (defined elsewhere)
+
+ virtual int annkFRSearch( // approx fixed-radius kNN search
+ ANNpoint q, // query point
+ ANNdist sqRad, // squared radius
+ int k = 0, // number of near neighbors to return
+ ANNidxArray nn_idx = NULL, // nearest neighbor array (modified)
+ ANNdistArray dd = NULL, // dist to near neighbors (modified)
+ double eps=0.0 // error bound
+ ) = 0; // pure virtual (defined elsewhere)
+
+ virtual int theDim() = 0; // return dimension of space
+ virtual int nPoints() = 0; // return number of points
+ // return pointer to points
+ virtual ANNpointArray thePoints() = 0;
+};
+
+//----------------------------------------------------------------------
+// Brute-force nearest neighbor search:
+// The brute-force search structure is very simple but inefficient.
+// It has been provided primarily for the sake of comparison with
+// and validation of the more complex search structures.
+//
+// Query processing is the same as described above, but the value
+// of epsilon is ignored, since all distance calculations are
+// performed exactly.
+//
+// WARNING: This data structure is very slow, and should not be
+// used unless the number of points is very small.
+//
+// Internal information:
+// ---------------------
+// This data structure basically consists of the array of points
+// (each a pointer to an array of coordinates). The search is
+// performed by a simple linear scan of all the points.
+//----------------------------------------------------------------------
+
+class DLL_API ANNbruteForce: public ANNpointSet {
+ int dim; // dimension
+ int n_pts; // number of points
+ ANNpointArray pts; // point array
+public:
+ ANNbruteForce( // constructor from point array
+ ANNpointArray pa, // point array
+ int n, // number of points
+ int dd); // dimension
+
+ ~ANNbruteForce(); // destructor
+
+ void annkSearch( // approx k near neighbor search
+ ANNpoint q, // query point
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor array (modified)
+ ANNdistArray dd, // dist to near neighbors (modified)
+ double eps=0.0); // error bound
+
+ int annkFRSearch( // approx fixed-radius kNN search
+ ANNpoint q, // query point
+ ANNdist sqRad, // squared radius
+ int k = 0, // number of near neighbors to return
+ ANNidxArray nn_idx = NULL, // nearest neighbor array (modified)
+ ANNdistArray dd = NULL, // dist to near neighbors (modified)
+ double eps=0.0); // error bound
+
+ int theDim() // return dimension of space
+ { return dim; }
+
+ int nPoints() // return number of points
+ { return n_pts; }
+
+ ANNpointArray thePoints() // return pointer to points
+ { return pts; }
+};
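+
+//----------------------------------------------------------------------
+// Query sketch (illustration only, not part of the original header;
+// n, dim, k and the query point q stand for caller-supplied values):
+// the calling convention described above, shown with the brute-force
+// structure. The caller supplies arrays of length k for the indices
+// and squared distances.
+//
+// ANNpointArray pts = annAllocPts(n, dim); // fill pts[i][j] yourself
+// ANNpointSet* ps = new ANNbruteForce(pts, n, dim);
+// ANNidxArray nn_idx = new ANNidx[k];
+// ANNdistArray dists = new ANNdist[k];
+// ps->annkSearch(q, k, nn_idx, dists); // eps ignored by brute force
+// delete [] nn_idx; delete [] dists;
+// delete ps; annDeallocPts(pts);
+//----------------------------------------------------------------------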
+
+//----------------------------------------------------------------------
+// kd- and bd-tree splitting and shrinking rules
+// kd-trees support a collection of different splitting rules.
+// In addition to the standard kd-tree splitting rule proposed
+// by Friedman, Bentley, and Finkel, we have introduced a
+// number of other splitting rules, which seem to perform
+// as well or better (for the distributions we have tested).
+//
+// The splitting methods given below allow the user to tailor
+// the data structure to the particular data set. They are
+// described in greater detail in the kd_split.cc source
+// file. The method ANN_KD_SUGGEST is the method chosen (rather
+// subjectively) by the implementors as the one giving the
+// fastest performance, and is the default splitting method.
+//
+// As with splitting rules, there are a number of different
+// shrinking rules. The shrinking rule ANN_BD_NONE does no
+// shrinking (and hence produces a kd-tree). The rule
+// ANN_BD_SUGGEST uses the implementors' favorite rule.
+//----------------------------------------------------------------------
+
+enum ANNsplitRule {
+ ANN_KD_STD = 0, // the optimized kd-splitting rule
+ ANN_KD_MIDPT = 1, // midpoint split
+ ANN_KD_FAIR = 2, // fair split
+ ANN_KD_SL_MIDPT = 3, // sliding midpoint splitting method
+ ANN_KD_SL_FAIR = 4, // sliding fair split method
+ ANN_KD_SUGGEST = 5, // the authors' suggestion for best
+ // for kd-trees with deletion
+ //ANN_KD_STD_WD = 6,
+ //ANN_KD_MIDPT_WD = 7,
+ //ANN_KD_SL_MIDPT_WD = 8
+ };
+const int ANN_N_SPLIT_RULES = 6; // number of split rules
+//const int ANN_N_SPLIT_RULES = 9; // number of split rules
+
+enum ANNshrinkRule {
+ ANN_BD_NONE = 0, // no shrinking at all (just kd-tree)
+ ANN_BD_SIMPLE = 1, // simple splitting
+ ANN_BD_CENTROID = 2, // centroid splitting
+ ANN_BD_SUGGEST = 3}; // the authors' suggested choice
+const int ANN_N_SHRINK_RULES = 4; // number of shrink rules
+
+//----------------------------------------------------------------------
+// kd-tree:
+// The main search data structure supported by ANN is a kd-tree.
+// The main constructor is given a set of points and a choice of
+// splitting method to use in building the tree.
+//
+// Construction:
+// -------------
+// The constructor is given the point array, number of points,
+// dimension, bucket size (default = 1), and the splitting rule
+// (default = ANN_KD_SUGGEST). The point array is not copied, and
+// is assumed to be kept constant throughout the lifetime of the
+// search structure. There is also a "load" constructor that
+// builds a tree from a file description that was created by the
+// Dump operation.
+//
+// Search:
+// -------
+// There are two search methods:
+//
+// Standard search (annkSearch()):
+// Searches nodes in tree-traversal order, always visiting
+// the closer child first.
+// Priority search (annkPriSearch()):
+// Searches nodes in order of increasing distance of the
+// associated cell from the query point. For many
+// distributions the standard search seems to work just
+// fine, but priority search is safer for worst-case
+// performance.
+//
+// Printing:
+// ---------
+// There are two methods provided for printing the tree. Print()
+// is used to produce a "human-readable" display of the tree, with
+// indentation, which is handy for debugging. Dump() produces a
+// format that is suitable for reading by another program. There is a
+// "load" constructor, which constructs a tree which is assumed to
+// have been saved by the Dump() procedure.
+//
+// Performance and Structure Statistics:
+// -------------------------------------
+// The procedure getStats() collects statistics information on the
+// tree (its size, height, etc.) See ANNperf.h for information on
+// the stats structure it returns.
+//
+// Internal information:
+// ---------------------
+// The data structure consists of three major chunks of storage.
+// The first (implicit) storage is the points themselves (pts),
+// which have been provided by the user as an argument to the
+// constructor (or are allocated dynamically if the tree is built
+// using the load constructor). These should not be changed during
+// the lifetime of the search structure. It is the user's
+// responsibility to delete these after the tree is destroyed.
+//
+// The second is the tree itself (which is dynamically allocated in
+// the constructor) and is given as a pointer to its root node
+// (root). These nodes are automatically deallocated when the tree
+// is deleted. See the file src/kd_tree.h for further information
+// on the structure of the tree nodes.
+//
+// Each leaf of the tree does not contain a pointer directly to a
+// point, but rather contains a pointer to a "bucket", which is an
+// array consisting of point indices. The third major chunk of
+// storage is an array (pidx), which is a large array in which all
+// these bucket subarrays reside. (The reason for storing them
+// separately is that the buckets are typically small, but of varying
+// sizes. This was done to avoid fragmentation.) This array is
+// also deallocated when the tree is deleted.
+//
+// In addition to this, the tree consists of a number of other
+// pieces of information which are used in searching and for
+// subsequent tree operations. These consist of the following:
+//
+// dim Dimension of space
+// n_pts Number of points currently in the tree
+// n_max Maximum number of points that are allowed
+// in the tree
+// bkt_size Maximum bucket size (no. of points per leaf)
+// bnd_box_lo Bounding box low point
+// bnd_box_hi Bounding box high point
+// splitRule Splitting method used
+//
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// Some types and objects used by kd-tree functions
+// See src/kd_tree.h and src/kd_tree.cpp for definitions
+//----------------------------------------------------------------------
+class ANNkdStats; // stats on kd-tree
+class ANNkd_node; // generic node in a kd-tree
+typedef ANNkd_node* ANNkd_ptr; // pointer to a kd-tree node
+class ANNkd_leaf;
+
+class DLL_API ANNkd_tree: public ANNpointSet {
+protected:
+ int dim; // dimension of space
+ int n_pts; // number of points in tree
+ int bkt_size; // bucket size
+ ANNpointArray pts; // the points
+ ANNidxArray pidx; // point indices (to pts array)
+ ANNkd_ptr root; // root of kd-tree
+ ANNpoint bnd_box_lo; // bounding box low point
+ ANNpoint bnd_box_hi; // bounding box high point
+
+ void SkeletonTree( // construct skeleton tree
+ int n, // number of points
+ int dd, // dimension
+ int bs, // bucket size
+ ANNpointArray pa = NULL, // point array (optional)
+ ANNidxArray pi = NULL); // point indices (optional)
+
+public:
+ ANNkd_tree( // build skeleton tree
+ int n = 0, // number of points
+ int dd = 0, // dimension
+ int bs = 1); // bucket size
+
+ ANNkd_tree( // build from point array
+ ANNpointArray pa, // point array
+ int n, // number of points
+ int dd, // dimension
+ int bs = 1, // bucket size
+ ANNsplitRule split = ANN_KD_SUGGEST); // splitting method
+
+ ANNkd_tree( // build from dump file
+ std::istream& in); // input stream for dump file
+
+ ~ANNkd_tree(); // tree destructor
+
+ void annkSearch( // approx k near neighbor search
+ ANNpoint q, // query point
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor array (modified)
+ ANNdistArray dd, // dist to near neighbors (modified)
+ double eps=0.0); // error bound
+
+ void annkPriSearch( // priority k near neighbor search
+ ANNpoint q, // query point
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor array (modified)
+ ANNdistArray dd, // dist to near neighbors (modified)
+ double eps=0.0); // error bound
+
+ int annkFRSearch( // approx fixed-radius kNN search
+ ANNpoint q, // the query point
+ ANNdist sqRad, // squared radius of query ball
+ int k, // number of neighbors to return
+ ANNidxArray nn_idx = NULL, // nearest neighbor array (modified)
+ ANNdistArray dd = NULL, // dist to near neighbors (modified)
+ double eps=0.0); // error bound
+
+ int theDim() // return dimension of space
+ { return dim; }
+
+ int nPoints() // return number of points
+ { return n_pts; }
+
+ ANNpointArray thePoints() // return pointer to points
+ { return pts; }
+
+ virtual void Print( // print the tree (for debugging)
+ ANNbool with_pts, // print points as well?
+ std::ostream& out); // output stream
+
+ virtual void Dump( // dump entire tree
+ ANNbool with_pts, // print points as well?
+ std::ostream& out); // output stream
+
+ virtual void getStats( // compute tree statistics
+ ANNkdStats& st); // the statistics (modified)
+
+ ///////////////////////////////////////////////////////////////
+ // for deletion
+ std::vector<ANNkd_leaf*> pointToLeafVec;
+ std::vector<bool> isDeleted; // will be used to check implementation;
+ //TODO remove after testing
+ void delete_point(const int point_idx);
+ int actual_num_points;
+ int getActualNumPoints(void) const { return actual_num_points; }
+ void range_search(const ANNorthRect& region, std::vector<size_t>& pointIdices);
+};
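+
+//----------------------------------------------------------------------
+// Usage sketch (illustration only, not part of the original header;
+// pts, n, dim, q and sqRad stand for caller-supplied values):
+// building a kd-tree over a point array, running a fixed-radius
+// query, and using the deletion extension declared above.
+//
+// ANNkd_tree* tree = new ANNkd_tree(pts, n, dim); // default bucket/split
+// int cnt = tree->annkFRSearch(q, sqRad, 0); // k = 0: range counting
+// tree->delete_point(0); // remove the point with index 0
+// int remaining = tree->getActualNumPoints(); // points still present
+// delete tree; // pts is not freed by the tree
+//----------------------------------------------------------------------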
+
+//----------------------------------------------------------------------
+// Box decomposition tree (bd-tree)
+// The bd-tree is inherited from a kd-tree. The main difference
+// between the bd-tree and the kd-tree is a new type of internal node
+// called a shrinking node (in the kd-tree there is only one type
+// of internal node, a splitting node). The shrinking node
+// makes it possible to generate balanced trees in which the
+// cells have bounded aspect ratio, by allowing the decomposition
+// to zoom in on regions of dense point concentration. Although
+// this is a nice idea in theory, few point distributions are so
+// densely clustered that this is really needed.
+//----------------------------------------------------------------------
+
+class DLL_API ANNbd_tree: public ANNkd_tree {
+public:
+ ANNbd_tree( // build skeleton tree
+ int n, // number of points
+ int dd, // dimension
+ int bs = 1) // bucket size
+ : ANNkd_tree(n, dd, bs) {} // build base kd-tree
+
+ ANNbd_tree( // build from point array
+ ANNpointArray pa, // point array
+ int n, // number of points
+ int dd, // dimension
+ int bs = 1, // bucket size
+ ANNsplitRule split = ANN_KD_SUGGEST, // splitting rule
+ ANNshrinkRule shrink = ANN_BD_SUGGEST); // shrinking rule
+
+ ANNbd_tree( // build from dump file
+ std::istream& in); // input stream for dump file
+};
+
+//----------------------------------------------------------------------
+// Other functions
+// annMaxPtsVisit Sets a limit on the maximum number of points
+// to visit in the search.
+// annClose Can be called when all use of ANN is finished.
+// It clears up a minor memory leak.
+//----------------------------------------------------------------------
+
+DLL_API void annMaxPtsVisit( // max. pts to visit in search
+ int maxPts); // the limit
+
+DLL_API void annClose(); // called to end use of ANN
+
+}
+#endif
diff --git a/geom_bottleneck/bottleneck/include/ANN/ANNperf.h b/geom_bottleneck/bottleneck/include/ANN/ANNperf.h
new file mode 100644
index 0000000..d242266
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/ANNperf.h
@@ -0,0 +1,225 @@
+//----------------------------------------------------------------------
+// File: ANNperf.h
+// Programmer: Sunil Arya and David Mount
+// Last modified: 03/04/98 (Release 0.1)
+// Description: Include file for ANN performance stats
+//
+// Some of the code for statistics gathering has been adapted
+// from the SmplStat.h package in the g++ library.
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+// Added ANN_ prefix to avoid name conflicts.
+//----------------------------------------------------------------------
+
+#ifndef ANNperf_H
+#define ANNperf_H
+
+//----------------------------------------------------------------------
+// basic includes
+//----------------------------------------------------------------------
+
+#include <ANN/ANN.h> // basic ANN includes
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// kd-tree stats object
+// This object is used for collecting information about a kd-tree
+// or bd-tree.
+//----------------------------------------------------------------------
+
+class ANNkdStats { // stats on kd-tree
+public:
+ int dim; // dimension of space
+ int n_pts; // no. of points
+ int bkt_size; // bucket size
+ int n_lf; // no. of leaves (including trivial)
+ int n_tl; // no. of trivial leaves (no points)
+ int n_spl; // no. of splitting nodes
+ int n_shr; // no. of shrinking nodes (for bd-trees)
+ int depth; // depth of tree
+ float sum_ar; // sum of leaf aspect ratios
+ float avg_ar; // average leaf aspect ratio
+ //
+ // reset stats
+ void reset(int d=0, int n=0, int bs=0)
+ {
+ dim = d; n_pts = n; bkt_size = bs;
+ n_lf = n_tl = n_spl = n_shr = depth = 0;
+ sum_ar = avg_ar = 0.0;
+ }
+
+ ANNkdStats() // basic constructor
+ { reset(); }
+
+ void merge(const ANNkdStats &st); // merge stats from child
+};
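+
+//----------------------------------------------------------------------
+// Usage sketch (illustration only; `tree' stands for an already-built
+// ANNkd_tree): filling the stats object via getStats() and reading a
+// few of the fields listed above.
+//
+// ANNkdStats st;
+// tree->getStats(st); // declared in ANN.h
+// // st.n_pts : points in the tree, st.depth : tree depth,
+// // st.bkt_size : maximum bucket size
+//----------------------------------------------------------------------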
+
+//----------------------------------------------------------------------
+// ANNsampStat
+// A sample stat collects numeric (double) samples and returns some
+// simple statistics. Its main functions are:
+//
+// reset() Reset to no samples.
+// += x Include sample x.
+// samples() Return number of samples.
+// mean() Return mean of samples.
+// stdDev() Return standard deviation
+// min() Return minimum of samples.
+// max() Return maximum of samples.
+//----------------------------------------------------------------------
+class DLL_API ANNsampStat {
+ int n; // number of samples
+ double sum; // sum
+ double sum2; // sum of squares
+ double minVal, maxVal; // min and max
+public :
+ void reset() // reset everything
+ {
+ n = 0;
+ sum = sum2 = 0;
+ minVal = ANN_DBL_MAX;
+ maxVal = -ANN_DBL_MAX;
+ }
+
+ ANNsampStat() { reset(); } // constructor
+
+ void operator+=(double x) // add sample
+ {
+ n++; sum += x; sum2 += x*x;
+ if (x < minVal) minVal = x;
+ if (x > maxVal) maxVal = x;
+ }
+
+ int samples() { return n; } // number of samples
+
+ double mean() { return sum/n; } // mean
+
+ // standard deviation
+ double stdDev() { return sqrt((sum2 - (sum*sum)/n)/(n-1));}
+
+ double min() { return minVal; } // minimum
+ double max() { return maxVal; } // maximum
+};
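+
+//----------------------------------------------------------------------
+// Usage sketch (illustration only, not part of the original header):
+// collecting a few samples and reading back the summary statistics
+// listed above.
+//
+// ANNsampStat s; // constructor calls reset()
+// s += 1.0; s += 2.0; s += 3.0;
+// // s.samples() == 3, s.mean() == 2.0,
+// // s.min() == 1.0, s.max() == 3.0, s.stdDev() == 1.0
+//----------------------------------------------------------------------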
+
+//----------------------------------------------------------------------
+// Operation count updates
+//----------------------------------------------------------------------
+
+#ifdef ANN_PERF
+ #define ANN_FLOP(n) {ann_Nfloat_ops += (n);}
+ #define ANN_LEAF(n) {ann_Nvisit_lfs += (n);}
+ #define ANN_SPL(n) {ann_Nvisit_spl += (n);}
+ #define ANN_SHR(n) {ann_Nvisit_shr += (n);}
+ #define ANN_PTS(n) {ann_Nvisit_pts += (n);}
+ #define ANN_COORD(n) {ann_Ncoord_hts += (n);}
+#else
+ #define ANN_FLOP(n)
+ #define ANN_LEAF(n)
+ #define ANN_SPL(n)
+ #define ANN_SHR(n)
+ #define ANN_PTS(n)
+ #define ANN_COORD(n)
+#endif
+
+//----------------------------------------------------------------------
+// Performance statistics
+// The following data and routines are used for computing performance
+// statistics for nearest neighbor searching. Because these routines
+// can slow the code down, they can be activated and deactivated by
+// defining the ANN_PERF variable, by compiling with the option:
+// -DANN_PERF
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// Global counters for performance measurement
+//
+// visit_lfs The number of leaf nodes visited in the
+// tree.
+//
+// visit_spl The number of splitting nodes visited in the
+// tree.
+//
+// visit_shr The number of shrinking nodes visited in the
+// tree.
+//
+// visit_pts The number of points visited in all the
+// leaf nodes visited. Equivalently, this
+// is the number of points for which distance
+// calculations are performed.
+//
+// coord_hts The number of times a coordinate of a
+// data point is accessed. This is generally
+// less than visit_pts*d if partial distance
+// calculation is used. This count is low
+// in the sense that if a coordinate is hit
+// many times in the same routine we may
+// count it only once.
+//
+// float_ops The number of floating point operations.
+// This includes all operations in the heap
+// as well as distance calculations to boxes.
+//
+// average_err The average error of each query (the
+// error of the reported point to the true
+// nearest neighbor). For k nearest neighbors
+// the error is computed k times.
+//
+// rank_err The rank error of each query (the difference
+// in the rank of the reported point and its
+// true rank).
+//
+// data_pts The number of data points. This is not
+// a counter, but used in stats computation.
+//----------------------------------------------------------------------
+
+extern int ann_Ndata_pts; // number of data points
+extern int ann_Nvisit_lfs; // number of leaf nodes visited
+extern int ann_Nvisit_spl; // number of splitting nodes visited
+extern int ann_Nvisit_shr; // number of shrinking nodes visited
+extern int ann_Nvisit_pts; // visited points for one query
+extern int ann_Ncoord_hts; // coordinate hits for one query
+extern int ann_Nfloat_ops; // floating ops for one query
+extern ANNsampStat ann_visit_lfs; // stats on leaf nodes visits
+extern ANNsampStat ann_visit_spl; // stats on splitting nodes visits
+extern ANNsampStat ann_visit_shr; // stats on shrinking nodes visits
+extern ANNsampStat ann_visit_nds; // stats on total nodes visits
+extern ANNsampStat ann_visit_pts; // stats on points visited
+extern ANNsampStat ann_coord_hts; // stats on coordinate hits
+extern ANNsampStat ann_float_ops; // stats on floating ops
+//----------------------------------------------------------------------
+// The following need to be part of the public interface, because
+// they are accessed outside the DLL in ann_test.cpp.
+//----------------------------------------------------------------------
+DLL_API extern ANNsampStat ann_average_err; // average error
+DLL_API extern ANNsampStat ann_rank_err; // rank error
+
+//----------------------------------------------------------------------
+// Declaration of externally accessible routines for statistics
+//----------------------------------------------------------------------
+
+DLL_API void annResetStats(int data_size); // reset stats for a set of queries
+
+DLL_API void annResetCounts(); // reset counts for one query
+
+DLL_API void annUpdateStats(); // update stats with current counts
+
+DLL_API void annPrintStats(ANNbool validate); // print statistics for a run
+
+}
+#endif
diff --git a/geom_bottleneck/bottleneck/include/ANN/ANNx.h b/geom_bottleneck/bottleneck/include/ANN/ANNx.h
new file mode 100644
index 0000000..0c9e190
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/ANNx.h
@@ -0,0 +1,127 @@
+//----------------------------------------------------------------------
+// File: ANNx.h
+// Programmer: Sunil Arya and David Mount
+// Description: Internal include file for ANN
+// Last modified: 01/27/10 (Version 1.1.2)
+//
+// These declarations are of use in manipulating some of
+// the internal data objects appearing in ANN, but are not
+// needed for applications just using the nearest neighbor
+// search.
+//
+// Typical users of ANN should not need to access this file.
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2010 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+// Changed LO, HI, IN, OUT to ANN_LO, ANN_HI, etc.
+// Revision 1.1.2 01/27/10
+// Fixed minor compilation bugs for new versions of gcc
+//----------------------------------------------------------------------
+
+#ifndef ANNx_H
+#define ANNx_H
+
+#include <iomanip> // I/O manipulators
+#include <ANN/ANN.h> // ANN includes
+
+namespace geom_bt {
+
+//----------------------------------------------------------------------
+// Global constants and types
+//----------------------------------------------------------------------
+enum {ANN_LO=0, ANN_HI=1}; // splitting indices
+enum {ANN_IN=0, ANN_OUT=1}; // shrinking indices
+ // what to do in case of error
+enum ANNerr {ANNwarn = 0, ANNabort = 1};
+
+//----------------------------------------------------------------------
+// Maximum number of points to visit
+// We have an option for terminating the search early if the
+// number of points visited exceeds some threshold. If the
+// threshold is 0 (its default) this means there is no limit
+// and the algorithm applies its normal termination condition.
+//----------------------------------------------------------------------
+
+extern int ANNmaxPtsVisited; // maximum number of pts visited
+extern int ANNptsVisited; // number of pts visited in search
+
+//----------------------------------------------------------------------
+// Global function declarations
+//----------------------------------------------------------------------
+
+void annError( // ANN error routine
+ const char* msg, // error message
+ ANNerr level); // level of error
+
+void annPrintPt( // print a point
+ ANNpoint pt, // the point
+ int dim, // the dimension
+ std::ostream &out); // output stream
+
+void annAssignRect( // assign one rect to another
+ int dim, // dimension (both must be same)
+ ANNorthRect &dest, // destination (modified)
+ const ANNorthRect &source); // source
+
+//----------------------------------------------------------------------
+// Orthogonal (axis aligned) halfspace
+// An orthogonal halfspace is represented by an integer cutting
+// dimension cd, coordinate cutting value, cv, and side, sd, which is
+// either +1 or -1. Our convention is that point q lies in the (closed)
+// halfspace if (q[cd] - cv)*sd >= 0.
+//----------------------------------------------------------------------
+
+class ANNorthHalfSpace {
+public:
+ int cd; // cutting dimension
+ ANNcoord cv; // cutting value
+ int sd; // which side
+//
+ ANNorthHalfSpace() // default constructor
+ { cd = 0; cv = 0; sd = 0; }
+
+ ANNorthHalfSpace( // basic constructor
+ int cdd, // dimension of space
+ ANNcoord cvv, // cutting value
+ int sdd) // side
+ { cd = cdd; cv = cvv; sd = sdd; }
+
+ ANNbool in(ANNpoint q) const // is q inside halfspace?
+ { return (ANNbool) ((q[cd] - cv)*sd >= 0); }
+
+ ANNbool out(ANNpoint q) const // is q outside halfspace?
+ { return (ANNbool) ((q[cd] - cv)*sd < 0); }
+
+ ANNdist dist(ANNpoint q) const // (squared) distance from q
+ { return (ANNdist) ANN_POW(q[cd] - cv); }
+
+ void setLowerBound(int d, ANNpoint p)// set to lower bound at p[i]
+ { cd = d; cv = p[d]; sd = +1; }
+
+ void setUpperBound(int d, ANNpoint p)// set to upper bound at p[i]
+ { cd = d; cv = p[d]; sd = -1; }
+
+ void project(ANNpoint &q) // project q (modified) onto halfspace
+ { if (out(q)) q[cd] = cv; }
+};
+
+ // array of halfspaces
+typedef ANNorthHalfSpace *ANNorthHSArray;
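+
+//----------------------------------------------------------------------
+// Illustration (not part of the original header): the convention
+// (q[cd] - cv)*sd >= 0 in action. The halfspace x[0] >= 2 is
+//
+// ANNorthHalfSpace h(0, 2.0, +1); // cd = 0, cv = 2, sd = +1
+//
+// so for a point q with q[0] == 3, h.in(q) is ANNtrue, while for
+// q[0] == 1, h.out(q) is ANNtrue and h.project(q) sets q[0] to 2.
+//----------------------------------------------------------------------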
+
+}
+#endif
diff --git a/geom_bottleneck/bottleneck/include/ANN/bd_tree.h b/geom_bottleneck/bottleneck/include/ANN/bd_tree.h
new file mode 100644
index 0000000..0791429
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/bd_tree.h
@@ -0,0 +1,102 @@
+//----------------------------------------------------------------------
+// File: bd_tree.h
+// Programmer: David Mount
+// Description: Declarations for standard bd-tree routines
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+// Changed IN, OUT to ANN_IN, ANN_OUT
+//----------------------------------------------------------------------
+
+#ifndef ANN_bd_tree_H
+#define ANN_bd_tree_H
+
+#include <ANN/ANNx.h> // all ANN includes
+#include "kd_tree.h" // kd-tree includes
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// bd-tree shrinking node.
+// The main addition in the bd-tree is the shrinking node, which
+// is declared here.
+//
+// Shrinking nodes are defined by a list of orthogonal halfspaces.
+// These halfspaces define a (possibly unbounded) orthogonal
+// rectangle. There are two children, in and out. Points that
+// lie within this rectangle are stored in the in-child, and the
+// other points are stored in the out-child.
+//
+// We use a list of orthogonal halfspaces rather than an
+// orthogonal rectangle object because typically the number of
+// sides of the shrinking box will be much smaller than the
+// worst case bound of 2*dim.
+//
+// BEWARE: Note that the constructor just copies the pointer to the
+// bounding array, but the destructor deallocates it. This is
+// rather poor practice, but happens to be convenient. The list
+// is allocated in the bd-tree building procedure rbd_tree() just
+// prior to construction, and is used for no other purposes.
+//
+// WARNING: In the near neighbor searching code it is assumed that
+// the list of bounding halfspaces is irredundant, meaning that there
+// are no two distinct halfspaces in the list with the same outward
+// pointing normals.
+//----------------------------------------------------------------------
+
+class ANNbd_shrink : public ANNkd_node // shrinking node of a bd-tree
+{
+ int n_bnds; // number of bounding halfspaces
+ ANNorthHSArray bnds; // list of bounding halfspaces
+ ANNkd_ptr child[2]; // in and out children
+public:
+ ANNbd_shrink( // constructor
+ int nb, // number of bounding halfspaces
+ ANNorthHSArray bds, // list of bounding halfspaces
+ ANNkd_ptr ic=NULL, ANNkd_ptr oc=NULL) // children
+ {
+ n_bnds = nb; // number of bounding halfspaces
+ bnds = bds; // assign bounds
+ child[ANN_IN] = ic; // set children
+ child[ANN_OUT] = oc;
+ }
+
+ ~ANNbd_shrink() // destructor
+ {
+ if (child[ANN_IN]!= NULL && child[ANN_IN]!= KD_TRIVIAL)
+ delete child[ANN_IN];
+ if (child[ANN_OUT]!= NULL&& child[ANN_OUT]!= KD_TRIVIAL)
+ delete child[ANN_OUT];
+ if (bnds != NULL)
+ delete [] bnds; // delete bounds
+ }
+
+ virtual void getStats( // get tree statistics
+ int dim, // dimension of space
+ ANNkdStats &st, // statistics
+ ANNorthRect &bnd_box); // bounding box
+ virtual void print(int level, ostream &out);// print node
+ virtual void dump(ostream &out); // dump node
+
+ virtual void ann_search(ANNdist); // standard search
+ virtual void ann_pri_search(ANNdist); // priority search
+ virtual void ann_FR_search(ANNdist); // fixed-radius search
+};
+
+}
+#endif
diff --git a/geom_bottleneck/bottleneck/include/ANN/kd_fix_rad_search.h b/geom_bottleneck/bottleneck/include/ANN/kd_fix_rad_search.h
new file mode 100644
index 0000000..36f9528
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/kd_fix_rad_search.h
@@ -0,0 +1,46 @@
+//----------------------------------------------------------------------
+// File: kd_fix_rad_search.h
+// Programmer: Sunil Arya and David Mount
+// Description: Standard kd-tree fixed-radius kNN search
+// Last modified: 05/03/05 (Version 1.1)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 1.1 05/03/05
+// Initial release
+//----------------------------------------------------------------------
+
+#ifndef ANN_kd_fix_rad_search_H
+#define ANN_kd_fix_rad_search_H
+
+#include "kd_tree.h" // kd-tree declarations
+#include "kd_util.h" // kd-tree utilities
+#include "pr_queue_k.h" // k-element priority queue
+
+#include <ANN/ANNperf.h> // performance evaluation
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Global variables
+// These are active for the life of each call to
+// annRangeSearch(). They are set to save the number of
+// variables that need to be passed among the various search
+// procedures.
+//----------------------------------------------------------------------
+
+extern ANNpoint ANNkdFRQ; // query point (static copy)
+
+}
+#endif \ No newline at end of file
diff --git a/geom_bottleneck/bottleneck/include/ANN/kd_pr_search.h b/geom_bottleneck/bottleneck/include/ANN/kd_pr_search.h
new file mode 100644
index 0000000..1f4c4fc
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/kd_pr_search.h
@@ -0,0 +1,51 @@
+//----------------------------------------------------------------------
+// File: kd_pr_search.h
+// Programmer: Sunil Arya and David Mount
+// Description: Priority kd-tree search
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#ifndef ANN_kd_pr_search_H
+#define ANN_kd_pr_search_H
+
+#include "kd_tree.h" // kd-tree declarations
+#include "kd_util.h" // kd-tree utilities
+#include "pr_queue.h" // priority queue declarations
+#include "pr_queue_k.h" // k-element priority queue
+
+#include <ANN/ANNperf.h> // performance evaluation
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Global variables
+// Active for the life of each call to Appx_Near_Neigh() or
+// Appx_k_Near_Neigh().
+//----------------------------------------------------------------------
+
+extern double ANNprEps; // the error bound
+extern int ANNprDim; // dimension of space
+extern ANNpoint ANNprQ; // query point
+extern double ANNprMaxErr; // max tolerable squared error
+extern ANNpointArray ANNprPts; // the points
+extern ANNpr_queue *ANNprBoxPQ; // priority queue for boxes
+extern ANNmin_k *ANNprPointMK; // set of k closest points
+
+}
+#endif
diff --git a/geom_bottleneck/bottleneck/include/ANN/kd_search.h b/geom_bottleneck/bottleneck/include/ANN/kd_search.h
new file mode 100644
index 0000000..7491779
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/kd_search.h
@@ -0,0 +1,50 @@
+//----------------------------------------------------------------------
+// File: kd_search.h
+// Programmer: Sunil Arya and David Mount
+// Description: Standard kd-tree search
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#ifndef ANN_kd_search_H
+#define ANN_kd_search_H
+
+#include "kd_tree.h" // kd-tree declarations
+#include "kd_util.h" // kd-tree utilities
+#include "pr_queue_k.h" // k-element priority queue
+
+#include <ANN/ANNperf.h> // performance evaluation
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// More global variables
+// These are active for the life of each call to annkSearch(). They
+// are set to save the number of variables that need to be passed
+// among the various search procedures.
+//----------------------------------------------------------------------
+
+extern int ANNkdDim; // dimension of space (static copy)
+extern ANNpoint ANNkdQ; // query point (static copy)
+extern double ANNkdMaxErr; // max tolerable squared error
+extern ANNpointArray ANNkdPts; // the points (static copy)
+extern ANNmin_k *ANNkdPointMK; // set of k closest points
+extern int ANNptsVisited; // number of points visited
+
+}
+#endif
diff --git a/geom_bottleneck/bottleneck/include/ANN/kd_split.h b/geom_bottleneck/bottleneck/include/ANN/kd_split.h
new file mode 100644
index 0000000..62533a1
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/kd_split.h
@@ -0,0 +1,123 @@
+//----------------------------------------------------------------------
+// File: kd_split.h
+// Programmer: Sunil Arya and David Mount
+// Description: Methods for splitting kd-trees
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#ifndef ANN_KD_SPLIT_H
+#define ANN_KD_SPLIT_H
+
+#include "kd_tree.h" // kd-tree definitions
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// External entry points
+// These are all splitting procedures for kd-trees.
+//----------------------------------------------------------------------
+
+void kd_split( // standard (optimized) kd-splitter
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo); // num of points on low side (returned)
+
+void midpt_split( // midpoint kd-splitter
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo); // num of points on low side (returned)
+
+void sl_midpt_split( // sliding midpoint kd-splitter
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo); // num of points on low side (returned)
+
+void fair_split( // fair-split kd-splitter
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo); // num of points on low side (returned)
+
+void sl_fair_split( // sliding fair-split kd-splitter
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo); // num of points on low side (returned)
+
+////////////////////////////////////////////////////////////////////////////////
+//
+void kd_split_wd( // standard (optimized) kd-splitter
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo, // num of points on low side (returned)
+ int &cut_pt_idx); // index of cutting point (returned)
+
+void midpt_split_wd( // midpoint kd-splitter
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo, // num of points on low side (returned)
+ int &cut_pt_idx); // index of cutting point (returned)
+
+void sl_midpt_split_wd( // sliding midpoint kd-splitter
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo, // num of points on low side (returned)
+ int &cut_pt_idx); // index of cutting point (returned)
+
+
+}
+#endif
diff --git a/geom_bottleneck/bottleneck/include/ANN/kd_tree.h b/geom_bottleneck/bottleneck/include/ANN/kd_tree.h
new file mode 100644
index 0000000..5fb362d
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/kd_tree.h
@@ -0,0 +1,253 @@
+//----------------------------------------------------------------------
+// File: kd_tree.h
+// Programmer: Sunil Arya and David Mount
+// Description: Declarations for standard kd-tree routines
+// Last modified: 05/03/05 (Version 1.1)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.1 05/03/05
+// Added fixed radius kNN search
+// --------------------------------------------------------------------
+// 2015 - modified by A. Nigmetov to support deletion of points
+//----------------------------------------------------------------------
+
+#ifndef ANN_kd_tree_H
+#define ANN_kd_tree_H
+
+#include <utility> // for std::pair
+#include <ANN/ANNx.h> // all ANN includes
+
+using namespace std; // make std:: available
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Generic kd-tree node
+//
+// Nodes in kd-trees are of two types, splitting nodes which contain
+// splitting information (a splitting hyperplane orthogonal to one
+// of the coordinate axes) and leaf nodes which contain point
+// information (an array of points stored in a bucket). This is
+// handled by making a generic class kd_node, which is essentially an
+// empty shell, and then deriving the leaf and splitting nodes from
+// this.
+//----------------------------------------------------------------------
+//class ANNkd_node;
+class ANNkd_split;
+
+//typedef std::pair<ANNidx, ANNkd_node*> ANNreplaceSearchRes;
+
+class ANNkd_node{ // generic kd-tree node (empty shell)
+protected:
+	int actual_num_points;		// number of points currently stored in this subtree (maintained under deletion)
+ ANNkd_split* parent;
+public:
+ ANNkd_split* getParent() const { return parent; }
+ void setParent(ANNkd_split* par) { parent = par; }
+ int getNumPoints() const { return actual_num_points; }
+ void setNumPoints(int n) { assert(n >=0 ); actual_num_points = n; }
+ void decNumPoints() { assert(actual_num_points > 0); actual_num_points--; }
+	virtual ~ANNkd_node() {}					// virtual destructor
+
+ virtual void ann_search(ANNdist) = 0; // tree search
+ virtual void ann_pri_search(ANNdist) = 0; // priority search
+ virtual void ann_FR_search(ANNdist) = 0; // fixed-radius search
+
+ virtual void getStats( // get tree statistics
+ int dim, // dimension of space
+ ANNkdStats &st, // statistics
+ ANNorthRect &bnd_box) = 0; // bounding box
+ // print node
+ virtual void print(int level, ostream &out) = 0;
+ virtual void dump(ostream &out) = 0; // dump node
+
+ friend class ANNkd_tree; // allow kd-tree to access us
+
+ ////////////////////////////////////////////////////////////////////////
+ // deletion
+ virtual void delete_point(const int point_idx) {}
+ // range search
+ virtual void range_search(const ANNorthRect& region, // query region
+ int ANNkdDim, // dimension of points,
+ ANNpointArray ANNkdPts, // array of points
+ ANNorthRect& bnd_box, // bounding box of the current node,
+ // comes precomputed from the caller
+ std::vector<size_t>& pointIdices) {} // indices of points are returned in this vector
+ virtual void range_search_add(std::vector<size_t>& pointIdices) {} // add all points to pointIdices
+};
+
+
+
+//----------------------------------------------------------------------
+// kd-splitting function:
+// kd_splitter is a pointer to a splitting routine for preprocessing.
+// Different splitting procedures result in different strategies
+// for building the tree.
+//----------------------------------------------------------------------
+typedef void (*ANNkd_splitter)( // splitting routine for kd-trees
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo); // num of points on low side (returned)
+
+//----------------------------------------------------------------------
+// Leaf kd-tree node
+// Leaf nodes of the kd-tree store the set of points associated
+// with this bucket, stored as an array of point indices. These
+// are indices in the array points, which resides with the
+// root of the kd-tree. We also store the number of points
+// that reside in this bucket.
+//----------------------------------------------------------------------
+
+class ANNkd_leaf: public ANNkd_node // leaf node for kd-tree
+{
+ int n_pts;
+ ANNidxArray bkt; // bucket of points
+public:
+ ANNkd_leaf( // constructor
+ int n, // number of points
+ ANNidxArray b) : // bucket
+ n_pts(n),
+ bkt(b)
+ {
+ setNumPoints(n);
+ parent = NULL;
+ }
+
+ ~ANNkd_leaf() { } // destructor (none)
+
+ virtual void getStats( // get tree statistics
+ int dim, // dimension of space
+ ANNkdStats &st, // statistics
+ ANNorthRect &bnd_box); // bounding box
+ virtual void print(int level, ostream &out);// print node
+ virtual void dump(ostream &out); // dump node
+
+ virtual void ann_search(ANNdist); // standard search
+ virtual void ann_pri_search(ANNdist); // priority search
+ virtual void ann_FR_search(ANNdist); // fixed-radius search
+ // deletion
+ void delete_point(const int point_idx, const bool killYourself);
+ // range search
+ virtual void range_search(const ANNorthRect& region, // query region
+ int ANNkdDim, // dimension of points,
+ ANNpointArray ANNkdPts, // array of points
+ ANNorthRect& bnd_box, // bounding box of the current node,
+ // comes precomputed from the caller
+ std::vector<size_t>& pointIdices); // indices of points are returned in this vector
+ virtual void range_search_add(std::vector<size_t>& pointIdices); // add all points to pointIdices
+};
+
+//----------------------------------------------------------------------
+// KD_TRIVIAL is a special pointer to an empty leaf node. Since
+// some splitting rules generate many (more than 50%) trivial
+// leaves, we use this one shared node to save space.
+//
+// The pointer is initialized to NULL, but whenever a kd-tree is
+// created, we allocate this node, if it has not already been
+// allocated. This node is *never* deallocated, so it produces
+// a small memory leak.
+//----------------------------------------------------------------------
+
+extern ANNkd_leaf *KD_TRIVIAL; // trivial (empty) leaf node
+
+//----------------------------------------------------------------------
+// kd-tree splitting node.
+// Splitting nodes contain a cutting dimension and a cutting value.
+// These indicate the axis-parallel plane which subdivides the
+// box for this node. The extent of the bounding box along the
+// cutting dimension is maintained (this is used to speed up point
+// to box distance calculations) [we do not store the entire bounding
+// box since this may be wasteful of space in high dimensions].
+// We also store pointers to the 2 children.
+//----------------------------------------------------------------------
+
+class ANNkd_split : public ANNkd_node // splitting node of a kd-tree
+{
+ int cut_dim; // dim orthogonal to cutting plane
+ ANNcoord cut_val; // location of cutting plane
+ ANNcoord cd_bnds[2]; // lower and upper bounds of
+ // rectangle along cut_dim
+ ANNkd_ptr child[2]; // left and right children
+public:
+ ANNkd_split( // constructor
+ int cd, // cutting dimension
+ ANNcoord cv, // cutting value
+ ANNcoord lv, ANNcoord hv, // low and high values
+ ANNkd_ptr lc=NULL, ANNkd_ptr hc=NULL) // children
+ {
+ cut_dim = cd; // cutting dimension
+ cut_val = cv; // cutting value
+ cd_bnds[ANN_LO] = lv; // lower bound for rectangle
+ cd_bnds[ANN_HI] = hv; // upper bound for rectangle
+ child[ANN_LO] = lc; // left child
+ child[ANN_HI] = hc; // right child
+ parent = NULL;
+ }
+
+
+ ~ANNkd_split() // destructor
+ {
+ if (child[ANN_LO]!= NULL && child[ANN_LO]!= KD_TRIVIAL)
+ delete child[ANN_LO];
+ if (child[ANN_HI]!= NULL && child[ANN_HI]!= KD_TRIVIAL)
+ delete child[ANN_HI];
+ }
+
+ virtual void getStats( // get tree statistics
+ int dim, // dimension of space
+ ANNkdStats &st, // statistics
+ ANNorthRect &bnd_box); // bounding box
+ virtual void print(int level, ostream &out);// print node
+ virtual void dump(ostream &out); // dump node
+
+ virtual void ann_search(ANNdist); // standard search
+ virtual void ann_pri_search(ANNdist); // priority search
+ virtual void ann_FR_search(ANNdist); // fixed-radius search
+
+ ///////
+ void delete_leaf(ANNkd_leaf* childToDelete); // set the leaf to KD_TRIVIAL
+ // range search
+ virtual void range_search(const ANNorthRect& region, // query region
+ int ANNkdDim, // dimension of points,
+ ANNpointArray ANNkdPts, // array of points
+ ANNorthRect& bnd_box, // bounding box of the current node,
+ // comes precomputed from the caller
+ std::vector<size_t>& pointIdices); // indices of points are returned in this vector
+ virtual void range_search_add(std::vector<size_t>& pointIdices); // add all points to pointIdices
+};
+
+//----------------------------------------------------------------------
+// External entry points
+//----------------------------------------------------------------------
+
+ANNkd_ptr rkd_tree( // recursive construction of kd-tree
+ ANNpointArray pa, // point array (unaltered)
+ ANNidxArray pidx, // point indices to store in subtree
+ int n, // number of points
+ int dim, // dimension of space
+ int bsp, // bucket space
+ ANNorthRect &bnd_box, // bounding box for current node
+ ANNkd_splitter splitter, // splitting routine
+ vector<ANNkd_leaf*>* ppointToLeafVec);
+
+}
+#endif
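
For orientation, a minimal sketch of how these declarations are usually driven through the public interface in ANN.h. The constructor and annkSearch signatures below are the standard ANN ones; whether this modified tree keeps them exactly is an assumption, and the point values are made up.

#include <ANN/ANN.h>
using namespace geom_bt;

void kd_tree_sketch()
{
    const int n = 100, dim = 2, k = 1;
    ANNpointArray pts = annAllocPts(n, dim);     // data points, contiguous storage
    for (int i = 0; i < n; ++i) {
        pts[i][0] = i; pts[i][1] = 2.0 * i;      // arbitrary coordinates
    }
    ANNkd_tree tree(pts, n, dim);                // built internally via rkd_tree()
    ANNpoint q = annAllocPt(dim, 0.5);           // query point (0.5, 0.5)
    ANNidx nn_idx[1]; ANNdist nn_dist[1];
    tree.annkSearch(q, k, nn_idx, nn_dist, 0.0); // eps = 0 -> exact nearest neighbour
    annDeallocPt(q);
    annDeallocPts(pts);
}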
diff --git a/geom_bottleneck/bottleneck/include/ANN/kd_util.h b/geom_bottleneck/bottleneck/include/ANN/kd_util.h
new file mode 100644
index 0000000..fa9f554
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/kd_util.h
@@ -0,0 +1,126 @@
+//----------------------------------------------------------------------
+// File: kd_util.h
+// Programmer: Sunil Arya and David Mount
+// Description:	Common utilities for kd-trees
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#ifndef ANN_kd_util_H
+#define ANN_kd_util_H
+
+#include "kd_tree.h" // kd-tree declarations
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// externally accessible functions
+//----------------------------------------------------------------------
+
+double annAspectRatio( // compute aspect ratio of box
+ int dim, // dimension
+ const ANNorthRect &bnd_box); // bounding cube
+
+void annEnclRect( // compute smallest enclosing rectangle
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int dim, // dimension
+ ANNorthRect &bnds); // bounding cube (returned)
+
+void annEnclCube( // compute smallest enclosing cube
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int dim, // dimension
+ ANNorthRect &bnds); // bounding cube (returned)
+
+ANNdist annBoxDistance( // compute distance from point to box
+ const ANNpoint q, // the point
+ const ANNpoint lo, // low point of box
+ const ANNpoint hi, // high point of box
+ int dim); // dimension of space
+
+ANNcoord annSpread( // compute point spread along dimension
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d); // dimension to check
+
+void annMinMax( // compute min and max coordinates along dim
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d, // dimension to check
+ ANNcoord& min, // minimum value (returned)
+ ANNcoord& max); // maximum value (returned)
+
+int annMaxSpread( // compute dimension of max spread
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int dim); // dimension of space
+
+void annMedianSplit( // split points along median value
+ ANNpointArray pa, // points to split
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d, // dimension along which to split
+ ANNcoord &cv, // cutting value
+ int n_lo); // split into n_lo and n-n_lo
+
+void annPlaneSplit( // split points by a plane
+ ANNpointArray pa, // points to split
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d, // dimension along which to split
+ ANNcoord cv, // cutting value
+ int &br1, // first break (values < cv)
+ int &br2); // second break (values == cv)
+
+void annBoxSplit( // split points by a box
+ ANNpointArray pa, // points to split
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int dim, // dimension of space
+ ANNorthRect &box, // the box
+ int &n_in); // number of points inside (returned)
+
+int annSplitBalance( // determine balance factor of a split
+ ANNpointArray pa, // points to split
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d, // dimension along which to split
+ ANNcoord cv); // cutting value
+
+void annBox2Bnds( // convert inner box to bounds
+ const ANNorthRect &inner_box, // inner box
+ const ANNorthRect &bnd_box, // enclosing box
+ int dim, // dimension of space
+ int &n_bnds, // number of bounds (returned)
+ ANNorthHSArray &bnds); // bounds array (returned)
+
+void annBnds2Box( // convert bounds to inner box
+ const ANNorthRect &bnd_box, // enclosing box
+ int dim, // dimension of space
+ int n_bnds, // number of bounds
+ ANNorthHSArray bnds, // bounds array
+ ANNorthRect &inner_box); // inner box (returned)
+
+}
+#endif
diff --git a/geom_bottleneck/bottleneck/include/ANN/pr_queue.h b/geom_bottleneck/bottleneck/include/ANN/pr_queue.h
new file mode 100644
index 0000000..f938a73
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/pr_queue.h
@@ -0,0 +1,127 @@
+//----------------------------------------------------------------------
+// File: pr_queue.h
+// Programmer: Sunil Arya and David Mount
+// Description: Include file for priority queue and related
+// structures.
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#ifndef PR_QUEUE_H
+#define PR_QUEUE_H
+
+#include <ANN/ANNx.h> // all ANN includes
+#include <ANN/ANNperf.h> // performance evaluation
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Basic types.
+//----------------------------------------------------------------------
+typedef void *PQinfo; // info field is generic pointer
+typedef ANNdist PQkey; // key field is distance
+
+//----------------------------------------------------------------------
+// Priority queue
+// A priority queue is a list of items, along with associated
+// priorities. The basic operations are insert and extract_minimum.
+//
+// The priority queue is maintained using a standard binary heap.
+// (Implementation note: Indexing is performed from [1..max] rather
+// than the C standard of [0..max-1]. This simplifies parent/child
+// computations.) User information consists of a void pointer,
+// and the user is responsible for casting this quantity into whatever
+// useful form is desired.
+//
+// Because the priority queue is so central to the efficiency of
+// query processing, all the code is inline.
+//----------------------------------------------------------------------
+
+class ANNpr_queue {
+
+ struct pq_node { // node in priority queue
+ PQkey key; // key value
+ PQinfo info; // info field
+ };
+ int n; // number of items in queue
+ int max_size; // maximum queue size
+ pq_node *pq; // the priority queue (array of nodes)
+
+public:
+ ANNpr_queue(int max) // constructor (given max size)
+ {
+ n = 0; // initially empty
+ max_size = max; // maximum number of items
+ pq = new pq_node[max+1]; // queue is array [1..max] of nodes
+ }
+
+ ~ANNpr_queue() // destructor
+ { delete [] pq; }
+
+ ANNbool empty() // is queue empty?
+ { if (n==0) return ANNtrue; else return ANNfalse; }
+
+ ANNbool non_empty() // is queue nonempty?
+ { if (n==0) return ANNfalse; else return ANNtrue; }
+
+ void reset() // make existing queue empty
+ { n = 0; }
+
+ inline void insert( // insert item (inlined for speed)
+ PQkey kv, // key value
+ PQinfo inf) // item info
+ {
+ if (++n > max_size) annError("Priority queue overflow.", ANNabort);
+ register int r = n;
+ while (r > 1) { // sift up new item
+ register int p = r/2;
+ ANN_FLOP(1) // increment floating ops
+ if (pq[p].key <= kv) // in proper order
+ break;
+ pq[r] = pq[p]; // else swap with parent
+ r = p;
+ }
+ pq[r].key = kv; // insert new item at final location
+ pq[r].info = inf;
+ }
+
+ inline void extr_min( // extract minimum (inlined for speed)
+ PQkey &kv, // key (returned)
+ PQinfo &inf) // item info (returned)
+ {
+ kv = pq[1].key; // key of min item
+ inf = pq[1].info; // information of min item
+ register PQkey kn = pq[n--].key;// last item in queue
+ register int p = 1; // p points to item out of position
+ register int r = p<<1; // left child of p
+ while (r <= n) { // while r is still within the heap
+ ANN_FLOP(2) // increment floating ops
+ // set r to smaller child of p
+ if (r < n && pq[r].key > pq[r+1].key) r++;
+ if (kn <= pq[r].key) // in proper order
+ break;
+ pq[p] = pq[r]; // else swap with child
+ p = r; // advance pointers
+ r = p<<1;
+ }
+ pq[p] = pq[n+1]; // insert last item in proper place
+ }
+};
+
+}
+#endif
\ No newline at end of file
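
A tiny usage sketch of the queue in isolation (illustrative only; inside the library the info pointers are kd-tree nodes enqueued by the priority-search routines):

ANNpr_queue pq(16);            // room for 16 items, heap stored in pq[1..16]
pq.insert(2.0, NULL);          // key = squared distance, info = user pointer
pq.insert(0.5, NULL);
PQkey key; PQinfo info;
pq.extr_min(key, info);        // returns the smallest key, here 0.5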
diff --git a/geom_bottleneck/bottleneck/include/ANN/pr_queue_k.h b/geom_bottleneck/bottleneck/include/ANN/pr_queue_k.h
new file mode 100644
index 0000000..133a766
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/ANN/pr_queue_k.h
@@ -0,0 +1,120 @@
+//----------------------------------------------------------------------
+// File: pr_queue_k.h
+// Programmer: Sunil Arya and David Mount
+// Description: Include file for priority queue with k items.
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#ifndef PR_QUEUE_K_H
+#define PR_QUEUE_K_H
+
+#include <ANN/ANNx.h> // all ANN includes
+#include <ANN/ANNperf.h> // performance evaluation
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Basic types
+//----------------------------------------------------------------------
+typedef ANNdist PQKkey; // key field is distance
+typedef int PQKinfo; // info field is int
+
+//----------------------------------------------------------------------
+// Constants
+// The NULL key value is used to initialize the priority queue, and
+// so it should be larger than any valid distance, so that it will
+// be replaced as legal distance values are inserted. The NULL
+// info value must be an invalid array index; we use ANN_NULL_IDX,
+// which is guaranteed to be negative.
+//----------------------------------------------------------------------
+
+const PQKkey PQ_NULL_KEY = ANN_DIST_INF; // nonexistent key value
+const PQKinfo PQ_NULL_INFO = ANN_NULL_IDX; // nonexistent info value
+
+//----------------------------------------------------------------------
+// ANNmin_k
+// An ANNmin_k structure is one which maintains the smallest
+// k values (of type PQKkey) and associated information (of type
+// PQKinfo). The special info and key values PQ_NULL_INFO and
+// PQ_NULL_KEY mean that this entry is empty.
+//
+// It is currently implemented using an array with k items.
+// Items are stored in increasing sorted order, and insertions
+// are made through standard insertion sort. (This is quite
+// inefficient, but current applications call for small values
+// of k and relatively few insertions.)
+//
+// Note that the list contains k+1 entries, but the last entry
+// is used as a simple placeholder and is otherwise ignored.
+//----------------------------------------------------------------------
+
+class ANNmin_k {
+ struct mk_node { // node in min_k structure
+ PQKkey key; // key value
+ PQKinfo info; // info field (user defined)
+ };
+
+ int k; // max number of keys to store
+ int n; // number of keys currently active
+ mk_node *mk; // the list itself
+
+public:
+ ANNmin_k(int max) // constructor (given max size)
+ {
+ n = 0; // initially no items
+ k = max; // maximum number of items
+ mk = new mk_node[max+1]; // sorted array of keys
+ }
+
+ ~ANNmin_k() // destructor
+ { delete [] mk; }
+
+ PQKkey ANNmin_key() // return minimum key
+ { return (n > 0 ? mk[0].key : PQ_NULL_KEY); }
+
+ PQKkey max_key() // return maximum key
+ { return (n == k ? mk[k-1].key : PQ_NULL_KEY); }
+
+ PQKkey ith_smallest_key(int i) // ith smallest key (i in [0..n-1])
+ { return (i < n ? mk[i].key : PQ_NULL_KEY); }
+
+ PQKinfo ith_smallest_info(int i) // info for ith smallest (i in [0..n-1])
+ { return (i < n ? mk[i].info : PQ_NULL_INFO); }
+
+ inline void insert( // insert item (inlined for speed)
+ PQKkey kv, // key value
+ PQKinfo inf) // item info
+ {
+ register int i;
+ // slide larger values up
+ for (i = n; i > 0; i--) {
+ if (mk[i-1].key > kv)
+ mk[i] = mk[i-1];
+ else
+ break;
+ }
+ mk[i].key = kv; // store element here
+ mk[i].info = inf;
+ if (n < k) n++; // increment number of items
+ ANN_FLOP(k-i+1) // increment floating ops
+ }
+};
+
+}
+#endif
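
A short sketch of ANNmin_k in isolation (the keys and indices are arbitrary):

ANNmin_k best(3);              // keep the 3 smallest (key, info) pairs
best.insert(4.0, 7);           // key = squared distance, info = point index
best.insert(1.0, 2);
best.insert(9.0, 5);
best.insert(0.25, 11);         // slides larger keys up; 9.0 drops out of the top 3
// best.ith_smallest_key(0) == 0.25, best.ith_smallest_info(0) == 11
// best.max_key() == 4.0, the current k-th smallest key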
diff --git a/geom_bottleneck/bottleneck/include/basic_defs_bt.h b/geom_bottleneck/bottleneck/include/basic_defs_bt.h
new file mode 100644
index 0000000..ef679ae
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/basic_defs_bt.h
@@ -0,0 +1,188 @@
+/*
+    Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+ This file is part of GeomBottleneck.
+
+ GeomBottleneck is free software: you can redistribute it and/or modify
+ it under the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GeomBottleneck is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+
+#ifndef BASIC_DEFS_BT_H
+#define BASIC_DEFS_BT_H
+
+#ifdef _WIN32
+#include <ciso646>
+#endif
+
+#include <vector>
+#include <math.h>
+#include <cstddef>
+#include <unordered_map>
+#include <unordered_set>
+#include <iostream>
+#include <string>
+#include <assert.h>
+
+#include "def_debug.h"
+
+
+namespace geom_bt {
+
+typedef double CoordinateType;
+typedef int IdType;
+constexpr IdType MinValidId = 10;
+
+struct Point {
+ CoordinateType x, y;
+ bool operator==(const Point& other) const;
+ bool operator!=(const Point& other) const;
+ Point(CoordinateType ax, CoordinateType ay) : x(ax), y(ay) {}
+ Point() : x(0.0), y(0.0) {}
+ friend std::ostream& operator<<(std::ostream& output, const Point p);
+};
+
+struct DiagramPoint
+{
+ // Points above the diagonal have type NORMAL
+ // Projections onto the diagonal have type DIAG
+ // for DIAG points only x-coordinate is relevant
+ // to-do: add getters/setters, checks in constructors, etc
+ enum Type { NORMAL, DIAG};
+ // data members
+private:
+ CoordinateType x, y;
+public:
+ Type type;
+ IdType id;
+ // operators, constructors
+ bool operator==(const DiagramPoint& other) const;
+ bool operator!=(const DiagramPoint& other) const;
+ DiagramPoint(CoordinateType xx, CoordinateType yy, Type ttype, IdType uid);
+ bool isDiagonal(void) const { return type == DIAG; }
+ bool isNormal(void) const { return type == NORMAL; }
+ CoordinateType inline getRealX() const // return the x-coord
+ {
+ return x;
+ //if (DiagramPoint::NORMAL == type)
+ //return x;
+ //else
+ //return 0.5 * ( x + y);
+ }
+
+ CoordinateType inline getRealY() const // return the y-coord
+ {
+ return y;
+ //if (DiagramPoint::NORMAL == type)
+ //return y;
+ //else
+ //return 0.5 * ( x + y);
+ }
+
+ friend std::ostream& operator<<(std::ostream& output, const DiagramPoint p);
+};
+
+struct PointHash {
+ size_t operator()(const Point& p) const{
+ return std::hash<CoordinateType>()(p.x)^std::hash<CoordinateType>()(p.y);
+ }
+};
+
+struct DiagramPointHash {
+ size_t operator()(const DiagramPoint& p) const{
+ //return std::hash<CoordinateType>()(p.x)^std::hash<CoordinateType>()(p.y)^std::hash<bool>()(p.type == DiagramPoint::NORMAL);
+ assert(p.id >= MinValidId);
+ return std::hash<int>()(p.id);
+ }
+};
+
+CoordinateType sqrDist(const Point& a, const Point& b);
+CoordinateType dist(const Point& a, const Point& b);
+CoordinateType distLInf(const DiagramPoint& a, const DiagramPoint& b);
+
+typedef std::unordered_set<Point, PointHash> PointSet;
+
+class DiagramPointSet {
+public:
+ void insert(const DiagramPoint p);
+ void erase(const DiagramPoint& p, bool doCheck = true); // if doCheck, erasing non-existing elements causes assert
+ void erase(const std::unordered_set<DiagramPoint, DiagramPointHash>::const_iterator it);
+ void removeDiagonalPoints();
+ size_t size() const;
+ void reserve(const size_t newSize);
+ void clear();
+ bool empty() const;
+ bool hasElement(const DiagramPoint& p) const;
+ std::unordered_set<DiagramPoint, DiagramPointHash>::iterator find(const DiagramPoint& p) { return points.find(p); };
+ std::unordered_set<DiagramPoint, DiagramPointHash>::const_iterator find(const DiagramPoint& p) const { return points.find(p); };
+ std::unordered_set<DiagramPoint, DiagramPointHash>::iterator begin() { return points.begin(); };
+ std::unordered_set<DiagramPoint, DiagramPointHash>::iterator end() { return points.end(); }
+ std::unordered_set<DiagramPoint, DiagramPointHash>::const_iterator cbegin() const { return points.cbegin(); }
+ std::unordered_set<DiagramPoint, DiagramPointHash>::const_iterator cend() const { return points.cend(); }
+ friend std::ostream& operator<<(std::ostream& output, const DiagramPointSet& ps);
+ friend void addProjections(DiagramPointSet& A, DiagramPointSet& B);
+ template<class PairIterator> DiagramPointSet(PairIterator first, PairIterator last);
+ template<class PairIterator> void fillIn(PairIterator first, PairIterator last);
+ // default ctor, empty diagram
+ DiagramPointSet(IdType minId = MinValidId + 1) : maxId(minId + 1) {};
+ IdType nextId() { return maxId + 1; }
+private:
+ bool isLinked { false };
+ IdType maxId {MinValidId + 1};
+ std::unordered_set<DiagramPoint, DiagramPointHash> points;
+};
+
+template<typename DiagPointContainer>
+CoordinateType getFurthestDistance3Approx(DiagPointContainer& A, DiagPointContainer& B)
+{
+ CoordinateType result { 0.0 };
+ DiagramPoint begA = *(A.begin());
+ DiagramPoint optB = *(B.begin());
+ for(const auto& pointB : B) {
+ if (distLInf(begA, pointB) > result) {
+ result = distLInf(begA, pointB);
+ optB = pointB;
+ }
+ }
+ for(const auto& pointA : A) {
+ if (distLInf(pointA, optB) > result) {
+ result = distLInf(pointA, optB);
+ }
+ }
+ return result;
+}
+
+template<class PairIterator>
+void DiagramPointSet::fillIn(PairIterator start, PairIterator end)
+{
+ isLinked = false;
+ clear();
+ IdType uniqueId = MinValidId + 1;
+ for(auto iter = start; iter != end; ++iter) {
+ insert(DiagramPoint(iter->first, iter->second, DiagramPoint::NORMAL, uniqueId++));
+ }
+}
+
+template<class PairIterator>
+DiagramPointSet::DiagramPointSet(PairIterator start, PairIterator end)
+{
+ fillIn(start, end);
+}
+
+// preprocess diagrams A and B: projections of the points of A onto the
+// diagonal are added to B, and vice versa. NB: ids of points will be changed!
+void addProjections(DiagramPointSet& A, DiagramPointSet& B);
+
+}
+#endif
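
A minimal sketch of building a DiagramPointSet from plain (birth, death) pairs; the coordinate values are made up, and projections onto the diagonal are added separately by addProjections():

std::vector<std::pair<double, double>> pairs { {0.0, 1.0}, {2.0, 5.0} };
DiagramPointSet ps(pairs.begin(), pairs.end());   // each pair becomes a NORMAL point
// ps.size() == 2; ids are assigned consecutively starting from MinValidId + 1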
diff --git a/geom_bottleneck/bottleneck/include/bottleneck.h b/geom_bottleneck/bottleneck/include/bottleneck.h
new file mode 100644
index 0000000..19ae89a
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/bottleneck.h
@@ -0,0 +1,100 @@
+/*
+    Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+ This file is part of GeomBottleneck.
+
+ GeomBottleneck is free software: you can redistribute it and/or modify
+ it under the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GeomBottleneck is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef BOTTLENECK_H
+#define BOTTLENECK_H
+
+
+#include <iostream>
+#include <fstream>
+#include <vector>
+#include <algorithm>
+#include <limits>
+#include <random>
+
+#include "basic_defs_bt.h"
+#include "bound_match.h"
+//#include "test_neighb_oracle.h"
+//#include "test_dist_calc.h"
+
+namespace geom_bt {
+typedef std::pair<double, std::pair<size_t, size_t>> DistVerticesPair;
+
+// functions taking DiagramPointSet as input.
+// ATTENTION: parameters A and B (diagrams) will be changed after the call
+// (projections added).
+
+// return the interval (distMin, distMax) such that:
+// a) actual bottleneck distance between A and B is contained in the interval
+// b) if the interval is not (0,0), then (distMax - distMin) / distMin < epsilon
+std::pair<double, double> bottleneckDistApproxInterval(DiagramPointSet& A, DiagramPointSet& B, const double epsilon);
+
+// get approximate distance,
+// see bottleneckDistApproxInterval
+double bottleneckDistApprox(DiagramPointSet& A, DiagramPointSet& B, const double epsilon);
+
+// get exact bottleneck distance,
+double bottleneckDistExact(DiagramPointSet& A, DiagramPointSet& B);
+
+// functions taking containers as input
+// template parameter PairContainer must be a container of pairs of real
+// numbers (pair.first = x-coordinate, pair.second = y-coordinate)
+// PairContainer class must support iteration of the form
+// for(it = pairContainer.begin(); it != pairContainer.end(); ++it)
+
+// return the interval (distMin, distMax) such that:
+// a) actual bottleneck distance between A and B is contained in the interval
+// b) if the interval is not (0,0), then (distMax - distMin) / distMin < epsilon
+template<class PairContainer>
+std::pair<double, double> bottleneckDistApproxInterval(PairContainer& A, PairContainer& B, const double epsilon)
+{
+ DiagramPointSet a(A.begin(), A.end());
+ DiagramPointSet b(B.begin(), B.end());
+ return bottleneckDistApproxInterval(a, b, epsilon);
+}
+
+// get approximate distance,
+// see bottleneckDistApproxInterval
+template<class PairContainer>
+double bottleneckDistApprox(PairContainer& A, PairContainer& B, const double epsilon)
+{
+ DiagramPointSet a(A.begin(), A.end());
+ DiagramPointSet b(B.begin(), B.end());
+ return bottleneckDistApprox(a, b, epsilon);
+}
+
+// get exact bottleneck distance,
+template<class PairContainer>
+double bottleneckDistExact(PairContainer& A, PairContainer& B)
+{
+ DiagramPointSet a(A.begin(), A.end());
+ DiagramPointSet b(B.begin(), B.end());
+ return bottleneckDistExact(a, b);
+}
+
+// fill in result with points from file fname
+// return false if file can't be opened
+// or error occurred while reading
+bool readDiagramPointSet(const char* fname, std::vector<std::pair<double, double>>& result);
+// wrapper for standard string
+bool readDiagramPointSet(const std::string& fname, std::vector<std::pair<double, double>>& result);
+
+}
+#endif
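
A usage sketch with the container-based overloads above (the file names and the relative error 0.01 are illustrative assumptions):

#include "bottleneck.h"

int main()
{
    std::vector<std::pair<double, double>> diagramA, diagramB;
    if (!geom_bt::readDiagramPointSet("diagramA.txt", diagramA) ||
        !geom_bt::readDiagramPointSet("diagramB.txt", diagramB))
        return 1;                                        // file missing or malformed
    double approx = geom_bt::bottleneckDistApprox(diagramA, diagramB, 0.01);
    double exact  = geom_bt::bottleneckDistExact(diagramA, diagramB);
    std::cout << approx << " " << exact << "\n";
    return 0;
}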
diff --git a/geom_bottleneck/bottleneck/include/bound_match.h b/geom_bottleneck/bottleneck/include/bound_match.h
new file mode 100644
index 0000000..2e2d369
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/bound_match.h
@@ -0,0 +1,80 @@
+/*
+    Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+ This file is part of GeomBottleneck.
+
+ GeomBottleneck is free software: you can redistribute it and/or modify
+ it under the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GeomBottleneck is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef BOUND_MATCH_H
+#define BOUND_MATCH_H
+
+#include <unordered_map>
+
+#include "basic_defs_bt.h"
+#include "neighb_oracle.h"
+
+
+namespace geom_bt {
+typedef std::vector<DiagramPoint> Path;
+
+class Matching {
+public:
+ Matching(const DiagramPointSet& AA, const DiagramPointSet& BB) : A(AA), B(BB) {};
+ DiagramPointSet getExposedVertices(bool forA = true) const ;
+ bool isExposed(const DiagramPoint& p) const;
+ void getAllAdjacentVertices(const DiagramPointSet& setIn, DiagramPointSet& setOut, bool forA = true) const;
+ void increase(const Path& augmentingPath);
+ void checkAugPath(const Path& augPath) const;
+ bool getMatchedVertex(const DiagramPoint& p, DiagramPoint& result) const;
+ bool isPerfect() const;
+ void trimMatching(const double newThreshold);
+ friend std::ostream& operator<<(std::ostream& output, const Matching& m);
+private:
+ DiagramPointSet A;
+ DiagramPointSet B;
+ std::unordered_map<DiagramPoint, DiagramPoint, DiagramPointHash> AToB, BToA;
+ void matchVertices(const DiagramPoint& pA, const DiagramPoint& pB);
+ void sanityCheck() const;
+};
+
+
+
+class BoundMatchOracle {
+public:
+ BoundMatchOracle(DiagramPointSet psA, DiagramPointSet psB, double dEps, bool useRS = true);
+ bool isMatchLess(double r);
+ void setInnerOracle(NeighbOracleAbstract* innerOracle) { neighbOracle = innerOracle; }
+ bool buildMatchingForThreshold(const double r);
+ ~BoundMatchOracle();
+private:
+ DiagramPointSet A, B;
+ Matching M;
+ void printLayerGraph(void);
+ void buildLayerGraph(double r);
+ void buildLayerOracles(double r);
+ bool buildAugmentingPath(const DiagramPoint startVertex, Path& result);
+ void removeFromLayer(const DiagramPoint& p, const int layerIdx);
+ NeighbOracleAbstract* neighbOracle;
+ bool augPathExist;
+ std::vector<DiagramPointSet> layerGraph;
+ std::vector<NeighbOracle*> layerOracles;
+ double distEpsilon;
+ bool useRangeSearch;
+ double prevQueryValue;
+};
+
+}
+#endif
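
Conceptually the oracle answers the decision problem "does a perfect matching exist at threshold r", and the distance routines drive it by narrowing an interval. A simplified sketch under that assumption (the real search in bottleneck.cpp chooses candidate thresholds more carefully, so this is only an illustration, and the isMatchLess semantics are assumed):

double bisectBottleneck(DiagramPointSet A, DiagramPointSet B,
                        double lo, double hi, double relErr, double dEps)
{
    BoundMatchOracle oracle(A, B, dEps);    // A, B already contain diagonal projections
    while (lo > 0.0 && (hi - lo) / lo >= relErr) {
        double mid = 0.5 * (lo + hi);
        if (oracle.isMatchLess(mid))        // assumed: perfect matching exists at radius mid
            hi = mid;
        else
            lo = mid;
    }
    return hi;
}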
diff --git a/geom_bottleneck/bottleneck/include/def_debug.h b/geom_bottleneck/bottleneck/include/def_debug.h
new file mode 100644
index 0000000..eaf356d
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/def_debug.h
@@ -0,0 +1,29 @@
+/*
+    Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+ This file is part of GeomBottleneck.
+
+ GeomBottleneck is free software: you can redistribute it and/or modify
+ it under the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GeomBottleneck is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef DEF_DEBUG_H
+#define DEF_DEBUG_H
+
+//#define DEBUG_BOUND_MATCH
+//#define DEBUG_NEIGHBOUR_ORACLE
+//#define DEBUG_MATCHING
+//#define DEBUG_AUCTION
+
+#endif
diff --git a/geom_bottleneck/bottleneck/include/neighb_oracle.h b/geom_bottleneck/bottleneck/include/neighb_oracle.h
new file mode 100644
index 0000000..f6f78b1
--- /dev/null
+++ b/geom_bottleneck/bottleneck/include/neighb_oracle.h
@@ -0,0 +1,91 @@
+/*
+    Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+ This file is part of GeomBottleneck.
+
+ GeomBottleneck is free software: you can redistribute it and/or modify
+ it under the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GeomBottleneck is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#ifndef NEIGHB_ORACLE_H
+#define NEIGHB_ORACLE_H
+
+#include <unordered_map>
+#include "basic_defs_bt.h"
+#include <ANN/ANN.h>
+
+namespace geom_bt {
+class NeighbOracleAbstract{
+public:
+ virtual void deletePoint(const DiagramPoint& p) = 0;
+ virtual void rebuild(const DiagramPointSet& S, double rr) = 0;
+    // return true if an r-neighbour of q exists in pointSet,
+    // false otherwise; the neighbour is returned in result
+ virtual bool getNeighbour(const DiagramPoint& q, DiagramPoint& result) const = 0;
+ virtual void getAllNeighbours(const DiagramPoint& q, std::vector<DiagramPoint>& result) = 0;
+ virtual ~NeighbOracleAbstract() {};
+protected:
+ double r;
+ double distEpsilon;
+};
+
+class NeighbOracleSimple : public NeighbOracleAbstract
+{
+public:
+ NeighbOracleSimple();
+ NeighbOracleSimple(const DiagramPointSet& S, const double rr, const double dEps);
+ void deletePoint(const DiagramPoint& p);
+ void rebuild(const DiagramPointSet& S, const double rr);
+ bool getNeighbour(const DiagramPoint& q, DiagramPoint& result) const;
+ void getAllNeighbours(const DiagramPoint& q, std::vector<DiagramPoint>& result);
+ ~NeighbOracleSimple() {};
+private:
+ DiagramPointSet pointSet;
+};
+
+class NeighbOracleAnn : public NeighbOracleAbstract
+{
+public:
+ NeighbOracleAnn(const DiagramPointSet& S, const double rr, const double dEps);
+ void deletePoint(const DiagramPoint& p);
+ void rebuild(const DiagramPointSet& S, const double rr);
+ bool getNeighbour(const DiagramPoint& q, DiagramPoint& result) const;
+ void getAllNeighbours(const DiagramPoint& q, std::vector<DiagramPoint>& result);
+ ~NeighbOracleAnn();
+//private:
+ //DiagramPointSet originalPointSet;
+ std::vector<DiagramPoint> allPoints;
+ DiagramPointSet diagonalPoints;
+ std::unordered_map<DiagramPoint, size_t, DiagramPointHash> pointIdxLookup;
+ // ann-stuff
+ static constexpr double annEpsilon {0};
+ static const int annK {1};
+ static const int annDim{2};
+ ANNpointArray annPoints;
+ ANNkd_tree* kdTree;
+ ANNidxArray annNeigbIndices;
+ ANNpoint annQueryPoint;
+ // to use in getAllNeighbours
+ ANNpoint lo;
+ ANNpoint hi;
+ ANNidxArray annIndices;
+ ANNdistArray annDistances;
+};
+
+//typedef NeighbOracleSimple NeighbOracle;
+typedef NeighbOracleAnn NeighbOracle;
+
+}
+#endif
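
In a nutshell: rebuild() indexes a point set at radius r, getNeighbour() answers single r-neighbour queries, and deletePoint() removes already matched points so they are not reported again. A schematic use (S, q, r and dEps are assumed to be prepared by the caller; this is not taken from the library's call sites):

NeighbOracle oracle(S, r, dEps);   // S is a DiagramPointSet
DiagramPoint nb = q;               // placeholder, overwritten on success
if (oracle.getNeighbour(q, nb))    // is some point of S within distance r of q?
    oracle.deletePoint(nb);        // e.g. once it has been matched to q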
diff --git a/geom_bottleneck/bottleneck/lib/dummy b/geom_bottleneck/bottleneck/lib/dummy
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/geom_bottleneck/bottleneck/lib/dummy
@@ -0,0 +1 @@
+
diff --git a/geom_bottleneck/bottleneck/src/ann/ANN.cpp b/geom_bottleneck/bottleneck/src/ann/ANN.cpp
new file mode 100644
index 0000000..7bae577
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/ANN.cpp
@@ -0,0 +1,230 @@
+//----------------------------------------------------------------------
+// File: ANN.cpp
+// Programmer: Sunil Arya and David Mount
+// Description: Methods for ANN.h and ANNx.h
+// Last modified: 01/27/10 (Version 1.1.2)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2010 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+// Added performance counting to annDist()
+// Revision 1.1.2 01/27/10
+// Fixed minor compilation bugs for new versions of gcc
+//----------------------------------------------------------------------
+
+#ifdef _WIN32
+#include <ciso646>	// make VS more conformant
+#endif
+
+#include <cstdlib> // C standard lib defs
+#include <ANN/ANNx.h> // all ANN includes
+#include <ANN/ANNperf.h> // ANN performance
+
+
+
+using namespace std; // make std:: accessible
+
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Point methods
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// Distance utility.
+// (Note: In the nearest neighbor search, most distances are
+// computed using partial distance calculations, not this
+// procedure.)
+//----------------------------------------------------------------------
+
+ANNdist annDist( // interpoint squared distance
+ int dim,
+ ANNpoint p,
+ ANNpoint q)
+{
+ register int d;
+ register ANNcoord diff;
+ register ANNcoord dist;
+
+ dist = 0;
+ for (d = 0; d < dim; d++) {
+ diff = p[d] - q[d];
+ dist = ANN_SUM(dist, ANN_POW(diff));
+ }
+ ANN_FLOP(3*dim) // performance counts
+ ANN_PTS(1)
+ ANN_COORD(dim)
+ return dist;
+}
+
+//----------------------------------------------------------------------
+// annPrintPoint() prints a point to a given output stream.
+//----------------------------------------------------------------------
+
+void annPrintPt( // print a point
+ ANNpoint pt, // the point
+ int dim, // the dimension
+ std::ostream &out) // output stream
+{
+ for (int j = 0; j < dim; j++) {
+ out << pt[j];
+ if (j < dim-1) out << " ";
+ }
+}
+
+//----------------------------------------------------------------------
+// Point allocation/deallocation:
+//
+// Points (somewhat like strings in C) are stored
+// as pointers. Consequently, creating and destroying
+// copies of points may require storage allocation. These
+// procedures do this.
+//
+// annAllocPt() and annDeallocPt() allocate and deallocate
+// storage for a single point; annAllocPt() returns a pointer to it.
+//
+// annAllocPts() allocates an array of points as well as a place
+// to store their coordinates, and initializes the points to
+// point to their respective coordinates. It allocates point
+// storage in a contiguous block large enough to store all the
+// points. It performs no initialization.
+//
+// annDeallocPts() should only be used on point arrays allocated
+// by annAllocPts since it assumes that points are allocated in
+// a block.
+//
+// annCopyPt() copies a point taking care to allocate storage
+// for the new point.
+//
+// annAssignRect() assigns the coordinates of one rectangle to
+// another. The two rectangles must have the same dimension
+// (and it is not possible to test this here).
+//----------------------------------------------------------------------
+
+ANNpoint annAllocPt(int dim, ANNcoord c) // allocate 1 point
+{
+ ANNpoint p = new ANNcoord[dim];
+ for (int i = 0; i < dim; i++) p[i] = c;
+ return p;
+}
+
+ANNpointArray annAllocPts(int n, int dim) // allocate n pts in dim
+{
+ ANNpointArray pa = new ANNpoint[n]; // allocate points
+ ANNpoint p = new ANNcoord[n*dim]; // allocate space for coords
+ for (int i = 0; i < n; i++) {
+ pa[i] = &(p[i*dim]);
+ }
+ return pa;
+}
+
+void annDeallocPt(ANNpoint &p) // deallocate 1 point
+{
+ delete [] p;
+ p = NULL;
+}
+
+void annDeallocPts(ANNpointArray &pa) // deallocate points
+{
+ delete [] pa[0]; // dealloc coordinate storage
+ delete [] pa; // dealloc points
+ pa = NULL;
+}
+
+ANNpoint annCopyPt(int dim, ANNpoint source) // copy point
+{
+ ANNpoint p = new ANNcoord[dim];
+ for (int i = 0; i < dim; i++) p[i] = source[i];
+ return p;
+}
+
+ // assign one rect to another
+void annAssignRect(int dim, ANNorthRect &dest, const ANNorthRect &source)
+{
+ for (int i = 0; i < dim; i++) {
+ dest.lo[i] = source.lo[i];
+ dest.hi[i] = source.hi[i];
+ }
+}
+
+ // is point inside rectangle?
+ANNbool ANNorthRect::inside(const int dim, ANNpoint p) const
+{
+ for (int i = 0; i < dim; i++) {
+ if (p[i] < lo[i] || p[i] > hi[i]) return ANNfalse;
+ }
+ return ANNtrue;
+}
+
+bool ANNorthRect::contains(const int dim, const ANNorthRect& r) const
+{
+ return this->inside(dim, r.hi) and this->inside(dim, r.lo);
+}
+
+bool ANNorthRect::intersects(const int dim, const ANNorthRect& r) const
+{
+ assert(dim == 2); // works for plane only
+ const ANNpoint otherLo = r.lo;
+ const ANNpoint otherHi = r.hi;
+ if ( otherLo[0] > hi[0] or
+ otherLo[1] > hi[1] or
+ otherHi[0] < lo[0] or
+ otherHi[1] < lo[1]) {
+ return false;
+ } else {
+ return true;
+ }
+}
+
+//----------------------------------------------------------------------
+// Error handler
+//----------------------------------------------------------------------
+
+void annError(const char* msg, ANNerr level)
+{
+ if (level == ANNabort) {
+ cerr << "ANN: ERROR------->" << msg << "<-------------ERROR\n";
+ exit(1);
+ }
+ else {
+ cerr << "ANN: WARNING----->" << msg << "<-------------WARNING\n";
+ }
+}
+
+//----------------------------------------------------------------------
+// Limit on number of points visited
+// We have an option for terminating the search early if the
+// number of points visited exceeds some threshold. If the
+// threshold is 0 (its default) this means there is no limit
+// and the algorithm applies its normal termination condition.
+// This is for applications where there are real time constraints
+// on the running time of the algorithm.
+//----------------------------------------------------------------------
+
+int ANNmaxPtsVisited = 0; // maximum number of pts visited
+int ANNptsVisited; // number of pts visited in search
+
+//----------------------------------------------------------------------
+// Global function declarations
+//----------------------------------------------------------------------
+
+void annMaxPtsVisit( // set limit on max. pts to visit in search
+ int maxPts) // the limit
+{
+ ANNmaxPtsVisited = maxPts;
+}
+}
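
A small self-contained sketch of the allocation helpers above (dimension and coordinate values are arbitrary):

int n = 4, dim = 2;
ANNpointArray pts = annAllocPts(n, dim);   // one contiguous coordinate block
for (int i = 0; i < n; ++i)
    for (int d = 0; d < dim; ++d)
        pts[i][d] = 0.0;                   // annAllocPts() leaves coordinates uninitialized
ANNpoint p = annCopyPt(dim, pts[0]);       // deep copy of the first point
annDeallocPt(p);                           // frees a point from annAllocPt()/annCopyPt()
annDeallocPts(pts);                        // must be paired with annAllocPts()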
diff --git a/geom_bottleneck/bottleneck/src/ann/bd_fix_rad_search.cpp b/geom_bottleneck/bottleneck/src/ann/bd_fix_rad_search.cpp
new file mode 100644
index 0000000..fe8ab78
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/bd_fix_rad_search.cpp
@@ -0,0 +1,64 @@
+//----------------------------------------------------------------------
+// File: bd_fix_rad_search.cpp
+// Programmer: David Mount
+// Description: Standard bd-tree search
+// Last modified: 05/03/05 (Version 1.1)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 1.1 05/03/05
+// Initial release
+//----------------------------------------------------------------------
+
+#include "bd_tree.h" // bd-tree declarations
+#include "kd_fix_rad_search.h" // kd-tree FR search declarations
+
+namespace geom_bt {
+
+//----------------------------------------------------------------------
+// Approximate searching for bd-trees.
+// See the file kd_FR_search.cpp for general information on the
+// approximate nearest neighbor search algorithm. Here we
+// include the extensions for shrinking nodes.
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// bd_shrink::ann_FR_search - search a shrinking node
+//----------------------------------------------------------------------
+
+void ANNbd_shrink::ann_FR_search(ANNdist box_dist)
+{
+ // check dist calc term cond.
+ if (ANNmaxPtsVisited != 0 && ANNptsVisited > ANNmaxPtsVisited) return;
+
+ ANNdist inner_dist = 0; // distance to inner box
+ for (int i = 0; i < n_bnds; i++) { // is query point in the box?
+ if (bnds[i].out(ANNkdFRQ)) { // outside this bounding side?
+ // add to inner distance
+ inner_dist = (ANNdist) ANN_SUM(inner_dist, bnds[i].dist(ANNkdFRQ));
+ }
+ }
+ if (inner_dist <= box_dist) { // if inner box is closer
+ child[ANN_IN]->ann_FR_search(inner_dist);// search inner child first
+ child[ANN_OUT]->ann_FR_search(box_dist);// ...then outer child
+ }
+ else { // if outer box is closer
+ child[ANN_OUT]->ann_FR_search(box_dist);// search outer child first
+        child[ANN_IN]->ann_FR_search(inner_dist);// ...then inner child
+ }
+ ANN_FLOP(3*n_bnds) // increment floating ops
+ ANN_SHR(1) // one more shrinking node
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/ann/bd_pr_search.cpp b/geom_bottleneck/bottleneck/src/ann/bd_pr_search.cpp
new file mode 100644
index 0000000..fb9dea6
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/bd_pr_search.cpp
@@ -0,0 +1,66 @@
+//----------------------------------------------------------------------
+// File: bd_pr_search.cpp
+// Programmer: David Mount
+// Description: Priority search for bd-trees
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+//History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#include "bd_tree.h" // bd-tree declarations
+#include "kd_pr_search.h" // kd priority search declarations
+
+
+namespace geom_bt {
+
+//----------------------------------------------------------------------
+// Approximate priority searching for bd-trees.
+//	See the file kd_pr_search.cpp for general information on the
+// approximate nearest neighbor priority search algorithm. Here
+// we include the extensions for shrinking nodes.
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+//	bd_shrink::ann_pri_search - search a shrinking node
+//----------------------------------------------------------------------
+
+void ANNbd_shrink::ann_pri_search(ANNdist box_dist)
+{
+ ANNdist inner_dist = 0; // distance to inner box
+ for (int i = 0; i < n_bnds; i++) { // is query point in the box?
+ if (bnds[i].out(ANNprQ)) { // outside this bounding side?
+ // add to inner distance
+ inner_dist = (ANNdist) ANN_SUM(inner_dist, bnds[i].dist(ANNprQ));
+ }
+ }
+ if (inner_dist <= box_dist) { // if inner box is closer
+ if (child[ANN_OUT] != KD_TRIVIAL) // enqueue outer if not trivial
+ ANNprBoxPQ->insert(box_dist,child[ANN_OUT]);
+ // continue with inner child
+ child[ANN_IN]->ann_pri_search(inner_dist);
+ }
+ else { // if outer box is closer
+ if (child[ANN_IN] != KD_TRIVIAL) // enqueue inner if not trivial
+ ANNprBoxPQ->insert(inner_dist,child[ANN_IN]);
+ // continue with outer child
+ child[ANN_OUT]->ann_pri_search(box_dist);
+ }
+ ANN_FLOP(3*n_bnds) // increment floating ops
+ ANN_SHR(1) // one more shrinking node
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/ann/bd_search.cpp b/geom_bottleneck/bottleneck/src/ann/bd_search.cpp
new file mode 100644
index 0000000..2935bcb
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/bd_search.cpp
@@ -0,0 +1,64 @@
+//----------------------------------------------------------------------
+// File: bd_search.cpp
+// Programmer: David Mount
+// Description: Standard bd-tree search
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#include "bd_tree.h" // bd-tree declarations
+#include "kd_search.h" // kd-tree search declarations
+
+namespace geom_bt {
+
+//----------------------------------------------------------------------
+// Approximate searching for bd-trees.
+// See the file kd_search.cpp for general information on the
+// approximate nearest neighbor search algorithm. Here we
+// include the extensions for shrinking nodes.
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// bd_shrink::ann_search - search a shrinking node
+//----------------------------------------------------------------------
+
+void ANNbd_shrink::ann_search(ANNdist box_dist)
+{
+ // check dist calc term cond.
+ if (ANNmaxPtsVisited != 0 && ANNptsVisited > ANNmaxPtsVisited) return;
+
+ ANNdist inner_dist = 0; // distance to inner box
+ for (int i = 0; i < n_bnds; i++) { // is query point in the box?
+ if (bnds[i].out(ANNkdQ)) { // outside this bounding side?
+ // add to inner distance
+ inner_dist = (ANNdist) ANN_SUM(inner_dist, bnds[i].dist(ANNkdQ));
+ }
+ }
+ if (inner_dist <= box_dist) { // if inner box is closer
+ child[ANN_IN]->ann_search(inner_dist); // search inner child first
+ child[ANN_OUT]->ann_search(box_dist); // ...then outer child
+ }
+ else { // if outer box is closer
+ child[ANN_OUT]->ann_search(box_dist); // search outer child first
+        child[ANN_IN]->ann_search(inner_dist); // ...then inner child
+ }
+ ANN_FLOP(3*n_bnds) // increment floating ops
+ ANN_SHR(1) // one more shrinking node
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/ann/bd_tree.cpp b/geom_bottleneck/bottleneck/src/ann/bd_tree.cpp
new file mode 100644
index 0000000..8c1ef6d
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/bd_tree.cpp
@@ -0,0 +1,419 @@
+//----------------------------------------------------------------------
+// File: bd_tree.cpp
+// Programmer: David Mount
+// Description: Basic methods for bd-trees.
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//	Revision 1.0  04/01/05
+// Fixed centroid shrink threshold condition to depend on the
+// dimension.
+// Moved dump routine to kd_dump.cpp.
+//----------------------------------------------------------------------
+
+#include "bd_tree.h" // bd-tree declarations
+#include "kd_util.h" // kd-tree utilities
+#include "kd_split.h" // kd-tree splitting rules
+
+#include <ANN/ANNperf.h> // performance evaluation
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Printing a bd-tree
+// These routines print a bd-tree. See the analogous procedure
+// in kd_tree.cpp for more information.
+//----------------------------------------------------------------------
+
+void ANNbd_shrink::print( // print shrinking node
+ int level, // depth of node in tree
+ ostream &out) // output stream
+{
+ child[ANN_OUT]->print(level+1, out); // print out-child
+
+ out << " ";
+ for (int i = 0; i < level; i++) // print indentation
+ out << "..";
+ out << "Shrink";
+ for (int j = 0; j < n_bnds; j++) { // print sides, 2 per line
+ if (j % 2 == 0) {
+ out << "\n"; // newline and indentation
+ for (int i = 0; i < level+2; i++) out << " ";
+ }
+ out << " ([" << bnds[j].cd << "]"
+ << (bnds[j].sd > 0 ? ">=" : "< ")
+ << bnds[j].cv << ")";
+ }
+ out << "\n";
+
+ child[ANN_IN]->print(level+1, out); // print in-child
+}
+
+//----------------------------------------------------------------------
+// kd_tree statistics utility (for performance evaluation)
+// This routine computes various statistics information for
+// shrinking nodes. See file kd_tree.cpp for more information.
+//----------------------------------------------------------------------
+
+void ANNbd_shrink::getStats( // get subtree statistics
+ int dim, // dimension of space
+ ANNkdStats &st, // stats (modified)
+ ANNorthRect &bnd_box) // bounding box
+{
+ ANNkdStats ch_stats; // stats for children
+ ANNorthRect inner_box(dim); // inner box of shrink
+
+ annBnds2Box(bnd_box, // enclosing box
+ dim, // dimension
+ n_bnds, // number of bounds
+ bnds, // bounds array
+ inner_box); // inner box (modified)
+ // get stats for inner child
+ ch_stats.reset(); // reset
+ child[ANN_IN]->getStats(dim, ch_stats, inner_box);
+ st.merge(ch_stats); // merge them
+ // get stats for outer child
+ ch_stats.reset(); // reset
+ child[ANN_OUT]->getStats(dim, ch_stats, bnd_box);
+ st.merge(ch_stats); // merge them
+
+ st.depth++; // increment depth
+ st.n_shr++; // increment number of shrinks
+}
+
+//----------------------------------------------------------------------
+// bd-tree constructor
+// This is the main constructor for bd-trees given a set of points.
+// It first builds a skeleton kd-tree as a basis, then computes the
+// bounding box of the data points, and then invokes rbd_tree() to
+// actually build the tree, passing it the appropriate splitting
+// and shrinking information.
+//----------------------------------------------------------------------
+
+ANNkd_ptr rbd_tree( // recursive construction of bd-tree
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices to store in subtree
+ int n, // number of points
+ int dim, // dimension of space
+ int bsp, // bucket space
+ ANNorthRect &bnd_box, // bounding box for current node
+ ANNkd_splitter splitter, // splitting routine
+ ANNshrinkRule shrink); // shrinking rule
+
+ANNbd_tree::ANNbd_tree( // construct from point array
+ ANNpointArray pa, // point array (with at least n pts)
+ int n, // number of points
+ int dd, // dimension
+ int bs, // bucket size
+ ANNsplitRule split, // splitting rule
+ ANNshrinkRule shrink) // shrinking rule
+ : ANNkd_tree(n, dd, bs) // build skeleton base tree
+{
+ pts = pa; // where the points are
+ if (n == 0) return; // no points--no sweat
+
+ ANNorthRect bnd_box(dd); // bounding box for points
+ // construct bounding rectangle
+ annEnclRect(pa, pidx, n, dd, bnd_box);
+ // copy to tree structure
+ bnd_box_lo = annCopyPt(dd, bnd_box.lo);
+ bnd_box_hi = annCopyPt(dd, bnd_box.hi);
+
+ switch (split) { // build by rule
+ case ANN_KD_STD: // standard kd-splitting rule
+ root = rbd_tree(pa, pidx, n, dd, bs, bnd_box, kd_split, shrink);
+ break;
+ case ANN_KD_MIDPT: // midpoint split
+ root = rbd_tree(pa, pidx, n, dd, bs, bnd_box, midpt_split, shrink);
+ break;
+ case ANN_KD_SUGGEST: // best (in our opinion)
+ case ANN_KD_SL_MIDPT: // sliding midpoint split
+ root = rbd_tree(pa, pidx, n, dd, bs, bnd_box, sl_midpt_split, shrink);
+ break;
+ case ANN_KD_FAIR: // fair split
+ root = rbd_tree(pa, pidx, n, dd, bs, bnd_box, fair_split, shrink);
+ break;
+ case ANN_KD_SL_FAIR: // sliding fair split
+ root = rbd_tree(pa, pidx, n, dd, bs,
+ bnd_box, sl_fair_split, shrink);
+ break;
+ default:
+ annError("Illegal splitting method", ANNabort);
+ }
+}
+
+//----------------------------------------------------------------------
+// Shrinking rules
+//----------------------------------------------------------------------
+
+enum ANNdecomp {SPLIT, SHRINK}; // decomposition methods
+
+//----------------------------------------------------------------------
+// trySimpleShrink - Attempt a simple shrink
+//
+// We compute the tight bounding box of the points, and compute
+// the 2*dim ``gaps'' between the sides of the tight box and the
+// bounding box. If any of the gaps is large enough relative to
+// the longest side of the tight bounding box, then we shrink
+// all sides whose gaps are large enough. (The reason for
+//	comparing against the tight bounding box is that after
+//	shrinking, the longest box side will decrease, and if we use
+// the standard bounding box, we may decide to shrink twice in
+// a row. Since the tight box is fixed, we cannot shrink twice
+// consecutively.)
+//----------------------------------------------------------------------
+const float BD_GAP_THRESH = 0.5; // gap threshold (must be < 1)
+const int BD_CT_THRESH = 2; // min number of shrink sides
+
+ANNdecomp trySimpleShrink( // try a simple shrink
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices to store in subtree
+ int n, // number of points
+ int dim, // dimension of space
+ const ANNorthRect &bnd_box, // current bounding box
+ ANNorthRect &inner_box) // inner box if shrinking (returned)
+{
+ int i;
+ // compute tight bounding box
+ annEnclRect(pa, pidx, n, dim, inner_box);
+
+ ANNcoord max_length = 0; // find longest box side
+ for (i = 0; i < dim; i++) {
+ ANNcoord length = inner_box.hi[i] - inner_box.lo[i];
+ if (length > max_length) {
+ max_length = length;
+ }
+ }
+
+ int shrink_ct = 0; // number of sides we shrunk
+ for (i = 0; i < dim; i++) { // select which sides to shrink
+ // gap between boxes
+ ANNcoord gap_hi = bnd_box.hi[i] - inner_box.hi[i];
+ // big enough gap to shrink?
+ if (gap_hi < max_length*BD_GAP_THRESH)
+ inner_box.hi[i] = bnd_box.hi[i]; // no - expand
+ else shrink_ct++; // yes - shrink this side
+
+                                        // repeat for low side
+ ANNcoord gap_lo = inner_box.lo[i] - bnd_box.lo[i];
+ if (gap_lo < max_length*BD_GAP_THRESH)
+ inner_box.lo[i] = bnd_box.lo[i]; // no - expand
+ else shrink_ct++; // yes - shrink this side
+ }
+
+ if (shrink_ct >= BD_CT_THRESH) // did we shrink enough sides?
+ return SHRINK;
+ else return SPLIT;
+}
+
+//----------------------------------------------------------------------
+// tryCentroidShrink - Attempt a centroid shrink
+//
+// We repeatedly apply the splitting rule, always to the larger subset
+// of points, until the number of points decreases by the constant
+// fraction BD_FRACTION. If this takes more than dim*BD_MAX_SPLIT_FAC
+//	splits, then we shrink to the final inner box.
+//	Otherwise we split.
+//----------------------------------------------------------------------
+
+const float BD_MAX_SPLIT_FAC = 0.5; // maximum number of splits allowed
+const float BD_FRACTION = 0.5; // ...to reduce points by this fraction
+ // ...This must be < 1.
+
+ANNdecomp tryCentroidShrink( // try a centroid shrink
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices to store in subtree
+ int n, // number of points
+ int dim, // dimension of space
+ const ANNorthRect &bnd_box, // current bounding box
+ ANNkd_splitter splitter, // splitting procedure
+ ANNorthRect &inner_box) // inner box if shrinking (returned)
+{
+ int n_sub = n; // number of points in subset
+    int n_goal = (int) (n*BD_FRACTION); // number of points in goal subset
+ int n_splits = 0; // number of splits needed
+ // initialize inner box to bounding box
+ annAssignRect(dim, inner_box, bnd_box);
+
+ while (n_sub > n_goal) { // keep splitting until goal reached
+ int cd; // cut dim from splitter (ignored)
+ ANNcoord cv; // cut value from splitter (ignored)
+ int n_lo; // number of points on low side
+ // invoke splitting procedure
+ (*splitter)(pa, pidx, inner_box, n_sub, dim, cd, cv, n_lo);
+ n_splits++; // increment split count
+
+ if (n_lo >= n_sub/2) { // most points on low side
+ inner_box.hi[cd] = cv; // collapse high side
+ n_sub = n_lo; // recurse on lower points
+ }
+ else { // most points on high side
+ inner_box.lo[cd] = cv; // collapse low side
+ pidx += n_lo; // recurse on higher points
+ n_sub -= n_lo;
+ }
+ }
+ if (n_splits > dim*BD_MAX_SPLIT_FAC)// took too many splits
+ return SHRINK; // shrink to final subset
+ else
+ return SPLIT;
+}
+
+//----------------------------------------------------------------------
+// selectDecomp - select which decomposition to use
+//----------------------------------------------------------------------
+
+ANNdecomp selectDecomp( // select decomposition method
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices to store in subtree
+ int n, // number of points
+ int dim, // dimension of space
+ const ANNorthRect &bnd_box, // current bounding box
+ ANNkd_splitter splitter, // splitting procedure
+ ANNshrinkRule shrink, // shrinking rule
+ ANNorthRect &inner_box) // inner box if shrinking (returned)
+{
+ ANNdecomp decomp = SPLIT; // decomposition
+
+ switch (shrink) { // check shrinking rule
+ case ANN_BD_NONE: // no shrinking allowed
+ decomp = SPLIT;
+ break;
+ case ANN_BD_SUGGEST: // author's suggestion
+ case ANN_BD_SIMPLE: // simple shrink
+ decomp = trySimpleShrink(
+ pa, pidx, // points and indices
+ n, dim, // number of points and dimension
+ bnd_box, // current bounding box
+ inner_box); // inner box if shrinking (returned)
+ break;
+ case ANN_BD_CENTROID: // centroid shrink
+ decomp = tryCentroidShrink(
+ pa, pidx, // points and indices
+ n, dim, // number of points and dimension
+ bnd_box, // current bounding box
+ splitter, // splitting procedure
+ inner_box); // inner box if shrinking (returned)
+ break;
+ default:
+ annError("Illegal shrinking rule", ANNabort);
+ }
+ return decomp;
+}
+
+//----------------------------------------------------------------------
+// rbd_tree - recursive procedure to build a bd-tree
+//
+// This is analogous to rkd_tree, but for bd-trees. See the
+// procedure rkd_tree() in kd_split.cpp for more information.
+//
+// If the number of points falls below the bucket size, then a
+// leaf node is created for the points. Otherwise we invoke the
+// procedure selectDecomp() which determines whether we are to
+// split or shrink. If splitting is chosen, then we essentially
+//	do exactly as rkd_tree() would, and apply the specified
+//	splitting procedure to the points. Otherwise, the selection
+// procedure returns a bounding box, from which we extract the
+// appropriate shrinking bounds, and create a shrinking node.
+// Finally the points are subdivided, and the procedure is
+// invoked recursively on the two subsets to form the children.
+//----------------------------------------------------------------------
+
+ANNkd_ptr rbd_tree( // recursive construction of bd-tree
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices to store in subtree
+ int n, // number of points
+ int dim, // dimension of space
+ int bsp, // bucket space
+ ANNorthRect &bnd_box, // bounding box for current node
+ ANNkd_splitter splitter, // splitting routine
+ ANNshrinkRule shrink) // shrinking rule
+{
+ ANNdecomp decomp; // decomposition method
+
+ ANNorthRect inner_box(dim); // inner box (if shrinking)
+
+ if (n <= bsp) { // n small, make a leaf node
+ if (n == 0) // empty leaf node
+ return KD_TRIVIAL; // return (canonical) empty leaf
+ else // construct the node and return
+ return new ANNkd_leaf(n, pidx);
+ }
+
+ decomp = selectDecomp( // select decomposition method
+ pa, pidx, // points and indices
+ n, dim, // number of points and dimension
+ bnd_box, // current bounding box
+ splitter, shrink, // splitting/shrinking methods
+ inner_box); // inner box if shrinking (returned)
+
+ if (decomp == SPLIT) { // split selected
+ int cd; // cutting dimension
+ ANNcoord cv; // cutting value
+ int n_lo; // number on low side of cut
+ // invoke splitting procedure
+ (*splitter)(pa, pidx, bnd_box, n, dim, cd, cv, n_lo);
+
+ ANNcoord lv = bnd_box.lo[cd]; // save bounds for cutting dimension
+ ANNcoord hv = bnd_box.hi[cd];
+
+ bnd_box.hi[cd] = cv; // modify bounds for left subtree
+ ANNkd_ptr lo = rbd_tree( // build left subtree
+ pa, pidx, n_lo, // ...from pidx[0..n_lo-1]
+ dim, bsp, bnd_box, splitter, shrink);
+ bnd_box.hi[cd] = hv; // restore bounds
+
+ bnd_box.lo[cd] = cv; // modify bounds for right subtree
+ ANNkd_ptr hi = rbd_tree( // build right subtree
+ pa, pidx + n_lo, n-n_lo,// ...from pidx[n_lo..n-1]
+ dim, bsp, bnd_box, splitter, shrink);
+ bnd_box.lo[cd] = lv; // restore bounds
+ // create the splitting node
+ return new ANNkd_split(cd, cv, lv, hv, lo, hi);
+ }
+ else { // shrink selected
+ int n_in; // number of points in box
+ int n_bnds; // number of bounding sides
+
+ annBoxSplit( // split points around inner box
+ pa, // points to split
+ pidx, // point indices
+ n, // number of points
+ dim, // dimension
+ inner_box, // inner box
+ n_in); // number of points inside (returned)
+
+ ANNkd_ptr in = rbd_tree( // build inner subtree pidx[0..n_in-1]
+ pa, pidx, n_in, dim, bsp, inner_box, splitter, shrink);
+    ANNkd_ptr out = rbd_tree(       // build outer subtree pidx[n_in..n-1]
+ pa, pidx+n_in, n - n_in, dim, bsp, bnd_box, splitter, shrink);
+
+ ANNorthHSArray bnds = NULL; // bounds (alloc in Box2Bnds and
+                                    // ...freed in bd_shrink destructor)
+
+ annBox2Bnds( // convert inner box to bounds
+ inner_box, // inner box
+ bnd_box, // enclosing box
+ dim, // dimension
+ n_bnds, // number of bounds (returned)
+ bnds); // bounds array (modified)
+
+ // return shrinking node
+ return new ANNbd_shrink(n_bnds, bnds, in, out);
+ }
+}
+}
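
[Editor's note] The split/shrink machinery above is driven entirely by the two rule arguments of the bd-tree constructor. The short sketch below is an illustrative example only, not part of this patch; it assumes the constructor signature shown above and the enum values (ANN_KD_SUGGEST, ANN_BD_SUGGEST, ANN_BD_CENTROID) declared in ANN/ANN.h.

    // Hypothetical usage sketch: choosing the splitting and shrinking rules.
    #include <ANN/ANN.h>

    ANNbd_tree* build_bd_tree(ANNpointArray pts, int n, int dim)
    {
        // ANN_KD_SUGGEST selects the sliding-midpoint split (see rbd_tree above);
        // ANN_BD_SUGGEST selects the simple shrink (trySimpleShrink), while
        // ANN_BD_CENTROID would route decomposition through tryCentroidShrink.
        return new ANNbd_tree(pts, n, dim,
                              1,                // bucket size
                              ANN_KD_SUGGEST,   // splitting rule
                              ANN_BD_SUGGEST);  // shrinking rule
    }
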
diff --git a/geom_bottleneck/bottleneck/src/ann/kd_dump.cpp b/geom_bottleneck/bottleneck/src/ann/kd_dump.cpp
new file mode 100644
index 0000000..64db9a7
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/kd_dump.cpp
@@ -0,0 +1,447 @@
+//----------------------------------------------------------------------
+// File:			kd_dump.cpp
+// Programmer: David Mount
+// Description: Dump and Load for kd- and bd-trees
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+// Moved dump out of kd_tree.cc into this file.
+// Added kd-tree load constructor.
+//----------------------------------------------------------------------
+// This file contains routines for dumping kd-trees and bd-trees and
+// reloading them. (It is an abuse of policy to include both kd- and
+// bd-tree routines in the same file, sorry. There should be no problem
+// in deleting the bd- versions of the routines if they are not
+// desired.)
+//----------------------------------------------------------------------
+
+#include "kd_tree.h" // kd-tree declarations
+#include "bd_tree.h" // bd-tree declarations
+
+using namespace std; // make std:: available
+
+namespace geom_bt {
+
+ //----------------------------------------------------------------------
+ // Constants
+ //----------------------------------------------------------------------
+
+ const int STRING_LEN = 500; // maximum string length
+ const double EPSILON = 1E-5; // small number for float comparison
+
+ enum ANNtreeType { KD_TREE, BD_TREE }; // tree types (used in loading)
+
+ //----------------------------------------------------------------------
+ // Procedure declarations
+ //----------------------------------------------------------------------
+
+ static ANNkd_ptr annReadDump( // read dump file
+ istream &in, // input stream
+ ANNtreeType tree_type, // type of tree expected
+ ANNpointArray &the_pts, // new points (if applic)
+ ANNidxArray &the_pidx, // point indices (returned)
+ int &the_dim, // dimension (returned)
+ int &the_n_pts, // number of points (returned)
+ int &the_bkt_size, // bucket size (returned)
+ ANNpoint &the_bnd_box_lo, // low bounding point
+ ANNpoint &the_bnd_box_hi); // high bounding point
+
+ static ANNkd_ptr annReadTree( // read tree-part of dump file
+ istream &in, // input stream
+ ANNtreeType tree_type, // type of tree expected
+ ANNidxArray the_pidx, // point indices (modified)
+ int &next_idx); // next index (modified)
+
+ //----------------------------------------------------------------------
+ // ANN kd- and bd-tree Dump Format
+ // The dump file begins with a header containing the version of
+ // ANN, an optional section containing the points, followed by
+ // a description of the tree. The tree is printed in preorder.
+ //
+ // Format:
+ // #ANN <version number> <comments> [END_OF_LINE]
+ // points <dim> <n_pts> (point coordinates: this is optional)
+ // 0 <xxx> <xxx> ... <xxx> (point indices and coordinates)
+ // 1 <xxx> <xxx> ... <xxx>
+ // ...
+ // tree <dim> <n_pts> <bkt_size>
+ // <xxx> <xxx> ... <xxx> (lower end of bounding box)
+ // <xxx> <xxx> ... <xxx> (upper end of bounding box)
+ // If the tree is null, then a single line "null" is
+ // output. Otherwise the nodes of the tree are printed
+ // one per line in preorder. Leaves and splitting nodes
+ // have the following formats:
+ // Leaf node:
+ // leaf <n_pts> <bkt[0]> <bkt[1]> ... <bkt[n-1]>
+ // Splitting nodes:
+ // split <cut_dim> <cut_val> <lo_bound> <hi_bound>
+ //
+ // For bd-trees:
+ //
+ // Shrinking nodes:
+ // shrink <n_bnds>
+ // <cut_dim> <cut_val> <side>
+ // <cut_dim> <cut_val> <side>
+ // ... (repeated n_bnds times)
+ //----------------------------------------------------------------------
+
+ void ANNkd_tree::Dump( // dump entire tree
+ ANNbool with_pts, // print points as well?
+ ostream &out) // output stream
+ {
+ out << "#ANN " << ANNversion << "\n";
+ out.precision(ANNcoordPrec); // use full precision in dumping
+ if (with_pts) { // print point coordinates
+ out << "points " << dim << " " << n_pts << "\n";
+ for (int i = 0; i < n_pts; i++) {
+ out << i << " ";
+ annPrintPt(pts[i], dim, out);
+ out << "\n";
+ }
+ }
+ out << "tree " // print tree elements
+ << dim << " "
+ << n_pts << " "
+ << bkt_size << "\n";
+
+ annPrintPt(bnd_box_lo, dim, out); // print lower bound
+ out << "\n";
+ annPrintPt(bnd_box_hi, dim, out); // print upper bound
+ out << "\n";
+
+ if (root == NULL) // empty tree?
+ out << "null\n";
+ else {
+ root->dump(out); // invoke printing at root
+ }
+ out.precision(0); // restore default precision
+ }
+
+ void ANNkd_split::dump( // dump a splitting node
+ ostream &out) // output stream
+ {
+ out << "split " << cut_dim << " " << cut_val << " ";
+ out << cd_bnds[ANN_LO] << " " << cd_bnds[ANN_HI] << "\n";
+
+ child[ANN_LO]->dump(out); // print low child
+ child[ANN_HI]->dump(out); // print high child
+ }
+
+ void ANNkd_leaf::dump( // dump a leaf node
+ ostream &out) // output stream
+ {
+ if (this == KD_TRIVIAL) { // canonical trivial leaf node
+ out << "leaf 0\n"; // leaf no points
+ }
+ else {
+ out << "leaf " << n_pts;
+ for (int j = 0; j < n_pts; j++) {
+ out << " " << bkt[j];
+ }
+ out << "\n";
+ }
+ }
+
+ void ANNbd_shrink::dump( // dump a shrinking node
+ ostream &out) // output stream
+ {
+ out << "shrink " << n_bnds << "\n";
+ for (int j = 0; j < n_bnds; j++) {
+ out << bnds[j].cd << " " << bnds[j].cv << " " << bnds[j].sd << "\n";
+ }
+ child[ANN_IN]->dump(out); // print in-child
+ child[ANN_OUT]->dump(out); // print out-child
+ }
+
+ //----------------------------------------------------------------------
+ // Load kd-tree from dump file
+ // This rebuilds a kd-tree which was dumped to a file. The dump
+ // file contains all the basic tree information according to a
+ // preorder traversal. We assume that the dump file also contains
+ // point data. (This is to guarantee the consistency of the tree.)
+ // If not, then an error is generated.
+ //
+ // Indirectly, this procedure allocates space for points, point
+ // indices, all nodes in the tree, and the bounding box for the
+ // tree. When the tree is destroyed, all but the points are
+ // deallocated.
+ //
+ // This routine calls annReadDump to do all the work.
+ //----------------------------------------------------------------------
+
+ ANNkd_tree::ANNkd_tree( // build from dump file
+ istream &in) // input stream for dump file
+ {
+ int the_dim; // local dimension
+ int the_n_pts; // local number of points
+        int the_bkt_size;           // local bucket size
+ ANNpoint the_bnd_box_lo; // low bounding point
+ ANNpoint the_bnd_box_hi; // high bounding point
+ ANNpointArray the_pts; // point storage
+ ANNidxArray the_pidx; // point index storage
+ ANNkd_ptr the_root; // root of the tree
+
+ the_root = annReadDump( // read the dump file
+ in, // input stream
+ KD_TREE, // expecting a kd-tree
+ the_pts, // point array (returned)
+ the_pidx, // point indices (returned)
+ the_dim, the_n_pts, the_bkt_size, // basic tree info (returned)
+ the_bnd_box_lo, the_bnd_box_hi); // bounding box info (returned)
+
+ // create a skeletal tree
+ SkeletonTree(the_n_pts, the_dim, the_bkt_size, the_pts, the_pidx);
+
+ bnd_box_lo = the_bnd_box_lo;
+ bnd_box_hi = the_bnd_box_hi;
+
+ root = the_root; // set the root
+ }
+
+ ANNbd_tree::ANNbd_tree( // build bd-tree from dump file
+ istream &in) : ANNkd_tree() // input stream for dump file
+ {
+ int the_dim; // local dimension
+ int the_n_pts; // local number of points
+        int the_bkt_size;           // local bucket size
+ ANNpoint the_bnd_box_lo; // low bounding point
+ ANNpoint the_bnd_box_hi; // high bounding point
+ ANNpointArray the_pts; // point storage
+ ANNidxArray the_pidx; // point index storage
+ ANNkd_ptr the_root; // root of the tree
+
+ the_root = annReadDump( // read the dump file
+ in, // input stream
+ BD_TREE, // expecting a bd-tree
+ the_pts, // point array (returned)
+ the_pidx, // point indices (returned)
+ the_dim, the_n_pts, the_bkt_size, // basic tree info (returned)
+ the_bnd_box_lo, the_bnd_box_hi); // bounding box info (returned)
+
+ // create a skeletal tree
+ SkeletonTree(the_n_pts, the_dim, the_bkt_size, the_pts, the_pidx);
+ bnd_box_lo = the_bnd_box_lo;
+ bnd_box_hi = the_bnd_box_hi;
+
+ root = the_root; // set the root
+ }
+
+ //----------------------------------------------------------------------
+ // annReadDump - read a dump file
+ //
+ // This procedure reads a dump file, constructs a kd-tree
+ // and returns all the essential information needed to actually
+ // construct the tree. Because this procedure is used for
+ // constructing both kd-trees and bd-trees, the second argument
+ // is used to indicate which type of tree we are expecting.
+ //----------------------------------------------------------------------
+
+ static ANNkd_ptr annReadDump(
+ istream &in, // input stream
+ ANNtreeType tree_type, // type of tree expected
+ ANNpointArray &the_pts, // new points (returned)
+ ANNidxArray &the_pidx, // point indices (returned)
+ int &the_dim, // dimension (returned)
+ int &the_n_pts, // number of points (returned)
+ int &the_bkt_size, // bucket size (returned)
+ ANNpoint &the_bnd_box_lo, // low bounding point (ret'd)
+ ANNpoint &the_bnd_box_hi) // high bounding point (ret'd)
+ {
+ int j;
+ char str[STRING_LEN]; // storage for string
+ char version[STRING_LEN]; // ANN version number
+ ANNkd_ptr the_root = NULL;
+
+ //------------------------------------------------------------------
+ // Input file header
+ //------------------------------------------------------------------
+ in >> str; // input header
+ if (strcmp(str, "#ANN") != 0) { // incorrect header
+ annError("Incorrect header for dump file", ANNabort);
+ }
+ in.getline(version, STRING_LEN); // get version (ignore)
+
+ //------------------------------------------------------------------
+ // Input the points
+ // An array the_pts is allocated and points are read from
+ // the dump file.
+ //------------------------------------------------------------------
+ in >> str; // get major heading
+ if (strcmp(str, "points") == 0) { // points section
+ in >> the_dim; // input dimension
+ in >> the_n_pts; // number of points
+ // allocate point storage
+ the_pts = annAllocPts(the_n_pts, the_dim);
+ for (int i = 0; i < the_n_pts; i++) { // input point coordinates
+ ANNidx idx; // point index
+ in >> idx; // input point index
+ if (idx < 0 || idx >= the_n_pts) {
+ annError("Point index is out of range", ANNabort);
+ }
+ for (j = 0; j < the_dim; j++) {
+ in >> the_pts[idx][j]; // read point coordinates
+ }
+ }
+ in >> str; // get next major heading
+ }
+ else { // no points were input
+ annError("Points must be supplied in the dump file", ANNabort);
+ }
+
+ //------------------------------------------------------------------
+ // Input the tree
+ // After the basic header information, we invoke annReadTree
+ // to do all the heavy work. We create our own array of
+ // point indices (so we can pass them to annReadTree())
+ // but we do not deallocate them. They will be deallocated
+ // when the tree is destroyed.
+ //------------------------------------------------------------------
+ if (strcmp(str, "tree") == 0) { // tree section
+ in >> the_dim; // read dimension
+ in >> the_n_pts; // number of points
+ in >> the_bkt_size; // bucket size
+ the_bnd_box_lo = annAllocPt(the_dim); // allocate bounding box pts
+ the_bnd_box_hi = annAllocPt(the_dim);
+
+ for (j = 0; j < the_dim; j++) { // read bounding box low
+ in >> the_bnd_box_lo[j];
+ }
+            for (j = 0; j < the_dim; j++) { // read bounding box high
+ in >> the_bnd_box_hi[j];
+ }
+ the_pidx = new ANNidx[the_n_pts]; // allocate point index array
+ int next_idx = 0; // number of indices filled
+ // read the tree and indices
+ the_root = annReadTree(in, tree_type, the_pidx, next_idx);
+ if (next_idx != the_n_pts) { // didn't see all the points?
+ annError("Didn't see as many points as expected", ANNwarn);
+ }
+ }
+ else {
+ annError("Illegal dump format. Expecting section heading", ANNabort);
+ }
+ return the_root;
+ }
+
+ //----------------------------------------------------------------------
+ // annReadTree - input tree and return pointer
+ //
+ // annReadTree reads in a node of the tree, makes any recursive
+ // calls as needed to input the children of this node (if internal).
+ // It returns a pointer to the node that was created. An array
+ // of point indices is given along with a pointer to the next
+ // available location in the array. As leaves are read, their
+ // point indices are stored here, and the point buckets point
+ // to the first entry in the array.
+ //
+ // Recall that these are the formats. The tree is given in
+ // preorder.
+ //
+ // Leaf node:
+ // leaf <n_pts> <bkt[0]> <bkt[1]> ... <bkt[n-1]>
+ // Splitting nodes:
+ // split <cut_dim> <cut_val> <lo_bound> <hi_bound>
+ //
+ // For bd-trees:
+ //
+ // Shrinking nodes:
+ // shrink <n_bnds>
+ // <cut_dim> <cut_val> <side>
+ // <cut_dim> <cut_val> <side>
+ // ... (repeated n_bnds times)
+ //----------------------------------------------------------------------
+
+ static ANNkd_ptr annReadTree(
+ istream &in, // input stream
+ ANNtreeType tree_type, // type of tree expected
+ ANNidxArray the_pidx, // point indices (modified)
+ int &next_idx) // next index (modified)
+ {
+ char tag[STRING_LEN]; // tag (leaf, split, shrink)
+ int n_pts; // number of points in leaf
+ int cd; // cut dimension
+ ANNcoord cv; // cut value
+ ANNcoord lb; // low bound
+ ANNcoord hb; // high bound
+ int n_bnds; // number of bounding sides
+ int sd; // which side
+
+ in >> tag; // input node tag
+
+ if (strcmp(tag, "null") == 0) { // null tree
+ return NULL;
+ }
+ //------------------------------------------------------------------
+ // Read a leaf
+ //------------------------------------------------------------------
+ if (strcmp(tag, "leaf") == 0) { // leaf node
+
+ in >> n_pts; // input number of points
+ int old_idx = next_idx; // save next_idx
+ if (n_pts == 0) { // trivial leaf
+ return KD_TRIVIAL;
+ }
+ else {
+ for (int i = 0; i < n_pts; i++) { // input point indices
+ in >> the_pidx[next_idx++]; // store in array of indices
+ }
+ }
+ return new ANNkd_leaf(n_pts, &the_pidx[old_idx]);
+ }
+ //------------------------------------------------------------------
+ // Read a splitting node
+ //------------------------------------------------------------------
+ else if (strcmp(tag, "split") == 0) { // splitting node
+
+ in >> cd >> cv >> lb >> hb;
+
+ // read low and high subtrees
+ ANNkd_ptr lc = annReadTree(in, tree_type, the_pidx, next_idx);
+ ANNkd_ptr hc = annReadTree(in, tree_type, the_pidx, next_idx);
+ // create new node and return
+ return new ANNkd_split(cd, cv, lb, hb, lc, hc);
+ }
+ //------------------------------------------------------------------
+ // Read a shrinking node (bd-tree only)
+ //------------------------------------------------------------------
+ else if (strcmp(tag, "shrink") == 0) { // shrinking node
+ if (tree_type != BD_TREE) {
+ annError("Shrinking node not allowed in kd-tree", ANNabort);
+ }
+
+ in >> n_bnds; // number of bounding sides
+ // allocate bounds array
+ ANNorthHSArray bds = new ANNorthHalfSpace[n_bnds];
+ for (int i = 0; i < n_bnds; i++) {
+ in >> cd >> cv >> sd; // input bounding halfspace
+ // copy to array
+ bds[i] = ANNorthHalfSpace(cd, cv, sd);
+ }
+ // read inner and outer subtrees
+ ANNkd_ptr ic = annReadTree(in, tree_type, the_pidx, next_idx);
+ ANNkd_ptr oc = annReadTree(in, tree_type, the_pidx, next_idx);
+ // create new node and return
+ return new ANNbd_shrink(n_bnds, bds, ic, oc);
+ }
+ else {
+ annError("Illegal node type in dump file", ANNabort);
+ exit(0); // to keep the compiler happy
+ }
+ }
+}
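
[Editor's note] A quick way to see the dump format described above in action is a dump/reload round trip. The sketch below is an illustrative example only, not part of this patch; it assumes the Dump() method and the istream load constructor defined in this file, and that the points section is written (the loader aborts without it).

    // Hypothetical usage sketch: dump a kd-tree and rebuild it from the dump.
    #include <ANN/ANN.h>
    #include <fstream>

    void dump_and_reload(ANNkd_tree& tree)
    {
        {
            std::ofstream out("tree.dmp");
            tree.Dump(ANNtrue, out);        // ANNtrue: also write the "points" section,
                                            // which the load constructor requires
        }
        std::ifstream in("tree.dmp");
        ANNkd_tree reloaded(in);            // rebuilt via annReadDump()/annReadTree()
        // ... query `reloaded` exactly like the original tree ...
    }
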
diff --git a/geom_bottleneck/bottleneck/src/ann/kd_fix_rad_search.cpp b/geom_bottleneck/bottleneck/src/ann/kd_fix_rad_search.cpp
new file mode 100644
index 0000000..1a4749e
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/kd_fix_rad_search.cpp
@@ -0,0 +1,185 @@
+//----------------------------------------------------------------------
+// File: kd_fix_rad_search.cpp
+// Programmer: Sunil Arya and David Mount
+// Description: Standard kd-tree fixed-radius kNN search
+// Last modified: 05/03/05 (Version 1.1)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 1.1 05/03/05
+// Initial release
+//----------------------------------------------------------------------
+
+#include "kd_fix_rad_search.h" // kd fixed-radius search decls
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Approximate fixed-radius k nearest neighbor search
+// The squared radius is provided, and this procedure finds the
+// k nearest neighbors within the radius, and returns the total
+// number of points lying within the radius.
+//
+// The method used for searching the kd-tree is a variation of the
+// nearest neighbor search used in kd_search.cpp, except that the
+// radius of the search ball is known. We refer the reader to that
+// file for the explanation of the recursive search procedure.
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// To keep argument lists short, a number of global variables
+// are maintained which are common to all the recursive calls.
+// These are given below.
+//----------------------------------------------------------------------
+
+int ANNkdFRDim; // dimension of space
+ANNpoint ANNkdFRQ; // query point
+ANNdist ANNkdFRSqRad; // squared radius search bound
+double ANNkdFRMaxErr; // max tolerable squared error
+ANNpointArray ANNkdFRPts; // the points
+ANNmin_k* ANNkdFRPointMK; // set of k closest points
+int ANNkdFRPtsVisited; // total points visited
+int ANNkdFRPtsInRange; // number of points in the range
+
+//----------------------------------------------------------------------
+// annkFRSearch - fixed radius search for k nearest neighbors
+//----------------------------------------------------------------------
+
+int ANNkd_tree::annkFRSearch(
+ ANNpoint q, // the query point
+ ANNdist sqRad, // squared radius search bound
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor indices (returned)
+    ANNdistArray dd,                // dist to near neighbors (returned)
+ double eps) // the error bound
+{
+ ANNkdFRDim = dim; // copy arguments to static equivs
+ ANNkdFRQ = q;
+ ANNkdFRSqRad = sqRad;
+ ANNkdFRPts = pts;
+ ANNkdFRPtsVisited = 0; // initialize count of points visited
+ ANNkdFRPtsInRange = 0; // ...and points in the range
+
+ ANNkdFRMaxErr = ANN_POW(1.0 + eps);
+ ANN_FLOP(2) // increment floating op count
+
+ ANNkdFRPointMK = new ANNmin_k(k); // create set for closest k points
+ // search starting at the root
+ root->ann_FR_search(annBoxDistance(q, bnd_box_lo, bnd_box_hi, dim));
+
+ for (int i = 0; i < k; i++) { // extract the k-th closest points
+ if (dd != NULL)
+ dd[i] = ANNkdFRPointMK->ith_smallest_key(i);
+ if (nn_idx != NULL)
+ nn_idx[i] = ANNkdFRPointMK->ith_smallest_info(i);
+ }
+
+ delete ANNkdFRPointMK; // deallocate closest point set
+ return ANNkdFRPtsInRange; // return final point count
+}
+
+//----------------------------------------------------------------------
+// kd_split::ann_FR_search - search a splitting node
+// Note: This routine is similar in structure to the standard kNN
+// search. It visits the subtree that is closer to the query point
+// first. For fixed-radius search, there is no benefit in visiting
+// one subtree before the other, but we maintain the same basic
+// code structure for the sake of uniformity.
+//----------------------------------------------------------------------
+
+void ANNkd_split::ann_FR_search(ANNdist box_dist)
+{
+ // check dist calc term condition
+ if (ANNmaxPtsVisited != 0 && ANNkdFRPtsVisited > ANNmaxPtsVisited) return;
+
+ // distance to cutting plane
+ ANNcoord cut_diff = ANNkdFRQ[cut_dim] - cut_val;
+
+ if (cut_diff < 0) { // left of cutting plane
+ child[ANN_LO]->ann_FR_search(box_dist);// visit closer child first
+
+ ANNcoord box_diff = cd_bnds[ANN_LO] - ANNkdFRQ[cut_dim];
+ if (box_diff < 0) // within bounds - ignore
+ box_diff = 0;
+ // distance to further box
+ box_dist = (ANNdist) ANN_SUM(box_dist,
+ ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
+
+ // visit further child if in range
+ if (box_dist * ANNkdFRMaxErr <= ANNkdFRSqRad)
+ child[ANN_HI]->ann_FR_search(box_dist);
+
+ }
+ else { // right of cutting plane
+ child[ANN_HI]->ann_FR_search(box_dist);// visit closer child first
+
+ ANNcoord box_diff = ANNkdFRQ[cut_dim] - cd_bnds[ANN_HI];
+ if (box_diff < 0) // within bounds - ignore
+ box_diff = 0;
+ // distance to further box
+ box_dist = (ANNdist) ANN_SUM(box_dist,
+ ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
+
+ // visit further child if close enough
+ if (box_dist * ANNkdFRMaxErr <= ANNkdFRSqRad)
+ child[ANN_LO]->ann_FR_search(box_dist);
+
+ }
+ ANN_FLOP(13) // increment floating ops
+ ANN_SPL(1) // one more splitting node visited
+}
+
+//----------------------------------------------------------------------
+// kd_leaf::ann_FR_search - search points in a leaf node
+// Note: The unreadability of this code is the result of
+// some fine tuning to replace indexing by pointer operations.
+//----------------------------------------------------------------------
+
+void ANNkd_leaf::ann_FR_search(ANNdist box_dist)
+{
+ register ANNdist dist; // distance to data point
+ register ANNcoord* pp; // data coordinate pointer
+ register ANNcoord* qq; // query coordinate pointer
+ register ANNcoord t;
+ register int d;
+
+ for (int i = 0; i < n_pts; i++) { // check points in bucket
+
+ pp = ANNkdFRPts[bkt[i]]; // first coord of next data point
+ qq = ANNkdFRQ; // first coord of query point
+ dist = 0;
+
+ for(d = 0; d < ANNkdFRDim; d++) {
+ ANN_COORD(1) // one more coordinate hit
+ ANN_FLOP(5) // increment floating ops
+
+ t = *(qq++) - *(pp++); // compute length and adv coordinate
+                // exceeds squared search radius?
+ if( (dist = ANN_SUM(dist, ANN_POW(t))) > ANNkdFRSqRad) {
+ break;
+ }
+ }
+
+        if (d >= ANNkdFRDim &&      // within the search radius?
+ (ANN_ALLOW_SELF_MATCH || dist!=0)) { // and no self-match problem
+ // add it to the list
+ ANNkdFRPointMK->insert(dist, bkt[i]);
+ ANNkdFRPtsInRange++; // increment point count
+ }
+ }
+ ANN_LEAF(1) // one more leaf node visited
+ ANN_PTS(n_pts) // increment points visited
+ ANNkdFRPtsVisited += n_pts; // increment number of points visited
+}
+}
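
[Editor's note] To make the calling convention concrete: the bound passed to annkFRSearch is a squared radius, and the return value is the total count of points inside the radius, of which at most k are reported. The sketch below is an illustrative example only, not part of this patch, assuming the annkFRSearch signature defined above.

    // Hypothetical usage sketch: fixed-radius k-NN query.
    #include <ANN/ANN.h>
    #include <cstdio>

    void radius_query(ANNkd_tree& tree, ANNpoint q, double radius)
    {
        const int k = 10;
        ANNidx idx[k];
        ANNdist dst[k];
        ANNdist sqRad = radius * radius;            // the bound is a SQUARED radius

        int in_range = tree.annkFRSearch(q, sqRad, k, idx, dst, 0.0);

        std::printf("%d points within radius %g\n", in_range, radius);
        for (int i = 0; i < k && i < in_range; i++)
            std::printf("  index %d, squared distance %g\n", idx[i], dst[i]);
    }
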
diff --git a/geom_bottleneck/bottleneck/src/ann/kd_pr_search.cpp b/geom_bottleneck/bottleneck/src/ann/kd_pr_search.cpp
new file mode 100644
index 0000000..73d643f
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/kd_pr_search.cpp
@@ -0,0 +1,221 @@
+//----------------------------------------------------------------------
+// File: kd_pr_search.cpp
+// Programmer: Sunil Arya and David Mount
+// Description: Priority search for kd-trees
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#include "kd_pr_search.h" // kd priority search declarations
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Approximate nearest neighbor searching by priority search.
+// The kd-tree is searched for an approximate nearest neighbor.
+// The point is returned through one of the arguments, and the
+// distance returned is the SQUARED distance to this point.
+//
+// The method used for searching the kd-tree is called priority
+// search. (It is described in Arya and Mount, ``Algorithms for
+// fast vector quantization,'' Proc. of DCC '93: Data Compression
+// Conference}, eds. J. A. Storer and M. Cohn, IEEE Press, 1993,
+// 381--390.)
+//
+// The cell of the kd-tree containing the query point is located,
+// and cells are visited in increasing order of distance from the
+// query point. This is done by placing each subtree which has
+// NOT been visited in a priority queue, according to the closest
+// distance of the corresponding enclosing rectangle from the
+// query point. The search stops when the distance to the nearest
+// remaining rectangle exceeds the distance to the nearest point
+// seen by a factor of more than 1/(1+eps). (Implying that any
+// point found subsequently in the search cannot be closer by more
+// than this factor.)
+//
+// The main entry point is annkPriSearch() which sets things up and
+//	then calls the recursive routine ann_pri_search(). This is a
+// recursive routine which performs the processing for one node in
+// the kd-tree. There are two versions of this virtual procedure,
+// one for splitting nodes and one for leaves. When a splitting node
+// is visited, we determine which child to continue the search on
+// (the closer one), and insert the other child into the priority
+// queue. When a leaf is visited, we compute the distances to the
+// points in the buckets, and update information on the closest
+// points.
+//
+// Some trickery is used to incrementally update the distance from
+// a kd-tree rectangle to the query point. This comes about from
+//	the fact that with each successive split, only one component
+// (along the dimension that is split) of the squared distance to
+// the child rectangle is different from the squared distance to
+// the parent rectangle.
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// To keep argument lists short, a number of global variables
+// are maintained which are common to all the recursive calls.
+// These are given below.
+//----------------------------------------------------------------------
+
+double ANNprEps; // the error bound
+int ANNprDim; // dimension of space
+ANNpoint ANNprQ; // query point
+double ANNprMaxErr; // max tolerable squared error
+ANNpointArray ANNprPts; // the points
+ANNpr_queue *ANNprBoxPQ; // priority queue for boxes
+ANNmin_k *ANNprPointMK; // set of k closest points
+
+//----------------------------------------------------------------------
+// annkPriSearch - priority search for k nearest neighbors
+//----------------------------------------------------------------------
+
+void ANNkd_tree::annkPriSearch(
+ ANNpoint q, // query point
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor indices (returned)
+ ANNdistArray dd, // dist to near neighbors (returned)
+    double eps)                     // the error bound
+{
+ // max tolerable squared error
+ ANNprMaxErr = ANN_POW(1.0 + eps);
+ ANN_FLOP(2) // increment floating ops
+
+ ANNprDim = dim; // copy arguments to static equivs
+ ANNprQ = q;
+ ANNprPts = pts;
+ ANNptsVisited = 0; // initialize count of points visited
+
+ ANNprPointMK = new ANNmin_k(k); // create set for closest k points
+
+ // distance to root box
+ ANNdist box_dist = annBoxDistance(q,
+ bnd_box_lo, bnd_box_hi, dim);
+
+ ANNprBoxPQ = new ANNpr_queue(n_pts);// create priority queue for boxes
+ ANNprBoxPQ->insert(box_dist, root); // insert root in priority queue
+
+ while (ANNprBoxPQ->non_empty() &&
+ (!(ANNmaxPtsVisited != 0 && ANNptsVisited > ANNmaxPtsVisited))) {
+ ANNkd_ptr np; // next box from prior queue
+        ANNkd_ptr np;               // next box from priority queue
+ // extract closest box from queue
+ ANNprBoxPQ->extr_min(box_dist, (void *&) np);
+
+ ANN_FLOP(2) // increment floating ops
+ if (box_dist*ANNprMaxErr >= ANNprPointMK->max_key())
+ break;
+
+ np->ann_pri_search(box_dist); // search this subtree.
+ }
+
+ for (int i = 0; i < k; i++) { // extract the k-th closest points
+ dd[i] = ANNprPointMK->ith_smallest_key(i);
+ nn_idx[i] = ANNprPointMK->ith_smallest_info(i);
+ }
+
+ delete ANNprPointMK; // deallocate closest point set
+ delete ANNprBoxPQ; // deallocate priority queue
+}
+
+//----------------------------------------------------------------------
+// kd_split::ann_pri_search - search a splitting node
+//----------------------------------------------------------------------
+
+void ANNkd_split::ann_pri_search(ANNdist box_dist)
+{
+ ANNdist new_dist; // distance to child visited later
+ // distance to cutting plane
+ ANNcoord cut_diff = ANNprQ[cut_dim] - cut_val;
+
+ if (cut_diff < 0) { // left of cutting plane
+ ANNcoord box_diff = cd_bnds[ANN_LO] - ANNprQ[cut_dim];
+ if (box_diff < 0) // within bounds - ignore
+ box_diff = 0;
+ // distance to further box
+ new_dist = (ANNdist) ANN_SUM(box_dist,
+ ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
+
+ if (child[ANN_HI] != KD_TRIVIAL)// enqueue if not trivial
+ ANNprBoxPQ->insert(new_dist, child[ANN_HI]);
+ // continue with closer child
+ child[ANN_LO]->ann_pri_search(box_dist);
+ }
+ else { // right of cutting plane
+ ANNcoord box_diff = ANNprQ[cut_dim] - cd_bnds[ANN_HI];
+ if (box_diff < 0) // within bounds - ignore
+ box_diff = 0;
+ // distance to further box
+ new_dist = (ANNdist) ANN_SUM(box_dist,
+ ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
+
+ if (child[ANN_LO] != KD_TRIVIAL)// enqueue if not trivial
+ ANNprBoxPQ->insert(new_dist, child[ANN_LO]);
+ // continue with closer child
+ child[ANN_HI]->ann_pri_search(box_dist);
+ }
+ ANN_SPL(1) // one more splitting node visited
+ ANN_FLOP(8) // increment floating ops
+}
+
+//----------------------------------------------------------------------
+// kd_leaf::ann_pri_search - search points in a leaf node
+//
+// This is virtually identical to the ann_search for standard search.
+//----------------------------------------------------------------------
+
+void ANNkd_leaf::ann_pri_search(ANNdist box_dist)
+{
+ register ANNdist dist; // distance to data point
+ register ANNcoord* pp; // data coordinate pointer
+ register ANNcoord* qq; // query coordinate pointer
+ register ANNdist min_dist; // distance to k-th closest point
+ register ANNcoord t;
+ register int d;
+
+ min_dist = ANNprPointMK->max_key(); // k-th smallest distance so far
+
+ for (int i = 0; i < n_pts; i++) { // check points in bucket
+
+ pp = ANNprPts[bkt[i]]; // first coord of next data point
+ qq = ANNprQ; // first coord of query point
+ dist = 0;
+
+ for(d = 0; d < ANNprDim; d++) {
+ ANN_COORD(1) // one more coordinate hit
+ ANN_FLOP(4) // increment floating ops
+
+ t = *(qq++) - *(pp++); // compute length and adv coordinate
+ // exceeds dist to k-th smallest?
+ if( (dist = ANN_SUM(dist, ANN_POW(t))) > min_dist) {
+ break;
+ }
+ }
+
+ if (d >= ANNprDim && // among the k best?
+ (ANN_ALLOW_SELF_MATCH || dist!=0)) { // and no self-match problem
+ // add it to the list
+ ANNprPointMK->insert(dist, bkt[i]);
+ min_dist = ANNprPointMK->max_key();
+ }
+ }
+ ANN_LEAF(1) // one more leaf node visited
+ ANN_PTS(n_pts) // increment points visited
+ ANNptsVisited += n_pts; // increment number of points visited
+}
+}
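
[Editor's note] From the caller's perspective, priority search is invoked exactly like the standard search; eps controls the (1+eps) approximation factor discussed above. The sketch below is an illustrative example only, not part of this patch, assuming the annkPriSearch signature defined above.

    // Hypothetical usage sketch: approximate k-NN by priority search.
    #include <ANN/ANN.h>

    void approx_knn(ANNkd_tree& tree, ANNpoint q, int k,
                    ANNidxArray idx, ANNdistArray dst)
    {
        double eps = 0.1;                   // each reported neighbor is within a
                                            // factor (1+eps) of the true distance
        tree.annkPriSearch(q, k, idx, dst, eps);
    }
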
diff --git a/geom_bottleneck/bottleneck/src/ann/kd_search.cpp b/geom_bottleneck/bottleneck/src/ann/kd_search.cpp
new file mode 100644
index 0000000..f559eb9
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/kd_search.cpp
@@ -0,0 +1,298 @@
+//----------------------------------------------------------------------
+// File: kd_search.cpp
+// Programmer: Sunil Arya and David Mount
+// Description: Standard kd-tree search
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+// Changed names LO, HI to ANN_LO, ANN_HI
+// --------------------------------------------------------------------
+// 2015 - modified by A. Nigmetov to support deletion of points
+//----------------------------------------------------------------------
+
+#include "kd_search.h" // kd-search declarations
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Approximate nearest neighbor searching by kd-tree search
+// The kd-tree is searched for an approximate nearest neighbor.
+// The point is returned through one of the arguments, and the
+// distance returned is the squared distance to this point.
+//
+// The method used for searching the kd-tree is an approximate
+// adaptation of the search algorithm described by Friedman,
+// Bentley, and Finkel, ``An algorithm for finding best matches
+// in logarithmic expected time,'' ACM Transactions on Mathematical
+// Software, 3(3):209-226, 1977.
+//
+// The algorithm operates recursively. When first encountering a
+// node of the kd-tree we first visit the child which is closest to
+// the query point. On return, we decide whether we want to visit
+// the other child. If the distance from the query point to the box
+// containing the other child exceeds 1/(1+eps) times the current best
+// distance, then we skip it (since any point found in this child cannot
+// be closer to the query point by more than this factor). Otherwise,
+// we visit it recursively.
+// The distance between a box and the query point is computed exactly
+// (not approximated as is often done in kd-trees), using incremental
+// distance updates, as described by Arya and Mount in ``Algorithms
+// for fast vector quantization,'' Proc. of DCC '93: Data Compression
+// Conference, eds. J. A. Storer and M. Cohn, IEEE Press, 1993,
+// 381-390.
+//
+// The main entry point is annkSearch(), which sets things up and
+// then calls the recursive routine ann_search(). This is a recursive
+// routine which performs the processing for one node in the kd-tree.
+// There are two versions of this virtual procedure, one for splitting
+// nodes and one for leaves. When a splitting node is visited, we
+// determine which child to visit first (the closer one), and visit
+// the other child on return. When a leaf is visited, we compute
+// the distances to the points in the buckets, and update information
+// on the closest points.
+//
+// Some trickery is used to incrementally update the distance from
+// a kd-tree rectangle to the query point. This comes about from
+// the fact that with each successive split, only one component
+// (along the dimension that is split) of the squared distance to
+// the child rectangle is different from the squared distance to
+// the parent rectangle.
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// To keep argument lists short, a number of global variables
+// are maintained which are common to all the recursive calls.
+// These are given below.
+//----------------------------------------------------------------------
+
+int ANNkdDim; // dimension of space
+ANNpoint ANNkdQ; // query point
+double ANNkdMaxErr; // max tolerable squared error
+ANNpointArray ANNkdPts; // the points
+ANNmin_k *ANNkdPointMK; // set of k closest points
+
+//----------------------------------------------------------------------
+// annkSearch - search for the k nearest neighbors
+//----------------------------------------------------------------------
+
+void ANNkd_tree::annkSearch(
+ ANNpoint q, // the query point
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor indices (returned)
+ ANNdistArray dd, // approximate nearest neighbor distances (returned)
+ double eps) // the error bound
+{
+
+ ANNkdDim = dim; // copy arguments to static equivs
+ ANNkdQ = q;
+ ANNkdPts = pts;
+ ANNptsVisited = 0; // initialize count of points visited
+
+ if (k > actual_num_points) { // too many near neighbors?
+ annError("Requesting more near neighbors than data points", ANNabort);
+ }
+
+ ANNkdMaxErr = ANN_POW(1.0 + eps);
+ ANN_FLOP(2) // increment floating op count
+
+ ANNkdPointMK = new ANNmin_k(k); // create set for closest k points
+ // search starting at the root
+ root->ann_search(annBoxDistance(q, bnd_box_lo, bnd_box_hi, dim));
+
+ for (int i = 0; i < k; i++) { // extract the k closest points
+ dd[i] = ANNkdPointMK->ith_smallest_key(i);
+ nn_idx[i] = ANNkdPointMK->ith_smallest_info(i);
+ }
+ delete ANNkdPointMK; // deallocate closest point set
+}
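+
+//----------------------------------------------------------------------
+// Illustrative sketch: a caller would typically drive annkSearch()
+// roughly as follows, assuming the usual allocation helpers and
+// constructor defaults from ANN.h; `dataPts`, `queryPt`, `n`, `dim`,
+// `k` and `eps` are placeholders.
+//
+//      ANNpointArray dataPts = annAllocPts(n, dim);    // fill with data
+//      ANNpoint      queryPt = annAllocPt(dim);        // fill with query
+//      ANNidxArray   nnIdx   = new ANNidx[k];          // neighbor indices
+//      ANNdistArray  dists   = new ANNdist[k];         // squared distances
+//
+//      ANNkd_tree tree(dataPts, n, dim);               // build the tree
+//      tree.annkSearch(queryPt, k, nnIdx, dists, eps); // run the query
+//
+//      delete [] nnIdx; delete [] dists;               // clean up
+//      annDeallocPt(queryPt); annDeallocPts(dataPts);
+//      annClose();                                     // free KD_TRIVIAL
+//----------------------------------------------------------------------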
+
+//----------------------------------------------------------------------
+// kd_split::ann_search - search a splitting node
+//----------------------------------------------------------------------
+
+void ANNkd_split::ann_search(ANNdist box_dist)
+{
+ // check if the subtree is empty
+ if (0 == actual_num_points) return;
+ // check dist calc term condition
+ if (ANNmaxPtsVisited != 0 && ANNptsVisited > ANNmaxPtsVisited) return;
+
+ // distance to cutting plane
+ ANNcoord cut_diff = ANNkdQ[cut_dim] - cut_val;
+
+ if (cut_diff < 0) { // left of cutting plane
+ child[ANN_LO]->ann_search(box_dist);// visit closer child first
+
+ ANNcoord box_diff = cd_bnds[ANN_LO] - ANNkdQ[cut_dim];
+ if (box_diff < 0) // within bounds - ignore
+ box_diff = 0;
+ // distance to further box
+ box_dist = (ANNdist) ANN_SUM(box_dist,
+ ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
+
+ // visit further child if close enough
+ if (box_dist * ANNkdMaxErr < ANNkdPointMK->max_key())
+ child[ANN_HI]->ann_search(box_dist);
+
+ }
+ else { // right of cutting plane
+ child[ANN_HI]->ann_search(box_dist);// visit closer child first
+
+ ANNcoord box_diff = ANNkdQ[cut_dim] - cd_bnds[ANN_HI];
+ if (box_diff < 0) // within bounds - ignore
+ box_diff = 0;
+ // distance to further box
+ box_dist = (ANNdist) ANN_SUM(box_dist,
+ ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
+
+ // visit further child if close enough
+ if (box_dist * ANNkdMaxErr < ANNkdPointMK->max_key())
+ child[ANN_LO]->ann_search(box_dist);
+
+ }
+ ANN_FLOP(10) // increment floating ops
+ ANN_SPL(1) // one more splitting node visited
+}
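+
+//----------------------------------------------------------------------
+// Worked example of the incremental distance update above.  With ANN's
+// default Euclidean macros (ANN_POW(v) = v*v, ANN_DIFF(x,y) = y-x) the
+// update reads  box_dist += cut_diff^2 - box_diff^2.  For instance, if
+// box_dist = 5, the query lies inside the node's box along cut_dim
+// (box_diff clamped to 0) and 2 units to the left of the cutting plane
+// (cut_diff = -2), then the distance to the further child's box becomes
+// 5 + (4 - 0) = 9: only the component along cut_dim changes, as the
+// header comment of this file describes.
+//----------------------------------------------------------------------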
+
+//----------------------------------------------------------------------
+// kd_leaf::ann_search - search points in a leaf node
+// Note: The unreadability of this code is the result of
+// some fine tuning to replace indexing by pointer operations.
+//----------------------------------------------------------------------
+
+void ANNkd_leaf::ann_search(ANNdist box_dist)
+{
+ register ANNdist dist; // distance to data point
+ register ANNcoord* pp; // data coordinate pointer
+ register ANNcoord* qq; // query coordinate pointer
+ register ANNdist min_dist; // distance to k-th closest point
+ register ANNcoord t;
+ register int d;
+
+ min_dist = ANNkdPointMK->max_key(); // k-th smallest distance so far
+
+ for (int i = 0; i < n_pts; i++) { // check points in bucket
+
+ pp = ANNkdPts[bkt[i]]; // first coord of next data point
+ qq = ANNkdQ; // first coord of query point
+ dist = 0;
+
+ for(d = 0; d < ANNkdDim; d++) {
+ ANN_COORD(1) // one more coordinate hit
+ ANN_FLOP(4) // increment floating ops
+
+ t = *(qq++) - *(pp++); // compute length and adv coordinate
+ // exceeds dist to k-th smallest?
+ if( (dist = ANN_SUM(dist, ANN_POW(t))) > min_dist) {
+ break;
+ }
+ }
+
+ if (d >= ANNkdDim && // among the k best?
+ (ANN_ALLOW_SELF_MATCH || dist!=0)) { // and no self-match problem
+ // add it to the list
+ ANNkdPointMK->insert(dist, bkt[i]);
+ min_dist = ANNkdPointMK->max_key();
+ }
+ }
+ ANN_LEAF(1) // one more leaf node visited
+ ANN_PTS(n_pts) // increment points visited
+ ANNptsVisited += n_pts; // increment number of points visited
+}
+
+
+
+////////////////////////////////////////////////
+// range search
+////////////////////////////////////////////////
+
+void ANNkd_tree::range_search(const ANNorthRect& region,
+ std::vector<size_t>& point_indices)
+{
+
+ // get bounding box of the root
+ ANNorthRect bnd_box = ANNorthRect(dim, bnd_box_lo, bnd_box_hi);
+ root->range_search(region, dim, pts, bnd_box, point_indices);
+}
+
+void ANNkd_split::range_search(const ANNorthRect& region,
+ int ANNkdDim,
+ ANNpointArray ANNkdPts,
+ ANNorthRect& bnd_box,
+ std::vector<size_t>& point_indices)
+{
+ // check if the subtree is empty
+ if (0 == actual_num_points) return;
+
+ // process left child
+ ANNcoord old_bnd_box_val = bnd_box.hi[cut_dim];
+ bnd_box.hi[cut_dim] = cut_val;
+ if (region.contains(ANNkdDim, bnd_box)) {
+ child[ANN_LO]->range_search_add(point_indices);
+ } else if (region.intersects(ANNkdDim, bnd_box)) {
+ child[ANN_LO]->range_search(region, ANNkdDim, ANNkdPts, bnd_box, point_indices);
+ }
+ // restore bounding box
+ bnd_box.hi[cut_dim] = old_bnd_box_val;
+ // process right child
+ old_bnd_box_val = bnd_box.lo[cut_dim];
+ bnd_box.lo[cut_dim] = cut_val;
+ if (region.contains(ANNkdDim, bnd_box)) {
+ child[ANN_HI]->range_search_add(point_indices);
+ } else if (region.intersects(ANNkdDim, bnd_box)) {
+ child[ANN_HI]->range_search(region, ANNkdDim, ANNkdPts, bnd_box, point_indices);
+ }
+ // restore bounding box
+ bnd_box.lo[cut_dim] = old_bnd_box_val;
+}
+
+void ANNkd_leaf::range_search(const ANNorthRect& region,
+ int ANNkdDim,
+ ANNpointArray ANNkdPts,
+ ANNorthRect&, // nameless parameter to suppress
+ // warnings and allow recursion
+ // in splitting node
+ std::vector<size_t>& point_indices)
+{
+ for (int i = 0; i < n_pts; i++) { // check points in bucket
+ if (region.inside(ANNkdDim, ANNkdPts[bkt[i]]) == ANNtrue) {
+ //std::cout << "adding point, i = " << i;
+ //std::cout << ", x = " << ANNkdPts[bkt[i]][0];
+ //std::cout << ", y = " << ANNkdPts[bkt[i]][1] << std::endl;
+ point_indices.push_back(bkt[i]);
+ }
+ }
+}
+
+void ANNkd_split::range_search_add(std::vector<size_t>& point_indices)
+{
+ if ( 0 == actual_num_points )
+ return;
+ child[ANN_LO]->range_search_add(point_indices);
+ child[ANN_HI]->range_search_add(point_indices);
+}
+
+void ANNkd_leaf::range_search_add(std::vector<size_t>& point_indices)
+{
+ if ( 0 == actual_num_points )
+ return;
+ for (int i = 0; i < n_pts; i++) { // add all points in a bucket
+ //std::cout << "adding point without checking, i = " << i <<", bkt[i] = " << bkt[i] << std::endl;
+ point_indices.push_back(bkt[i]);
+ }
+}
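+
+//----------------------------------------------------------------------
+// Illustrative sketch: the range search above might be driven roughly
+// as follows, collecting the indices of all points inside an
+// axis-parallel box; `tree`, `lo` and `hi` are placeholders.
+//
+//      ANNpoint lo = annAllocPt(dim);          // low corner of the box
+//      ANNpoint hi = annAllocPt(dim);          // high corner of the box
+//      // ... fill lo[] and hi[] ...
+//      ANNorthRect region(dim, lo, hi);
+//      std::vector<size_t> found;
+//      tree.range_search(region, found);       // indices of points inside
+//----------------------------------------------------------------------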
+}
diff --git a/geom_bottleneck/bottleneck/src/ann/kd_split.cpp b/geom_bottleneck/bottleneck/src/ann/kd_split.cpp
new file mode 100644
index 0000000..7979aaa
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/kd_split.cpp
@@ -0,0 +1,632 @@
+//----------------------------------------------------------------------
+// File: kd_split.cpp
+// Programmer: Sunil Arya and David Mount
+// Description: Methods for splitting kd-trees
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+//----------------------------------------------------------------------
+
+#include "kd_tree.h" // kd-tree definitions
+#include "kd_util.h" // kd-tree utilities
+#include "kd_split.h" // splitting functions
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Constants
+//----------------------------------------------------------------------
+
+const double ERR = 0.001; // a small value
+const double FS_ASPECT_RATIO = 3.0; // maximum allowed aspect ratio
+ // in fair split. Must be >= 2.
+
+//----------------------------------------------------------------------
+// NOTE: Virtually all point indexing is done through an index (i.e.
+// permutation) array pidx. Consequently, a reference to the d-th
+// coordinate of the i-th point is pa[pidx[i]][d]. The macro PA(i,d)
+// is a shorthand for this.
+//----------------------------------------------------------------------
+ // standard 2-d indirect indexing
+#define PA(i,d) (pa[pidx[(i)]][(d)])
+ // accessing a single point
+#define PP(i) (pa[pidx[(i)]])
+
+
+//----------------------------------------------------------------------
+// kd_split - Bentley's standard splitting routine for kd-trees
+// Find the dimension of the greatest spread, and split
+// just before the median point along this dimension.
+//----------------------------------------------------------------------
+
+void kd_split(
+ ANNpointArray pa, // point array (permuted on return)
+ ANNidxArray pidx, // point indices
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo) // num of points on low side (returned)
+{
+ // find dimension of maximum spread
+ cut_dim = annMaxSpread(pa, pidx, n, dim);
+ n_lo = n/2; // median rank
+ // split about median
+ annMedianSplit(pa, pidx, n, cut_dim, cut_val, n_lo);
+}
+
+//----------------------------------------------------------------------
+// midpt_split - midpoint splitting rule for box-decomposition trees
+//
+// This is the simplest splitting rule that guarantees boxes
+// of bounded aspect ratio. It simply cuts the box with the
+// longest side through its midpoint. If there are ties, it
+// selects the dimension with the maximum point spread.
+//
+// WARNING: This routine (while simple) doesn't seem to work
+// well in practice in high dimensions, because it tends to
+// generate a large number of trivial and/or unbalanced splits.
+// Either kd_split(), sl_midpt_split(), or fair_split() are
+// recommended, instead.
+//----------------------------------------------------------------------
+
+void midpt_split(
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo) // num of points on low side (returned)
+{
+ int d;
+
+ ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
+ for (d = 1; d < dim; d++) { // find length of longest box side
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ if (length > max_length) {
+ max_length = length;
+ }
+ }
+ ANNcoord max_spread = -1; // find long side with most spread
+ for (d = 0; d < dim; d++) {
+ // is it among longest?
+ if (double(bnds.hi[d] - bnds.lo[d]) >= (1-ERR)*max_length) {
+ // compute its spread
+ ANNcoord spr = annSpread(pa, pidx, n, d);
+ if (spr > max_spread) { // is it max so far?
+ max_spread = spr;
+ cut_dim = d;
+ }
+ }
+ }
+ // split along cut_dim at midpoint
+ cut_val = (bnds.lo[cut_dim] + bnds.hi[cut_dim]) / 2;
+ // permute points accordingly
+ int br1, br2;
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ //------------------------------------------------------------------
+ // On return: pa[0..br1-1] < cut_val
+ // pa[br1..br2-1] == cut_val
+ // pa[br2..n-1] > cut_val
+ //
+ // We can set n_lo to any value in the range [br1..br2].
+ // We choose split so that points are most evenly divided.
+ //------------------------------------------------------------------
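+ //------------------------------------------------------------------
+ // Small worked example: with n = 10, br1 = 3 and br2 = 7 the median
+ // rank n/2 = 5 lies inside [br1..br2], so n_lo = 5; with br1 = 7 and
+ // br2 = 9 the whole range lies above n/2, so n_lo = br1 = 7.
+ //------------------------------------------------------------------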
+ if (br1 > n/2) n_lo = br1;
+ else if (br2 < n/2) n_lo = br2;
+ else n_lo = n/2;
+}
+
+//----------------------------------------------------------------------
+// sl_midpt_split - sliding midpoint splitting rule
+//
+// This is a modification of midpt_split, which has the nonsensical
+// name "sliding midpoint". The idea is that we try to use the
+// midpoint rule, by bisecting the longest side. If there are
+// ties, the dimension with the maximum spread is selected. If,
+// however, the midpoint split produces a trivial split (no points
+// on one side of the splitting plane) then we slide the splitting
+// (maintaining its orientation) until it produces a nontrivial
+// split. For example, if the splitting plane is along the x-axis,
+// and all the data points have x-coordinate less than the x-bisector,
+// then the split is taken along the maximum x-coordinate of the
+// data points.
+//
+// Intuitively, this rule cannot generate trivial splits, and
+// hence avoids midpt_split's tendency to produce trees with
+// a very large number of nodes.
+//
+//----------------------------------------------------------------------
+
+void sl_midpt_split(
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo) // num of points on low side (returned)
+{
+ int d;
+
+ ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
+ for (d = 1; d < dim; d++) { // find length of longest box side
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ if (length > max_length) {
+ max_length = length;
+ }
+ }
+ ANNcoord max_spread = -1; // find long side with most spread
+ for (d = 0; d < dim; d++) {
+ // is it among longest?
+ if ((bnds.hi[d] - bnds.lo[d]) >= (1-ERR)*max_length) {
+ // compute its spread
+ ANNcoord spr = annSpread(pa, pidx, n, d);
+ if (spr > max_spread) { // is it max so far?
+ max_spread = spr;
+ cut_dim = d;
+ }
+ }
+ }
+ // ideal split at midpoint
+ ANNcoord ideal_cut_val = (bnds.lo[cut_dim] + bnds.hi[cut_dim])/2;
+
+ ANNcoord min, max;
+ annMinMax(pa, pidx, n, cut_dim, min, max); // find min/max coordinates
+
+ if (ideal_cut_val < min) // slide to min or max as needed
+ cut_val = min;
+ else if (ideal_cut_val > max)
+ cut_val = max;
+ else
+ cut_val = ideal_cut_val;
+
+ // permute points accordingly
+ int br1, br2;
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ //------------------------------------------------------------------
+ // On return: pa[0..br1-1] < cut_val
+ // pa[br1..br2-1] == cut_val
+ // pa[br2..n-1] > cut_val
+ //
+ // We can set n_lo to any value in the range [br1..br2] to satisfy
+ // the exit conditions of the procedure.
+ //
+ // if ideal_cut_val < min (implying br2 >= 1),
+ // then we select n_lo = 1 (so there is one point on left) and
+ // if ideal_cut_val > max (implying br1 <= n-1),
+ // then we select n_lo = n-1 (so there is one point on right).
+ // Otherwise, we select n_lo as close to n/2 as possible within
+ // [br1..br2].
+ //------------------------------------------------------------------
+ if (ideal_cut_val < min) n_lo = 1;
+ else if (ideal_cut_val > max) n_lo = n-1;
+ else if (br1 > n/2) n_lo = br1;
+ else if (br2 < n/2) n_lo = br2;
+ else n_lo = n/2;
+}
+
+//----------------------------------------------------------------------
+// fair_split - fair-split splitting rule
+//
+// This is a compromise between the kd-tree splitting rule (which
+// always splits data points at their median) and the midpoint
+// splitting rule (which always splits a box through its center).
+// The goal of this procedure is to achieve both nicely balanced
+// splits, and boxes of bounded aspect ratio.
+//
+// A constant FS_ASPECT_RATIO is defined. Given a box, those sides
+// which can be split so that the ratio of the longest to shortest
+// side does not exceed FS_ASPECT_RATIO are identified. Among these
+// sides, we select the one in which the points have the largest
+// spread. We then split the points in a manner which most evenly
+// distributes the points on either side of the splitting plane,
+// subject to maintaining the bound on the ratio of long to short
+// sides. To determine that the aspect ratio will be preserved,
+// we determine the longest side (other than this side), and
+// determine how narrowly we can cut this side, without causing the
+// aspect ratio bound to be exceeded (small_piece).
+//
+// This procedure is more robust than either kd_split or midpt_split,
+// but is more complicated as well. When point distribution is
+// extremely skewed, this degenerates to midpt_split (actually
+// 1/3 point split), and when the points are most evenly distributed,
+// this degenerates to kd-split.
+//----------------------------------------------------------------------
+
+void fair_split(
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo) // num of points on low side (returned)
+{
+ int d;
+ ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
+ cut_dim = 0;
+ for (d = 1; d < dim; d++) { // find length of longest box side
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ if (length > max_length) {
+ max_length = length;
+ cut_dim = d;
+ }
+ }
+
+ ANNcoord max_spread = 0; // find legal cut with max spread
+ cut_dim = 0;
+ for (d = 0; d < dim; d++) {
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ // is this side midpoint splitable
+ // without violating aspect ratio?
+ if (((double) max_length)*2.0/((double) length) <= FS_ASPECT_RATIO) {
+ // compute spread along this dim
+ ANNcoord spr = annSpread(pa, pidx, n, d);
+ if (spr > max_spread) { // best spread so far
+ max_spread = spr;
+ cut_dim = d; // this is dimension to cut
+ }
+ }
+ }
+
+ max_length = 0; // find longest side other than cut_dim
+ for (d = 0; d < dim; d++) {
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ if (d != cut_dim && length > max_length)
+ max_length = length;
+ }
+ // consider most extreme splits
+ ANNcoord small_piece = max_length / FS_ASPECT_RATIO;
+ ANNcoord lo_cut = bnds.lo[cut_dim] + small_piece;// lowest legal cut
+ ANNcoord hi_cut = bnds.hi[cut_dim] - small_piece;// highest legal cut
+
+ int br1, br2;
+ // is median below lo_cut ?
+ if (annSplitBalance(pa, pidx, n, cut_dim, lo_cut) >= 0) {
+ cut_val = lo_cut; // cut at lo_cut
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ n_lo = br1;
+ }
+ // is median above hi_cut?
+ else if (annSplitBalance(pa, pidx, n, cut_dim, hi_cut) <= 0) {
+ cut_val = hi_cut; // cut at hi_cut
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ n_lo = br2;
+ }
+ else { // median cut preserves asp ratio
+ n_lo = n/2; // split about median
+ annMedianSplit(pa, pidx, n, cut_dim, cut_val, n_lo);
+ }
+}
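+
+//----------------------------------------------------------------------
+// Small worked example of the aspect-ratio bound: if the longest side
+// other than cut_dim has length 9 and FS_ASPECT_RATIO = 3, then
+// small_piece = 3, so any legal cut along cut_dim must leave at least
+// 3 units on each side, i.e. it must lie in
+// [bnds.lo[cut_dim] + 3, bnds.hi[cut_dim] - 3].  If the median falls
+// outside this interval, the nearer extreme cut (lo_cut or hi_cut) is
+// used instead.
+//----------------------------------------------------------------------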
+
+//----------------------------------------------------------------------
+// sl_fair_split - sliding fair split splitting rule
+//
+// Sliding fair split is a splitting rule that combines the
+// strengths of both fair split with sliding midpoint split.
+// Fair split tends to produce balanced splits when the points
+// are roughly uniformly distributed, but it can produce many
+// trivial splits when points are highly clustered. Sliding
+// midpoint never produces trivial splits, and shrinks boxes
+// nicely if points are highly clustered, but it may produce
+// rather unbalanced splits when points are unclustered but not
+// quite uniform.
+//
+// Sliding fair split is based on the theory that there are two
+// types of splits that are "good": balanced splits that produce
+// fat boxes, and unbalanced splits provided the cell with fewer
+// points is fat.
+//
+// This splitting rule operates by first computing the longest
+// side of the current bounding box. Then it asks which sides
+// could be split (at the midpoint) and still satisfy the aspect
+// ratio bound with respect to this side. Among these, it selects
+// the side with the largest spread (as fair split would). It
+// then considers the most extreme cuts that would be allowed by
+// the aspect ratio bound. This is done by dividing the longest
+// side of the box by the aspect ratio bound. If the median cut
+// lies between these extreme cuts, then we use the median cut.
+// If not, then consider the extreme cut that is closer to the
+// median. If all the points lie to one side of this cut, then
+// we slide the cut until it hits the first point. This may
+// violate the aspect ratio bound, but will never generate empty
+// cells. However the sibling of every such skinny cell is fat,
+// and hence packing arguments still apply.
+//
+//----------------------------------------------------------------------
+
+void sl_fair_split(
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo) // num of points on low side (returned)
+{
+ int d;
+ ANNcoord min, max; // min/max coordinates
+ int br1, br2; // split break points
+
+ ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
+ cut_dim = 0;
+ for (d = 1; d < dim; d++) { // find length of longest box side
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ if (length > max_length) {
+ max_length = length;
+ cut_dim = d;
+ }
+ }
+
+ ANNcoord max_spread = 0; // find legal cut with max spread
+ cut_dim = 0;
+ for (d = 0; d < dim; d++) {
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ // is this side midpoint splitable
+ // without violating aspect ratio?
+ if (((double) max_length)*2.0/((double) length) <= FS_ASPECT_RATIO) {
+ // compute spread along this dim
+ ANNcoord spr = annSpread(pa, pidx, n, d);
+ if (spr > max_spread) { // best spread so far
+ max_spread = spr;
+ cut_dim = d; // this is dimension to cut
+ }
+ }
+ }
+
+ max_length = 0; // find longest side other than cut_dim
+ for (d = 0; d < dim; d++) {
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ if (d != cut_dim && length > max_length)
+ max_length = length;
+ }
+ // consider most extreme splits
+ ANNcoord small_piece = max_length / FS_ASPECT_RATIO;
+ ANNcoord lo_cut = bnds.lo[cut_dim] + small_piece;// lowest legal cut
+ ANNcoord hi_cut = bnds.hi[cut_dim] - small_piece;// highest legal cut
+ // find min and max along cut_dim
+ annMinMax(pa, pidx, n, cut_dim, min, max);
+ // is median below lo_cut?
+ if (annSplitBalance(pa, pidx, n, cut_dim, lo_cut) >= 0) {
+ if (max > lo_cut) { // are any points above lo_cut?
+ cut_val = lo_cut; // cut at lo_cut
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ n_lo = br1; // balance if there are ties
+ }
+ else { // all points below lo_cut
+ cut_val = max; // cut at max value
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ n_lo = n-1;
+ }
+ }
+ // is median above hi_cut?
+ else if (annSplitBalance(pa, pidx, n, cut_dim, hi_cut) <= 0) {
+ if (min < hi_cut) { // are any points below hi_cut?
+ cut_val = hi_cut; // cut at hi_cut
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ n_lo = br2; // balance if there are ties
+ }
+ else { // all points above hi_cut
+ cut_val = min; // cut at min value
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ n_lo = 1;
+ }
+ }
+ else { // median cut is good enough
+ n_lo = n/2; // split about median
+ annMedianSplit(pa, pidx, n, cut_dim, cut_val, n_lo);
+ }
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////
+// for kd-trees with deletion
+//
+//----------------------------------------------------------------------
+// kd_split - Bentley's standard splitting routine for kd-trees
+// Find the dimension of the greatest spread, and split
+// just before the median point along this dimension.
+//----------------------------------------------------------------------
+
+void kd_split_wd(
+ ANNpointArray pa, // point array (permuted on return)
+ ANNidxArray pidx, // point indices
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo, // num of points on low side (returned)
+ int &cut_pt_idx) // index of cutting point (returned)
+{
+ // find dimension of maximum spread
+ cut_dim = annMaxSpread(pa, pidx, n, dim);
+ n_lo = n/2; // median rank
+ // split about median
+ annMedianSplit(pa, pidx, n, cut_dim, cut_val, n_lo);
+ cut_pt_idx = n_lo;
+ cut_val = PA(cut_pt_idx, cut_dim);
+}
+
+//----------------------------------------------------------------------
+// midpt_split - midpoint splitting rule for box-decomposition trees
+//
+// This is the simplest splitting rule that guarantees boxes
+// of bounded aspect ratio. It simply cuts the box with the
+// longest side through its midpoint. If there are ties, it
+// selects the dimension with the maximum point spread.
+//
+// WARNING: This routine (while simple) doesn't seem to work
+// well in practice in high dimensions, because it tends to
+// generate a large number of trivial and/or unbalanced splits.
+// Either kd_split(), sl_midpt_split(), or fair_split() are
+// recommended, instead.
+//----------------------------------------------------------------------
+
+void midpt_split_wd(
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo, // num of points on low side (returned)
+ int &cut_pt_idx) // index of cutting point (returned)
+{
+ int d;
+
+ ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
+ for (d = 1; d < dim; d++) { // find length of longest box side
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ if (length > max_length) {
+ max_length = length;
+ }
+ }
+ ANNcoord max_spread = -1; // find long side with most spread
+ for (d = 0; d < dim; d++) {
+ // is it among longest?
+ if (double(bnds.hi[d] - bnds.lo[d]) >= (1-ERR)*max_length) {
+ // compute its spread
+ ANNcoord spr = annSpread(pa, pidx, n, d);
+ if (spr > max_spread) { // is it max so far?
+ max_spread = spr;
+ cut_dim = d;
+ }
+ }
+ }
+ // split along cut_dim at midpoint
+ cut_val = (bnds.lo[cut_dim] + bnds.hi[cut_dim]) / 2;
+ // permute points accordingly
+ int br1, br2;
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ //------------------------------------------------------------------
+ // On return: pa[0..br1-1] < cut_val
+ // pa[br1..br2-1] == cut_val
+ // pa[br2..n-1] > cut_val
+ //
+ // We can set n_lo to any value in the range [br1..br2].
+ // We choose split so that points are most evenly divided.
+ //------------------------------------------------------------------
+ if (br1 > n/2) n_lo = br1;
+ else if (br2 < n/2) n_lo = br2;
+ else n_lo = n/2;
+
+ cut_pt_idx = n_lo;
+ cut_val = PA(cut_pt_idx, cut_dim);
+
+}
+
+//----------------------------------------------------------------------
+// sl_midpt_split - sliding midpoint splitting rule
+//
+// This is a modification of midpt_split, which has the nonsensical
+// name "sliding midpoint". The idea is that we try to use the
+// midpoint rule, by bisecting the longest side. If there are
+// ties, the dimension with the maximum spread is selected. If,
+// however, the midpoint split produces a trivial split (no points
+// on one side of the splitting plane) then we slide the splitting plane
+// (maintaining its orientation) until it produces a nontrivial
+// split. For example, if the splitting plane is along the x-axis,
+// and all the data points have x-coordinate less than the x-bisector,
+// then the split is taken along the maximum x-coordinate of the
+// data points.
+//
+// Intuitively, this rule cannot generate trivial splits, and
+// hence avoids midpt_split's tendency to produce trees with
+// a very large number of nodes.
+//
+//----------------------------------------------------------------------
+
+void sl_midpt_split_wd(
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices (permuted on return)
+ const ANNorthRect &bnds, // bounding rectangle for cell
+ int n, // number of points
+ int dim, // dimension of space
+ int &cut_dim, // cutting dimension (returned)
+ ANNcoord &cut_val, // cutting value (returned)
+ int &n_lo, // num of points on low side (returned)
+ int &cut_pt_idx) // index of cutting point (returned)
+{
+ int d;
+
+ ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
+ for (d = 1; d < dim; d++) { // find length of longest box side
+ ANNcoord length = bnds.hi[d] - bnds.lo[d];
+ if (length > max_length) {
+ max_length = length;
+ }
+ }
+ ANNcoord max_spread = -1; // find long side with most spread
+ for (d = 0; d < dim; d++) {
+ // is it among longest?
+ if ((bnds.hi[d] - bnds.lo[d]) >= (1-ERR)*max_length) {
+ // compute its spread
+ ANNcoord spr = annSpread(pa, pidx, n, d);
+ if (spr > max_spread) { // is it max so far?
+ max_spread = spr;
+ cut_dim = d;
+ }
+ }
+ }
+ // ideal split at midpoint
+ ANNcoord ideal_cut_val = (bnds.lo[cut_dim] + bnds.hi[cut_dim])/2;
+
+ ANNcoord min, max;
+ annMinMax(pa, pidx, n, cut_dim, min, max); // find min/max coordinates
+
+ if (ideal_cut_val < min) // slide to min or max as needed
+ cut_val = min;
+ else if (ideal_cut_val > max)
+ cut_val = max;
+ else
+ cut_val = ideal_cut_val;
+
+ // permute points accordingly
+ int br1, br2;
+ annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+ //------------------------------------------------------------------
+ // On return: pa[0..br1-1] < cut_val
+ // pa[br1..br2-1] == cut_val
+ // pa[br2..n-1] > cut_val
+ //
+ // We can set n_lo to any value in the range [br1..br2] to satisfy
+ // the exit conditions of the procedure.
+ //
+ // if ideal_cut_val < min (implying br2 >= 1),
+ // then we select n_lo = 1 (so there is one point on left) and
+ // if ideal_cut_val > max (implying br1 <= n-1),
+ // then we select n_lo = n-1 (so there is one point on right).
+ // Otherwise, we select n_lo as close to n/2 as possible within
+ // [br1..br2].
+ //------------------------------------------------------------------
+ if (ideal_cut_val < min) n_lo = 1;
+ else if (ideal_cut_val > max) n_lo = n-1;
+ else if (br1 > n/2) n_lo = br1;
+ else if (br2 < n/2) n_lo = br2;
+ else n_lo = n/2;
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/ann/kd_tree.cpp b/geom_bottleneck/bottleneck/src/ann/kd_tree.cpp
new file mode 100644
index 0000000..ad3a82d
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/kd_tree.cpp
@@ -0,0 +1,560 @@
+//----------------------------------------------------------------------
+// File: kd_tree.cpp
+// Programmer: Sunil Arya and David Mount
+// Description: Basic methods for kd-trees.
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.0 04/01/05
+// Increased aspect ratio bound (ANN_AR_TOOBIG) from 100 to 1000.
+// Fixed leaf counts to count trivial leaves.
+// Added optional pa, pi arguments to Skeleton kd_tree constructor
+// for use in load constructor.
+// Added annClose() to eliminate KD_TRIVIAL memory leak.
+// --------------------------------------------------------------------
+// 2015 - modified by A. Nigmetov to support deletion of points
+//----------------------------------------------------------------------
+
+#ifdef _WIN32
+#include <ciso646> // make VS more conformal
+#endif
+
+#include "kd_tree.h" // kd-tree declarations
+#include "kd_split.h" // kd-tree splitting rules
+#include "kd_util.h" // kd-tree utilities
+#include <ANN/ANNperf.h> // performance evaluation
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// Global data
+//
+// For some splitting rules, especially with small bucket sizes,
+// it is possible to generate a large number of empty leaf nodes.
+// To save storage we allocate a single trivial leaf node which
+// contains no points. For messy coding reasons it is convenient
+// to have it reference a trivial point index.
+//
+// KD_TRIVIAL is allocated when the first kd-tree is created. It
+// must *never* be deallocated (since it may be shared by more than
+// one tree).
+//----------------------------------------------------------------------
+static int IDX_TRIVIAL[] = {0}; // trivial point index
+ANNkd_leaf *KD_TRIVIAL = NULL; // trivial leaf node
+
+//----------------------------------------------------------------------
+// Printing the kd-tree
+// These routines print a kd-tree in reverse inorder (high then
+// root then low). (This is so that if you look at the output
+// from the right side it appears from left to right in standard
+// inorder.) When outputting leaves we output only the point
+// indices rather than the point coordinates. There is an option
+// to print the point coordinates separately.
+//
+// The tree printing routine calls the printing routines on the
+// individual nodes of the tree, passing in the level or depth
+// in the tree. The level in the tree is used to print indentation
+// for readability.
+//----------------------------------------------------------------------
+
+void ANNkd_split::print( // print splitting node
+ int level, // depth of node in tree
+ ostream &out) // output stream
+{
+ child[ANN_HI]->print(level+1, out); // print high child
+ out << " ";
+ for (int i = 0; i < level; i++) // print indentation
+ out << "..";
+ out << "Split cd=" << cut_dim << " cv=" << cut_val;
+ out << " lbnd=" << cd_bnds[ANN_LO];
+ out << " hbnd=" << cd_bnds[ANN_HI];
+ out << " np=" << actual_num_points;
+ out << "\n";
+ child[ANN_LO]->print(level+1, out); // print low child
+}
+
+void ANNkd_leaf::print( // print leaf node
+ int level, // depth of node in tree
+ ostream &out) // output stream
+{
+
+ out << " ";
+ for (int i = 0; i < level; i++) // print indentation
+ out << "..";
+
+ if (this == KD_TRIVIAL) { // canonical trivial leaf node
+ out << "Leaf (trivial)\n";
+ }
+ else{
+ out << "Leaf n=" << n_pts << " <";
+ for (int j = 0; j < n_pts; j++) {
+ out << bkt[j];
+ if (j < n_pts-1) out << ",";
+ }
+ out << ">\n";
+ }
+}
+
+void ANNkd_tree::Print( // print entire tree
+ ANNbool with_pts, // print points as well?
+ ostream &out) // output stream
+{
+ out << "ANN Version " << ANNversion << "\n";
+ if (with_pts) { // print point coordinates
+ out << " Points:\n";
+ for (int i = 0; i < n_pts; i++) {
+ out << "\t" << i << ": ";
+ annPrintPt(pts[i], dim, out);
+ out << "\n";
+ }
+ }
+ if (root == NULL) // empty tree?
+ out << " Null tree.\n";
+ else {
+ root->print(0, out); // invoke printing at root
+ }
+}
+
+//----------------------------------------------------------------------
+// kd_tree statistics (for performance evaluation)
+// This routine compute various statistics information for
+// a kd-tree. It is used by the implementors for performance
+// evaluation of the data structure.
+//----------------------------------------------------------------------
+
+#define MAX(a,b) ((a) > (b) ? (a) : (b))
+
+void ANNkdStats::merge(const ANNkdStats &st) // merge stats from child
+{
+ n_lf += st.n_lf; n_tl += st.n_tl;
+ n_spl += st.n_spl; n_shr += st.n_shr;
+ depth = MAX(depth, st.depth);
+ sum_ar += st.sum_ar;
+}
+
+//----------------------------------------------------------------------
+// Update statistics for nodes
+//----------------------------------------------------------------------
+
+const double ANN_AR_TOOBIG = 1000; // too big an aspect ratio
+
+void ANNkd_leaf::getStats( // get subtree statistics
+ int dim, // dimension of space
+ ANNkdStats &st, // stats (modified)
+ ANNorthRect &bnd_box) // bounding box
+{
+ st.reset();
+ st.n_lf = 1; // count this leaf
+ if (this == KD_TRIVIAL) st.n_tl = 1; // count trivial leaf
+ double ar = annAspectRatio(dim, bnd_box); // aspect ratio of leaf
+ // incr sum (ignore outliers)
+ st.sum_ar += float(ar < ANN_AR_TOOBIG ? ar : ANN_AR_TOOBIG);
+}
+
+void ANNkd_split::getStats( // get subtree statistics
+ int dim, // dimension of space
+ ANNkdStats &st, // stats (modified)
+ ANNorthRect &bnd_box) // bounding box
+{
+ ANNkdStats ch_stats; // stats for children
+ // get stats for low child
+ ANNcoord hv = bnd_box.hi[cut_dim]; // save box bounds
+ bnd_box.hi[cut_dim] = cut_val; // upper bound for low child
+ ch_stats.reset(); // reset
+ child[ANN_LO]->getStats(dim, ch_stats, bnd_box);
+ st.merge(ch_stats); // merge them
+ bnd_box.hi[cut_dim] = hv; // restore bound
+ // get stats for high child
+ ANNcoord lv = bnd_box.lo[cut_dim]; // save box bounds
+ bnd_box.lo[cut_dim] = cut_val; // lower bound for high child
+ ch_stats.reset(); // reset
+ child[ANN_HI]->getStats(dim, ch_stats, bnd_box);
+ st.merge(ch_stats); // merge them
+ bnd_box.lo[cut_dim] = lv; // restore bound
+
+ st.depth++; // increment depth
+ st.n_spl++; // increment number of splits
+}
+
+//----------------------------------------------------------------------
+// getStats
+// Collects a number of statistics related to kd_tree or
+// bd_tree.
+//----------------------------------------------------------------------
+
+void ANNkd_tree::getStats( // get tree statistics
+ ANNkdStats &st) // stats (modified)
+{
+ st.reset(dim, n_pts, bkt_size); // reset stats
+ // create bounding box
+ ANNorthRect bnd_box(dim, bnd_box_lo, bnd_box_hi);
+ if (root != NULL) { // if nonempty tree
+ root->getStats(dim, st, bnd_box); // get statistics
+ st.avg_ar = st.sum_ar / st.n_lf; // average leaf asp ratio
+ }
+}
+
+//----------------------------------------------------------------------
+// kd_tree destructor
+// The destructor just frees the various elements that were
+// allocated in the construction process.
+//----------------------------------------------------------------------
+
+ANNkd_tree::~ANNkd_tree() // tree destructor
+{
+ if (root != NULL and root != KD_TRIVIAL) delete root;
+ if (pidx != NULL) delete [] pidx;
+ if (bnd_box_lo != NULL) annDeallocPt(bnd_box_lo);
+ if (bnd_box_hi != NULL) annDeallocPt(bnd_box_hi);
+}
+
+//----------------------------------------------------------------------
+// This is called when all use of ANN is finished. It eliminates the
+// minor memory leak caused by the allocation of KD_TRIVIAL.
+//----------------------------------------------------------------------
+void annClose() // close use of ANN
+{
+ if (KD_TRIVIAL != NULL) {
+ delete KD_TRIVIAL;
+ KD_TRIVIAL = NULL;
+ }
+}
+
+//----------------------------------------------------------------------
+// kd_tree constructors
+// There is a skeleton kd-tree constructor which sets up a
+// trivial empty tree. The last optional argument allows
+// the routine to be passed a point index array which is
+// assumed to be of the proper size (n). Otherwise, one is
+// allocated and initialized to the identity. Warning: In
+// either case the destructor will deallocate this array.
+//
+// As a kludge, we need to allocate KD_TRIVIAL if one has not
+// already been allocated. (This is because I'm too dumb to
+// figure out how to cause a pointer to be allocated at load
+// time.)
+//----------------------------------------------------------------------
+
+void ANNkd_tree::SkeletonTree( // construct skeleton tree
+ int n, // number of points
+ int dd, // dimension
+ int bs, // bucket size
+ ANNpointArray pa, // point array
+ ANNidxArray pi) // point indices
+{
+ dim = dd; // initialize basic elements
+ n_pts = n;
+ bkt_size = bs;
+ pts = pa; // initialize points array
+
+ root = NULL; // no associated tree yet
+
+ if (pi == NULL) { // point indices provided?
+ pidx = new ANNidx[n]; // no, allocate space for point indices
+ for (int i = 0; i < n; i++) {
+ pidx[i] = i; // initially identity
+ }
+ }
+ else {
+ pidx = pi; // yes, use them
+ }
+
+ bnd_box_lo = bnd_box_hi = NULL; // bounding box is nonexistent
+ if (KD_TRIVIAL == NULL) // no trivial leaf node yet?
+ KD_TRIVIAL = new ANNkd_leaf(0, IDX_TRIVIAL); // allocate it
+
+ // for deletion
+ pointToLeafVec.clear();
+ pointToLeafVec.reserve(n_pts);
+ for(int k = 0; k < n_pts; ++k) {
+ pointToLeafVec.push_back(NULL);
+ }
+}
+
+ANNkd_tree::ANNkd_tree( // basic constructor
+ int n, // number of points
+ int dd, // dimension
+ int bs) // bucket size
+{ SkeletonTree(n, dd, bs); } // construct skeleton tree
+
+
+
+//----------------------------------------------------------------------
+// rkd_tree - recursive procedure to build a kd-tree
+//
+// Builds a kd-tree for points in pa as indexed through the
+// array pidx[0..n-1] (typically a subarray of the array used in
+// the top-level call). This routine permutes the array pidx,
+// but does not alter pa[].
+//
+// The construction is based on a standard algorithm for constructing
+// the kd-tree (see Friedman, Bentley, and Finkel, ``An algorithm for
+// finding best matches in logarithmic expected time,'' ACM Transactions
+// on Mathematical Software, 3(3):209-226, 1977). The procedure
+// operates by a simple divide-and-conquer strategy, which determines
+// an appropriate orthogonal cutting plane (see below), and splits
+// the points. When the number of points falls below the bucket size,
+// we simply store the points in a leaf node's bucket.
+//
+// One of the arguments is a pointer to a splitting routine,
+// whose prototype is:
+//
+// void split(
+// ANNpointArray pa, // complete point array
+// ANNidxArray pidx, // point array (permuted on return)
+// ANNorthRect &bnds, // bounds of current cell
+// int n, // number of points
+// int dim, // dimension of space
+// int &cut_dim, // cutting dimension
+// ANNcoord &cut_val, // cutting value
+// int &n_lo) // no. of points on low side of cut
+//
+// This procedure selects a cutting dimension and cutting value,
+// partitions pa about these values, and returns the number of
+// points on the low side of the cut.
+//----------------------------------------------------------------------
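+
+//----------------------------------------------------------------------
+// Illustrative sketch: any routine matching the prototype above can be
+// passed to rkd_tree() as the splitter.  A (naive, hypothetical) rule
+// that always bisects dimension 0 could look roughly like this:
+//
+//      void naive_split(ANNpointArray pa, ANNidxArray pidx,
+//              const ANNorthRect &bnds, int n, int dim,
+//              int &cut_dim, ANNcoord &cut_val, int &n_lo)
+//      {
+//          cut_dim = 0;                              // always cut dim 0
+//          cut_val = (bnds.lo[0] + bnds.hi[0]) / 2;  // at the box midpoint
+//          int br1, br2;                             // partition the points
+//          annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
+//          n_lo = (br1 > n/2) ? br1 : (br2 < n/2 ? br2 : n/2);
+//      }
+//----------------------------------------------------------------------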
+
+ANNkd_ptr rkd_tree( // recursive construction of kd-tree
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices to store in subtree
+ int n, // number of points
+ int dim, // dimension of space
+ int bsp, // bucket space
+ ANNorthRect &bnd_box, // bounding box for current node
+ ANNkd_splitter splitter, // splitting routine
+ vector<ANNkd_leaf*>* ppointToLeafVec)
+{
+ if (n <= bsp) { // n small, make a leaf node
+ if (n == 0) // empty leaf node
+ return KD_TRIVIAL; // return (canonical) empty leaf
+ else { // construct the node and return
+ ANNkd_leaf* res = new ANNkd_leaf(n, pidx);
+ if ( 1 == bsp) {
+ (*ppointToLeafVec)[*pidx] = res;
+ }
+ return res;
+ }
+ }
+ else { // n large, make a splitting node
+ int cd; // cutting dimension
+ ANNcoord cv; // cutting value
+ int n_lo; // number on low side of cut
+ ANNkd_node *lo, *hi; // low and high children
+
+ // invoke splitting procedure
+ (*splitter)(pa, pidx, bnd_box, n, dim, cd, cv, n_lo);
+
+ ANNcoord lv = bnd_box.lo[cd]; // save bounds for cutting dimension
+ ANNcoord hv = bnd_box.hi[cd];
+
+ bnd_box.hi[cd] = cv; // modify bounds for left subtree
+ lo = rkd_tree( // build left subtree
+ pa, pidx, n_lo, // ...from pidx[0..n_lo-1]
+ dim, bsp, bnd_box, splitter, ppointToLeafVec);
+ bnd_box.hi[cd] = hv; // restore bounds
+
+ bnd_box.lo[cd] = cv; // modify bounds for right subtree
+ hi = rkd_tree( // build right subtree
+ pa, pidx + n_lo, n-n_lo,// ...from pidx[n_lo..n-1]
+ dim, bsp, bnd_box, splitter, ppointToLeafVec);
+ bnd_box.lo[cd] = lv; // restore bounds
+
+ // create the splitting node
+ ANNkd_split *ptr = new ANNkd_split(cd, cv, lv, hv, lo, hi);
+ if (lo != KD_TRIVIAL)
+ lo->setParent(ptr);
+ if (hi != KD_TRIVIAL)
+ hi->setParent(ptr);
+ ptr->setNumPoints(lo->getNumPoints() + hi->getNumPoints());
+
+ return ptr; // return pointer to this node
+ }
+}
+
+// for kd-trees with deletion
+/*
+ANNkd_ptr rkd_tree_wd( // recursive construction of kd-tree
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices to store in subtree
+ int n, // number of points
+ int dim, // dimension of space
+ int bsp, // bucket space
+ ANNorthRect &bnd_box, // bounding box for current node
+ ANNkd_splitter_wd splitter) // splitting routine
+{
+ ANNidx cut_pt_idx;
+ if (n <= bsp) { // n small, make a leaf node
+ if (n == 0) // empty leaf node
+ return KD_TRIVIAL; // return (canonical) empty leaf
+ else // construct the node and return
+ return new ANNkd_leaf(n, pidx);
+ }
+ else { // n large, make a splitting node
+ int cd; // cutting dimension
+ ANNcoord cv; // cutting value
+ int n_lo; // number on low side of cut
+ ANNkd_node *lo, *hi; // low and high children
+
+ // invoke splitting procedure
+ (*splitter)(pa, pidx, bnd_box, n, dim, cd, cv, n_lo, cut_pt_idx);
+
+ ANNcoord lv = bnd_box.lo[cd]; // save bounds for cutting dimension
+ ANNcoord hv = bnd_box.hi[cd];
+
+ bnd_box.hi[cd] = cv; // modify bounds for left subtree
+ lo = rkd_tree_wd( // build left subtree
+ pa, pidx, n_lo, // ...from pidx[0..n_lo-1]
+ dim, bsp, bnd_box, splitter);
+ bnd_box.hi[cd] = hv; // restore bounds
+
+ bnd_box.lo[cd] = cv; // modify bounds for right subtree
+ hi = rkd_tree_wd( // build right subtree
+ pa, pidx + n_lo, n-n_lo,// ...from pidx[n_lo..n-1]
+ dim, bsp, bnd_box, splitter);
+ bnd_box.lo[cd] = lv; // restore bounds
+
+ // create the splitting node
+ ANNkd_split *ptr = new ANNkd_split(cd, cv, lv, hv, lo, hi, cut_pt_idx);
+
+ return ptr; // return pointer to this node
+ }
+}
+*/
+
+//----------------------------------------------------------------------
+// kd-tree constructor
+// This is the main constructor for kd-trees given a set of points.
+// It first builds a skeleton tree, then computes the bounding box
+// of the data points, and then invokes rkd_tree() to actually
+// build the tree, passing it the appropriate splitting routine.
+//----------------------------------------------------------------------
+
+ANNkd_tree::ANNkd_tree( // construct from point array
+ ANNpointArray pa, // point array (with at least n pts)
+ int n, // number of points
+ int dd, // dimension
+ int bs, // bucket size
+ ANNsplitRule split) // splitting method
+{
+ SkeletonTree(n, dd, bs); // set up the basic stuff
+ pts = pa; // where the points are
+ actual_num_points = n;
+ if (n == 0) return; // no points--no sweat
+
+ ANNorthRect bnd_box(dd); // bounding box for points
+ annEnclRect(pa, pidx, n, dd, bnd_box);// construct bounding rectangle
+ // copy to tree structure
+ bnd_box_lo = annCopyPt(dd, bnd_box.lo);
+ bnd_box_hi = annCopyPt(dd, bnd_box.hi);
+
+ switch (split) { // build by rule
+ case ANN_KD_STD: // standard kd-splitting rule
+ root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, kd_split, &pointToLeafVec);
+ break;
+ case ANN_KD_MIDPT: // midpoint split
+ root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, midpt_split, &pointToLeafVec);
+ break;
+ case ANN_KD_FAIR: // fair split
+ root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, fair_split, &pointToLeafVec);
+ break;
+ case ANN_KD_SUGGEST: // best (in our opinion)
+ case ANN_KD_SL_MIDPT: // sliding midpoint split
+ root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, sl_midpt_split, &pointToLeafVec);
+ break;
+ case ANN_KD_SL_FAIR: // sliding fair split
+ root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, sl_fair_split, &pointToLeafVec);
+ break;
+ // for kd-trees with deletion
+ /*
+ //case ANN_KD_SUGGEST:
+ case ANN_KD_STD_WD:
+ root = rkd_tree_wd(pa, pidx, n, dd, bs, bnd_box, kd_split_wd);
+ break;
+ case ANN_KD_MIDPT_WD:
+ root = rkd_tree_wd(pa, pidx, n, dd, bs, bnd_box, kd_split_wd);
+ break;
+ case ANN_KD_SL_MIDPT_WD:
+ root = rkd_tree_wd(pa, pidx, n, dd, bs, bnd_box, kd_split_wd);
+ break;
+ */
+ default:
+ annError("Illegal splitting method", ANNabort);
+ }
+}
+
+
+//----------------------------------------------------------------------
+// Deletion of points (2015 modification)
+// delete_point() removes a single point from the tree. It relies on
+// pointToLeafVec, which is filled only when the tree is built with
+// bucket size 1. Point counts along the path to the root are
+// decremented (so searches can skip empty subtrees) and the leaf that
+// held the point is detached from its parent.
+//----------------------------------------------------------------------
+void ANNkd_tree::delete_point(const int point_idx)
+{
+ // range check
+ assert(0 <= point_idx and point_idx < n_pts);
+ assert(actual_num_points > 0);
+ // if this is the first deletion,
+ // initialize isDeleted vector
+ if (isDeleted.empty()) {
+ isDeleted.reserve(n_pts);
+ for(int k = 0; k < n_pts; ++k) {
+ isDeleted.push_back(false);
+ }
+ }
+ // points shouldn't be deleted twice
+ assert(!isDeleted[point_idx]);
+ assert(root != NULL);
+ ANNkd_leaf* leafWithPoint = pointToLeafVec.at(point_idx);
+ assert(leafWithPoint != NULL);
+ // if leafWithPoint != root, the leaf detaches itself
+ // from its parent and deletes itself
+ pointToLeafVec.at(point_idx)->delete_point(point_idx, leafWithPoint != root);
+ if (leafWithPoint == root) {
+ // we had only one point,
+ // so the tree must delete it
+ root = KD_TRIVIAL;
+ delete leafWithPoint;
+ }
+ isDeleted[point_idx] = true;
+ actual_num_points--;
+}
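+
+//----------------------------------------------------------------------
+// Illustrative sketch: deletion relies on pointToLeafVec, which
+// rkd_tree() fills only when the tree is built with bucket size 1, so a
+// caller that intends to delete points might proceed roughly as follows
+// (`dataPts`, `n` and `dim` are placeholders):
+//
+//      ANNkd_tree tree(dataPts, n, dim, 1);    // bucket size 1
+//      ...                                     // run some queries
+//      tree.delete_point(5);                   // point 5 is no longer
+//                                              // reachable by searches
+//----------------------------------------------------------------------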
+
+void ANNkd_leaf::delete_point(const int point_idx, const bool killYourself)
+{
+ assert(n_pts == 1);
+ assert(bkt[0] == point_idx);
+ ANNkd_split* myPar = parent;
+ while(myPar != NULL) {
+ myPar->decNumPoints();
+ myPar = myPar->getParent();
+ }
+ if (parent != NULL)
+ parent->delete_leaf(this);
+ if (killYourself)
+ delete this;
+}
+
+void ANNkd_split::delete_leaf(ANNkd_leaf* childToDelete)
+{
+ assert(child[ANN_LO] == childToDelete or child[ANN_HI] == childToDelete);
+ if (child[ANN_LO] == childToDelete)
+ child[ANN_LO] = KD_TRIVIAL;
+ else
+ child[ANN_HI] = KD_TRIVIAL;
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/ann/kd_util.cpp b/geom_bottleneck/bottleneck/src/ann/kd_util.cpp
new file mode 100644
index 0000000..02b35c4
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/ann/kd_util.cpp
@@ -0,0 +1,441 @@
+//----------------------------------------------------------------------
+// File: kd_util.cpp
+// Programmer: Sunil Arya and David Mount
+// Description: Common utilities for kd-trees
+// Last modified: 01/04/05 (Version 1.0)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+//----------------------------------------------------------------------
+
+#include "kd_util.h" // kd-utility declarations
+
+#include <ANN/ANNperf.h> // performance evaluation
+
+namespace geom_bt {
+//----------------------------------------------------------------------
+// The following routines are utility functions for manipulating
+// points sets, used in determining splitting planes for kd-tree
+// construction.
+//----------------------------------------------------------------------
+
+//----------------------------------------------------------------------
+// NOTE: Virtually all point indexing is done through an index (i.e.
+// permutation) array pidx. Consequently, a reference to the d-th
+// coordinate of the i-th point is pa[pidx[i]][d]. The macro PA(i,d)
+// is a shorthand for this.
+//----------------------------------------------------------------------
+ // standard 2-d indirect indexing
+#define PA(i,d) (pa[pidx[(i)]][(d)])
+ // accessing a single point
+#define PP(i) (pa[pidx[(i)]])
+
+//----------------------------------------------------------------------
+// annAspectRatio
+// Compute the aspect ratio (ratio of longest to shortest side)
+// of a rectangle.
+//----------------------------------------------------------------------
+
+double annAspectRatio(
+ int dim, // dimension
+ const ANNorthRect &bnd_box) // bounding cube
+{
+ ANNcoord length = bnd_box.hi[0] - bnd_box.lo[0];
+ ANNcoord min_length = length; // min side length
+ ANNcoord max_length = length; // max side length
+ for (int d = 0; d < dim; d++) {
+ length = bnd_box.hi[d] - bnd_box.lo[d];
+ if (length < min_length) min_length = length;
+ if (length > max_length) max_length = length;
+ }
+ return max_length/min_length;
+}
+
+//----------------------------------------------------------------------
+// annEnclRect, annEnclCube
+// These utilities compute the smallest rectangle and cube enclosing
+// a set of points, respectively.
+//----------------------------------------------------------------------
+
+void annEnclRect(
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int dim, // dimension
+ ANNorthRect &bnds) // bounding cube (returned)
+{
+ for (int d = 0; d < dim; d++) { // find smallest enclosing rectangle
+ ANNcoord lo_bnd = PA(0,d); // lower bound on dimension d
+ ANNcoord hi_bnd = PA(0,d); // upper bound on dimension d
+ for (int i = 0; i < n; i++) {
+ if (PA(i,d) < lo_bnd) lo_bnd = PA(i,d);
+ else if (PA(i,d) > hi_bnd) hi_bnd = PA(i,d);
+ }
+ bnds.lo[d] = lo_bnd;
+ bnds.hi[d] = hi_bnd;
+ }
+}
+
+void annEnclCube( // compute smallest enclosing cube
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int dim, // dimension
+ ANNorthRect &bnds) // bounding cube (returned)
+{
+ int d;
+ // compute smallest enclosing rect
+ annEnclRect(pa, pidx, n, dim, bnds);
+
+ ANNcoord max_len = 0; // max length of any side
+ for (d = 0; d < dim; d++) { // determine max side length
+ ANNcoord len = bnds.hi[d] - bnds.lo[d];
+ if (len > max_len) { // update max_len if longest
+ max_len = len;
+ }
+ }
+ for (d = 0; d < dim; d++) { // grow sides to match max
+ ANNcoord len = bnds.hi[d] - bnds.lo[d];
+ ANNcoord half_diff = (max_len - len) / 2;
+ bnds.lo[d] -= half_diff;
+ bnds.hi[d] += half_diff;
+ }
+}
+
+//----------------------------------------------------------------------
+// annBoxDistance - utility routine which computes distance from point to
+// box (Note: most distances to boxes are computed using incremental
+// distance updates, not this function.)
+//----------------------------------------------------------------------
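+// Illustrative example (assuming the default Euclidean ANN_POW/ANN_SUM):
+// for q = (0, 5), lo = (1, 1), hi = (3, 3), dim = 2, the point is 1 unit
+// left of the box in x and 2 units above it in y, so the returned squared
+// distance is 1*1 + 2*2 = 5.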
+
+ANNdist annBoxDistance( // compute distance from point to box
+ const ANNpoint q, // the point
+ const ANNpoint lo, // low point of box
+ const ANNpoint hi, // high point of box
+ int dim) // dimension of space
+{
+ register ANNdist dist = 0.0; // sum of squared distances
+ register ANNdist t;
+
+ for (register int d = 0; d < dim; d++) {
+ if (q[d] < lo[d]) { // q is left of box
+ t = ANNdist(lo[d]) - ANNdist(q[d]);
+ dist = ANN_SUM(dist, ANN_POW(t));
+ }
+ else if (q[d] > hi[d]) { // q is right of box
+ t = ANNdist(q[d]) - ANNdist(hi[d]);
+ dist = ANN_SUM(dist, ANN_POW(t));
+ }
+ }
+ ANN_FLOP(4*dim) // increment floating op count
+
+ return dist;
+}
+
+//----------------------------------------------------------------------
+// annSpread - find spread along given dimension
+// annMinMax - find min and max coordinates along given dimension
+// annMaxSpread - find dimension of max spread
+//----------------------------------------------------------------------
+
+ANNcoord annSpread( // compute point spread along dimension
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d) // dimension to check
+{
+ ANNcoord min = PA(0,d); // compute max and min coords
+ ANNcoord max = PA(0,d);
+ for (int i = 1; i < n; i++) {
+ ANNcoord c = PA(i,d);
+ if (c < min) min = c;
+ else if (c > max) max = c;
+ }
+ return (max - min); // total spread is difference
+}
+
+void annMinMax( // compute min and max coordinates along dim
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d, // dimension to check
+ ANNcoord &min, // minimum value (returned)
+ ANNcoord &max) // maximum value (returned)
+{
+ min = PA(0,d); // compute max and min coords
+ max = PA(0,d);
+ for (int i = 1; i < n; i++) {
+ ANNcoord c = PA(i,d);
+ if (c < min) min = c;
+ else if (c > max) max = c;
+ }
+}
+
+int annMaxSpread( // compute dimension of max spread
+ ANNpointArray pa, // point array
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int dim) // dimension of space
+{
+ int max_dim = 0; // dimension of max spread
+ ANNcoord max_spr = 0; // amount of max spread
+
+ if (n == 0) return max_dim; // no points, who cares?
+
+ for (int d = 0; d < dim; d++) { // compute spread along each dim
+ ANNcoord spr = annSpread(pa, pidx, n, d);
+ if (spr > max_spr) { // bigger than current max
+ max_spr = spr;
+ max_dim = d;
+ }
+ }
+ return max_dim;
+}
+
+//----------------------------------------------------------------------
+// annMedianSplit - split point array about its median
+// Splits a subarray of points pa[0..n-1] about an element of given
+// rank (median: n_lo = n/2) with respect to dimension d. It places
+// the element of rank n_lo-1 correctly (because our splitting rule
+// takes the mean of these two). On exit, the array is permuted so
+// that:
+//
+// pa[0..n_lo-2][d] <= pa[n_lo-1][d] <= pa[n_lo][d] <= pa[n_lo+1..n-1][d].
+//
+// The mean of pa[n_lo-1][d] and pa[n_lo][d] is returned as the
+// splitting value.
+//
+// All indexing is done indirectly through the index array pidx.
+//
+// This function uses the well known selection algorithm due to
+// C.A.R. Hoare.
+//----------------------------------------------------------------------
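+// Illustrative example: for n = 5, n_lo = 2 and x-coordinates
+// {7, 3, 9, 1, 5} along dimension d, one valid output permutation is
+// {1, 3, 5, 7, 9}: pa[0][d] <= pa[1][d] <= pa[2][d] <= pa[3..4][d],
+// and the returned cutting value is cv = (3 + 5)/2 = 4.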
+
+ // swap two points in pa array
+#define PASWAP(a,b) { int tmp = pidx[a]; pidx[a] = pidx[b]; pidx[b] = tmp; }
+
+void annMedianSplit(
+ ANNpointArray pa, // points to split
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d, // dimension along which to split
+ ANNcoord &cv, // cutting value
+ int n_lo) // split into n_lo and n-n_lo
+{
+ int l = 0; // left end of current subarray
+ int r = n-1; // right end of current subarray
+ while (l < r) {
+ register int i = (r+l)/2; // select middle as pivot
+ register int k;
+
+ if (PA(i,d) > PA(r,d)) // make sure last > pivot
+ PASWAP(i,r)
+ PASWAP(l,i); // move pivot to first position
+
+ ANNcoord c = PA(l,d); // pivot value
+ i = l;
+ k = r;
+ for(;;) { // pivot about c
+ while (PA(++i,d) < c) ;
+ while (PA(--k,d) > c) ;
+ if (i < k) PASWAP(i,k) else break;
+ }
+ PASWAP(l,k); // pivot winds up in location k
+
+ if (k > n_lo) r = k-1; // recurse on proper subarray
+ else if (k < n_lo) l = k+1;
+ else break; // got the median exactly
+ }
+ if (n_lo > 0) { // search for next smaller item
+ ANNcoord c = PA(0,d); // candidate for max
+ int k = 0; // candidate's index
+ for (int i = 1; i < n_lo; i++) {
+ if (PA(i,d) > c) {
+ c = PA(i,d);
+ k = i;
+ }
+ }
+ PASWAP(n_lo-1, k); // max among pa[0..n_lo-1] to pa[n_lo-1]
+ }
+ // cut value is midpoint value
+ cv = (PA(n_lo-1,d) + PA(n_lo,d))/2.0;
+}
+
+//----------------------------------------------------------------------
+// annPlaneSplit - split point array about a cutting plane
+// Split the points in an array about a given plane along a
+// given cutting dimension. On exit, br1 and br2 are set so
+// that:
+//
+// pa[ 0 ..br1-1] < cv
+// pa[br1..br2-1] == cv
+// pa[br2.. n -1] > cv
+//
+// All indexing is done indirectly through the index array pidx.
+//
+//----------------------------------------------------------------------
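+// Illustrative example: for x-coordinates {4, 1, 6, 4, 2} along dimension d
+// and cv = 4, one valid result is the permutation {1, 2, 4, 4, 6} with
+// br1 = 2 and br2 = 4: pa[0..1] < 4, pa[2..3] == 4 and pa[4] > 4.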
+
+void annPlaneSplit( // split points by a plane
+ ANNpointArray pa, // points to split
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d, // dimension along which to split
+ ANNcoord cv, // cutting value
+ int &br1, // first break (values < cv)
+ int &br2) // second break (values == cv)
+{
+ int l = 0;
+ int r = n-1;
+ for(;;) { // partition pa[0..n-1] about cv
+ while (l < n && PA(l,d) < cv) l++;
+ while (r >= 0 && PA(r,d) >= cv) r--;
+ if (l > r) break;
+ PASWAP(l,r);
+ l++; r--;
+ }
+ br1 = l; // now: pa[0..br1-1] < cv <= pa[br1..n-1]
+ r = n-1;
+ for(;;) { // partition pa[br1..n-1] about cv
+ while (l < n && PA(l,d) <= cv) l++;
+ while (r >= br1 && PA(r,d) > cv) r--;
+ if (l > r) break;
+ PASWAP(l,r);
+ l++; r--;
+ }
+ br2 = l; // now: pa[br1..br2-1] == cv < pa[br2..n-1]
+}
+
+
+//----------------------------------------------------------------------
+// annBoxSplit - split point array about an orthogonal rectangle
+// Split the points in an array about a given orthogonal
+// rectangle. On exit, n_in is set to the number of points
+// that are inside (or on the boundary of) the rectangle.
+//
+// All indexing is done indirectly through the index array pidx.
+//
+//----------------------------------------------------------------------
+
+void annBoxSplit( // split points by a box
+ ANNpointArray pa, // points to split
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int dim, // dimension of space
+ ANNorthRect &box, // the box
+ int &n_in) // number of points inside (returned)
+{
+ int l = 0;
+ int r = n-1;
+ for(;;) { // partition pa[0..n-1] about box
+ while (l < n && box.inside(dim, PP(l))) l++;
+ while (r >= 0 && !box.inside(dim, PP(r))) r--;
+ if (l > r) break;
+ PASWAP(l,r);
+ l++; r--;
+ }
+ n_in = l; // now: pa[0..n_in-1] inside and rest outside
+}
+
+//----------------------------------------------------------------------
+// annSplitBalance - compute balance factor for a given plane split
+// Balance factor is defined as the number of points lying
+// below the splitting value minus n/2 (median). Thus, a
+// median split has balance 0, left of this is negative and
+// right of this is positive. (The points are unchanged.)
+//----------------------------------------------------------------------
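+// Illustrative example: with n = 7 points of which 5 lie strictly below cv
+// along dimension d, the balance factor is 5 - 7/2 = 5 - 3 = 2, i.e. the
+// split lies to the right of the median.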
+
+int annSplitBalance( // determine balance factor of a split
+ ANNpointArray pa, // points to split
+ ANNidxArray pidx, // point indices
+ int n, // number of points
+ int d, // dimension along which to split
+ ANNcoord cv) // cutting value
+{
+ int n_lo = 0;
+ for(int i = 0; i < n; i++) { // count number less than cv
+ if (PA(i,d) < cv) n_lo++;
+ }
+ return n_lo - n/2;
+}
+
+//----------------------------------------------------------------------
+// annBox2Bnds - convert bounding box to list of bounds
+// Given two boxes, an inner box enclosed within a bounding
+// box, this routine determines all the sides for which the
+// inner box is strictly contained within the bounding box,
+// and adds an appropriate entry to a list of bounds. Then
+// we allocate storage for the final list of bounds, and return
+// the resulting list and its size.
+//----------------------------------------------------------------------
+
+void annBox2Bnds( // convert inner box to bounds
+ const ANNorthRect &inner_box, // inner box
+ const ANNorthRect &bnd_box, // enclosing box
+ int dim, // dimension of space
+ int &n_bnds, // number of bounds (returned)
+ ANNorthHSArray &bnds) // bounds array (returned)
+{
+ int i;
+ n_bnds = 0; // count number of bounds
+ for (i = 0; i < dim; i++) {
+ if (inner_box.lo[i] > bnd_box.lo[i]) // low bound is inside
+ n_bnds++;
+ if (inner_box.hi[i] < bnd_box.hi[i]) // high bound is inside
+ n_bnds++;
+ }
+
+ bnds = new ANNorthHalfSpace[n_bnds]; // allocate appropriate size
+
+ int j = 0;
+ for (i = 0; i < dim; i++) { // fill the array
+ if (inner_box.lo[i] > bnd_box.lo[i]) {
+ bnds[j].cd = i;
+ bnds[j].cv = inner_box.lo[i];
+ bnds[j].sd = +1;
+ j++;
+ }
+ if (inner_box.hi[i] < bnd_box.hi[i]) {
+ bnds[j].cd = i;
+ bnds[j].cv = inner_box.hi[i];
+ bnds[j].sd = -1;
+ j++;
+ }
+ }
+}
+
+//----------------------------------------------------------------------
+// annBnds2Box - convert list of bounds to bounding box
+// Given an enclosing box and a list of bounds, this routine
+// computes the corresponding inner box. It is assumed that
+// the box points have been allocated already.
+//----------------------------------------------------------------------
+
+void annBnds2Box(
+ const ANNorthRect &bnd_box, // enclosing box
+ int dim, // dimension of space
+ int n_bnds, // number of bounds
+ ANNorthHSArray bnds, // bounds array
+ ANNorthRect &inner_box) // inner box (returned)
+{
+ annAssignRect(dim, inner_box, bnd_box); // copy bounding box to inner
+
+ for (int i = 0; i < n_bnds; i++) {
+ bnds[i].project(inner_box.lo); // project each endpoint
+ bnds[i].project(inner_box.hi);
+ }
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/basic_defs.cpp b/geom_bottleneck/bottleneck/src/basic_defs.cpp
new file mode 100644
index 0000000..e09b119
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/basic_defs.cpp
@@ -0,0 +1,230 @@
+/*
+ Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+ This file is part of GeomBottleneck.
+
+ GeomBottleneck is free software: you can redistribute it and/or modify
+ it under the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GeomBottleneck is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include <algorithm>
+#include <cfloat>
+#include "basic_defs_bt.h"
+
+namespace geom_bt {
+
+// Point
+
+bool Point::operator==(const Point& other) const
+{
+ return ((this->x == other.x) and (this->y == other.y));
+}
+
+bool Point::operator!=(const Point& other) const
+{
+ return !(*this == other);
+}
+
+std::ostream& operator<<(std::ostream& output, const Point p)
+{
+ output << "(" << p.x << ", " << p.y << ")";
+ return output;
+}
+
+std::ostream& operator<<(std::ostream& output, const PointSet& ps)
+{
+ output << "{ ";
+ for(auto& p : ps) {
+ output << p << ", ";
+ }
+ output << "\b\b }";
+ return output;
+}
+
+double sqrDist(const Point& a, const Point& b)
+{
+ return (a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y);
+}
+
+double dist(const Point& a, const Point& b)
+{
+ return sqrt(sqrDist(a, b));
+}
+
+// DiagramPoint
+
+// compute l-inf distance between two diagram points
+double distLInf(const DiagramPoint& a, const DiagramPoint& b)
+{
+ if ( DiagramPoint::DIAG == a.type &&
+ DiagramPoint::DIAG == b.type ) {
+ // distance between points on the diagonal is 0
+ return 0.0;
+ }
+ // otherwise distance is a usual l-inf distance
+ return std::max(fabs(a.getRealX() - b.getRealX()), fabs(a.getRealY() - b.getRealY()));
+}
+
+bool DiagramPoint::operator==(const DiagramPoint& other) const
+{
+ assert(this->id >= MinValidId);
+ assert(other.id >= MinValidId);
+ bool areEqual{ this->id == other.id };
+ assert(!areEqual or ((this->x == other.x) and (this->y == other.y) and (this->type == other.type)));
+ return areEqual;
+}
+
+bool DiagramPoint::operator!=(const DiagramPoint& other) const
+{
+ return !(*this == other);
+}
+
+std::ostream& operator<<(std::ostream& output, const DiagramPoint p)
+{
+ if ( p.type == DiagramPoint::DIAG ) {
+ output << "(" << p.x << ", " << p.y << ", " << 0.5 * (p.x + p.y) << ", " << p.id << " DIAG )";
+ } else {
+ output << "(" << p.x << ", " << p.y << ", " << p.id << " NORMAL)";
+ }
+ return output;
+}
+
+std::ostream& operator<<(std::ostream& output, const DiagramPointSet& ps)
+{
+ output << "{ ";
+ for(auto pit = ps.cbegin(); pit != ps.cend(); ++pit) {
+ output << *pit << ", ";
+ }
+ output << "\b\b }";
+ return output;
+}
+
+DiagramPoint::DiagramPoint(double xx, double yy, Type ttype, IdType uid) :
+ x(xx),
+ y(yy),
+ type(ttype),
+ id(uid)
+{
+ //if ( xx < 0 )
+ //throw "Negative x coordinate";
+ //if ( yy < 0)
+ //throw "Negative y coordinate";
+ //if ( yy < xx )
+ //throw "Point is below the diagonal";
+ if ( yy == xx and ttype != DiagramPoint::DIAG)
+ throw "Point on the main diagonal must have DIAG type";
+
+}
+
+void DiagramPointSet::insert(const DiagramPoint p)
+{
+ points.insert(p);
+ if (p.id > maxId) {
+ maxId = p.id + 1;
+ }
+}
+
+// erase should be called only for an element of the set
+void DiagramPointSet::erase(const DiagramPoint& p, bool doCheck)
+{
+ auto it = points.find(p);
+ if (it != points.end()) {
+ points.erase(it);
+ } else {
+ assert(!doCheck);
+ }
+}
+
+void DiagramPointSet::reserve(const size_t newSize)
+{
+ points.reserve(newSize);
+}
+
+
+void DiagramPointSet::erase(const std::unordered_set<DiagramPoint, DiagramPointHash>::const_iterator it)
+{
+ points.erase(it);
+}
+
+void DiagramPointSet::clear()
+{
+ points.clear();
+}
+
+size_t DiagramPointSet::size() const
+{
+ return points.size();
+}
+
+bool DiagramPointSet::empty() const
+{
+ return points.empty();
+}
+
+bool DiagramPointSet::hasElement(const DiagramPoint& p) const
+{
+ return points.find(p) != points.end();
+}
+
+
+void DiagramPointSet::removeDiagonalPoints()
+{
+ if (isLinked) {
+ auto ptIter = points.begin();
+ while(ptIter != points.end()) {
+ if (ptIter->isDiagonal()) {
+ ptIter = points.erase(ptIter);
+ } else {
+ ptIter++;
+ }
+ }
+ isLinked = false;
+ }
+}
+
+
+// preprocess diagrams A and B: for every normal point of A, add its
+// projection onto the diagonal to B, and vice versa.
+// NB: ids of points will be changed!
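+// Illustrative example: a normal point (2, 5) of A is copied to newA with a
+// new id, and its diagonal projection ((2 + 5)/2, (2 + 5)/2) = (3.5, 3.5) is
+// added to newB as a DIAG point.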
+void addProjections(DiagramPointSet& A, DiagramPointSet& B)
+{
+
+ IdType uniqueId {MinValidId + 1};
+ DiagramPointSet newA, newB;
+
+ // copy normal points from A to newA
+ // add projections to newB
+ for(auto& pA : A) {
+ if (pA.isNormal()) {
+ DiagramPoint dpA {pA.getRealX(), pA.getRealY(), DiagramPoint::NORMAL, uniqueId++};
+ DiagramPoint dpB {0.5*(pA.getRealX() +pA.getRealY()), 0.5 *(pA.getRealX() +pA.getRealY()), DiagramPoint::DIAG, uniqueId++};
+ newA.insert(dpA);
+ newB.insert(dpB);
+ }
+ }
+
+ for(auto& pB : B) {
+ if (pB.isNormal()) {
+ DiagramPoint dpB {pB.getRealX(), pB.getRealY(), DiagramPoint::NORMAL, uniqueId++};
+ DiagramPoint dpA {0.5*(pB.getRealX() +pB.getRealY()), 0.5 *(pB.getRealX() +pB.getRealY()), DiagramPoint::DIAG, uniqueId++};
+ newB.insert(dpB);
+ newA.insert(dpA);
+ }
+ }
+
+ A = newA;
+ B = newB;
+ A.isLinked = true;
+ B.isLinked = true;
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/bottleneck.cpp b/geom_bottleneck/bottleneck/src/bottleneck.cpp
new file mode 100644
index 0000000..a5009c5
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/bottleneck.cpp
@@ -0,0 +1,555 @@
+/*
+ Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+ This file is part of GeomBottleneck.
+
+ GeomBottleneck is free software: you can redistribute it and/or modify
+ it under the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GeomBottleneck is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+
+#include <iomanip>
+#include <sstream>
+#include <string>
+#include <cctype>
+
+#include "bottleneck.h"
+//#include "test_dist_calc.h"
+
+namespace geom_bt {
+
+// return the interval (distMin, distMax) such that:
+// a) actual bottleneck distance between A and B is contained in the interval
+// b) if the interval is not (0,0), then (distMax - distMin) / distMin < epsilon
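+// The search first brackets the answer: starting from a 3-approximation of
+// the furthest pairwise distance, the probe value is repeatedly halved (if it
+// is already an upper bound) or doubled (if it is only a lower bound), and
+// the resulting interval is then narrowed by bisection until the relative
+// gap drops below epsilon.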
+std::pair<double, double> bottleneckDistApproxInterval(DiagramPointSet& A, DiagramPointSet& B, const double epsilon)
+{
+ // empty diagrams are not considered as error
+ if (A.empty() and B.empty())
+ return std::make_pair(0.0, 0.0);
+
+ // link diagrams A and B by adding projections
+ addProjections(A, B);
+
+ // TODO: think about that!
+ // we need one threshold for checking if the distance is 0,
+ // another one for the oracle!
+ constexpr double epsThreshold { 1.0e-10 };
+ std::pair<double, double> result { 0.0, 0.0 };
+ bool useRangeSearch { true };
+ // construct an oracle
+ BoundMatchOracle oracle(A, B, epsThreshold, useRangeSearch);
+ // check for distance = 0
+ if (oracle.isMatchLess(2*epsThreshold)) {
+ return result;
+ }
+ // get a 3-approximation of maximal distance between A and B
+ // as a starting value for probe distance
+ double distProbe { getFurthestDistance3Approx(A, B) };
+ // aliases for result components
+ double& distMin {result.first};
+ double& distMax {result.second};
+
+ if ( oracle.isMatchLess(distProbe) ) {
+ // distProbe is an upper bound,
+ // find lower bound by repeatedly halving the probe
+ do {
+ distMax = distProbe;
+ distProbe /= 2.0;
+ } while (oracle.isMatchLess(distProbe));
+ distMin = distProbe;
+ } else {
+ // distProbe is a lower bound,
+ // find upper bound with exponential search
+ do {
+ distMin = distProbe;
+ distProbe *= 2.0;
+ } while (!oracle.isMatchLess(distProbe));
+ distMax = distProbe;
+ }
+ // bounds are found, perform binary search
+ //std::cout << "Bounds found, distMin = " << distMin << ", distMax = " << distMax << ", ratio = " << ( distMax - distMin ) / distMin << std::endl ;
+ distProbe = ( distMin + distMax ) / 2.0;
+ while ( ( distMax - distMin ) / distMin >= epsilon ) {
+ if (oracle.isMatchLess(distProbe)) {
+ distMax = distProbe;
+ } else {
+ distMin = distProbe;
+ }
+ distProbe = ( distMin + distMax ) / 2.0;
+ }
+ return result;
+}
+
+// get approximate distance,
+// see bottleneckDistApproxInterval
+double bottleneckDistApprox(DiagramPointSet& A, DiagramPointSet& B, const double epsilon)
+{
+ auto interval = bottleneckDistApproxInterval(A, B, epsilon);
+ return interval.second;
+}
+
+
+double bottleneckDistExactFromSortedPwDist(DiagramPointSet&A, DiagramPointSet& B, std::vector<double>& pairwiseDist)
+{
+ //for(size_t k = 0; k < pairwiseDist.size(); ++k) {
+ //std::cout << "pairwiseDist[" << k << "] = " << std::setprecision(15) << pairwiseDist[k] << std::endl;
+ //}
+ // trivial case: we have only one candidate
+ if (pairwiseDist.size() == 1)
+ return pairwiseDist[0];
+
+ bool useRangeSearch = true;
+ double distEpsilon = std::numeric_limits<double>::max();
+ for(size_t k = 0; k < pairwiseDist.size() - 2; ++k) {
+ auto diff = pairwiseDist[k+1]- pairwiseDist[k];
+ if ( diff > 1.0e-14 and diff < distEpsilon ) {
+ distEpsilon = diff;
+ }
+ }
+ distEpsilon /= 3.0;
+
+ BoundMatchOracle oracle(A, B, distEpsilon, useRangeSearch);
+ // binary search
+ size_t iterNum {0};
+ size_t idxMin {0}, idxMax {pairwiseDist.size() - 1};
+ size_t idxMid;
+ while(idxMax > idxMin) {
+ idxMid = static_cast<size_t>(floor(idxMin + idxMax) / 2.0);
+ //std::cout << "while begin: min = " << idxMin << ", idxMax = " << idxMax << ", idxMid = " << idxMid << ", testing d = " << std::setprecision(15) << pairwiseDist[idxMid] << std::endl;
+ iterNum++;
+ // not (A[imid] < dist) <=> A[imid] >= dist <=> A[imid] >= dist + eps
+ if (oracle.isMatchLess(pairwiseDist[idxMid] + distEpsilon / 2.0)) {
+ //std::cout << "isMatchLess = true" << std::endl;
+ idxMax = idxMid;
+ } else {
+ //std::cout << "isMatchLess = false " << std::endl;
+ idxMin = idxMid + 1;
+ }
+ //std::cout << "while end: idxMin = " << idxMin << ", idxMax = " << idxMax << ", idxMid = " << idxMid << std::endl;
+ }
+ idxMid = static_cast<size_t>(floor(idxMin + idxMax) / 2.0);
+ return pairwiseDist[idxMid];
+}
+
+
+double bottleneckDistExact(DiagramPointSet& A, DiagramPointSet& B)
+{
+ constexpr double epsilon = 0.001;
+ auto interval = bottleneckDistApproxInterval(A, B, epsilon);
+ const double delta = 0.5 * (interval.second - interval.first);
+ const double approxDist = 0.5 * ( interval.first + interval.second);
+ const double minDist = interval.first;
+ const double maxDist = interval.second;
+ //std::cerr << std::setprecision(15) << "minDist = " << minDist << ", maxDist = " << maxDist << std::endl;
+ if ( delta == 0 ) {
+ return interval.first;
+ }
+ // copy points from A to a vector
+ // todo: get rid of this?
+ std::vector<DiagramPoint> pointsA;
+ pointsA.reserve(A.size());
+ for(const auto& ptA : A) {
+ pointsA.push_back(ptA);
+ }
+
+ //std::vector<double> killDist;
+ //for(auto ptA : A) {
+ //for(auto ptB : B) {
+ //if ( distLInf(ptA, ptB) > minDist and distLInf(ptA, ptB) < maxDist) {
+ //killDist.push_back(distLInf(ptA, ptB));
+ //std::cout << ptA << ", " << ptB << std::endl;
+ //}
+ //}
+ //}
+ //std::sort(killDist.begin(), killDist.end());
+ //for(auto d : killDist) {
+ //std::cout << d << std::endl;
+ //}
+ //std::cout << "*************" << std::endl;
+
+ // in this vector we store the distances between the points
+ // that are candidates to realize the bottleneck distance
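+ // Candidate distances are collected with a stripe filter: for each point
+ // of A two stripe centers are placed at its x-coordinate +/- approxDist
+ // (and likewise for y below), and each point of B is tested only against
+ // the A-points whose stripe (half-width delta) it falls into, since the
+ // realizing coordinate difference of any candidate pair must lie in
+ // [minDist, maxDist].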
+ std::vector<double> pairwiseDist;
+ {
+ // vector to store centers of vertical stripes
+ // two for each point in A and the id of the corresponding point
+ std::vector<std::pair<double, DiagramPoint>> xCentersVec;
+ xCentersVec.reserve(2 * pointsA.size());
+ for(auto ptA : pointsA) {
+ xCentersVec.push_back(std::make_pair(ptA.getRealX() - approxDist, ptA));
+ xCentersVec.push_back(std::make_pair(ptA.getRealX() + approxDist, ptA));
+ }
+ // lambda to compare pairs <coordinate, point> w.r.t. the coordinate
+ auto compLambda = [](std::pair<double, DiagramPoint> a, std::pair<double, DiagramPoint> b)
+ { return a.first < b.first; };
+
+ std::sort(xCentersVec.begin(), xCentersVec.end(), compLambda);
+ //std::cout << "xCentersVec.size = " << xCentersVec.size() << std::endl;
+ //for(auto p = xCentersVec.begin(); p!= xCentersVec.end(); ++p) {
+ //if (p->second.id == 200) {
+ //std::cout << "index of 200: " << p - xCentersVec.begin() << std::endl;
+ //}
+ //}
+ //std::vector<DiagramPoint>
+ // todo: sort points in B, reduce search range in lower and upper bounds
+ for(auto ptB : B) {
+ // iterator to the first stripe such that ptB lies to the left
+ // of its right boundary (x_B <= x_j + \delta iff x_j >= x_B - \delta)
+ auto itStart = std::lower_bound(xCentersVec.begin(),
+ xCentersVec.end(),
+ std::make_pair(ptB.getRealX() - delta, ptB),
+ compLambda);
+ //if (ptB.id == 236) {
+ //std::cout << itStart - xCentersVec.begin() << std::endl;
+ //}
+
+ for(auto iterA = itStart; iterA < xCentersVec.end(); ++iterA) {
+ //if (ptB.id == 236) {
+ //std::cout << "consider " << iterA->second << std::endl;
+ //}
+ if ( ptB.getRealX() < iterA->first - delta) {
+ // from that moment x_B >= x_j - delta
+ // is violated: x_B no longer lies to the right of the left
+ // boundary of current stripe
+ //if (ptB.id == 236) {
+ //std::cout << "break" << std::endl;
+ //}
+ break;
+ }
+ // we're here => ptB lies in vertical stripe,
+ // check if distance fits into the interval we've found
+ double pwDist = distLInf(iterA->second, ptB);
+ //if (ptB.id == 236) {
+ //std::cout << pwDist << std::endl;
+ //}
+ //std::cout << 1000*minDist << " <= " << 1000*pwDist << " <= " << 1000*maxDist << std::endl;
+ if (pwDist >= minDist and pwDist <= maxDist) {
+ pairwiseDist.push_back(pwDist);
+ }
+ }
+ }
+ }
+
+ {
+ // for y
+ // vector to store centers of vertical stripes
+ // two for each point in A and the id of the corresponding point
+ std::vector<std::pair<double, DiagramPoint>> yCentersVec;
+ yCentersVec.reserve(2 * pointsA.size());
+ for(auto ptA : pointsA) {
+ yCentersVec.push_back(std::make_pair(ptA.getRealY() - approxDist, ptA));
+ yCentersVec.push_back(std::make_pair(ptA.getRealY() + approxDist, ptA));
+ }
+ // lambda to compare pairs <coordinate, point> w.r.t. the coordinate
+ auto compLambda = [](std::pair<double, DiagramPoint> a, std::pair<double, DiagramPoint> b)
+ { return a.first < b.first; };
+
+ std::sort(yCentersVec.begin(), yCentersVec.end(), compLambda);
+
+ // std::cout << "Sorted vector of y-centers:" << std::endl;
+ //for(auto coordPtPair : yCentersVec) {
+ //std::cout << coordPtPair.first << ", id = " << coordPtPair.second.id << std::endl;
+ //}
+ /*std::cout << "End of sorted vector of y-centers:" << std::endl;*/
+
+ //std::vector<DiagramPoint>
+ // todo: sort points in B, reduce search range in lower and upper bounds
+ for(auto ptB : B) {
+ auto itStart = std::lower_bound(yCentersVec.begin(),
+ yCentersVec.end(),
+ std::make_pair(ptB.getRealY() - delta, ptB),
+ compLambda);
+
+ //if (ptB.id == 316) {
+ //std::cout << itStart - yCentersVec.begin() << " " << distLInf(itStart->second, ptB) << std::endl;
+ //std::cout << "maxDist = " << maxDist << std::endl;
+ //std::cout << "minDist = " << minDist << std::endl;
+ //double pwDistDebug = distLInf(itStart->second, ptB);
+ //std::cout << ( pwDistDebug >= minDist and pwDistDebug <= maxDist) << std::endl;
+ //}
+
+ for(auto iterA = itStart; iterA < yCentersVec.end(); ++iterA) {
+ if ( ptB.getRealY() < iterA->first - delta) {
+ break;
+ }
+ double pwDist = distLInf(iterA->second, ptB);
+ //std::cout << 1000*minDist << " <= " << 1000*pwDist << " <= " << 1000*maxDist << std::endl;
+ if (pwDist >= minDist and pwDist <= maxDist) {
+ //if (ptB.id == 316) {
+ //std::cout << "adding " << pwDist << std::endl;
+ //}
+ pairwiseDist.push_back(pwDist);
+ }
+ }
+ }
+ }
+
+ //std::cerr << "pairwiseDist.size = " << pairwiseDist.size() << " out of " << A.size() * A.size() << std::endl;
+ std::sort(pairwiseDist.begin(), pairwiseDist.end());
+ //for(auto ddd : pairwiseDist) {
+ //std::cerr << std::setprecision(15) << ddd << std::endl;
+ //}
+
+ return bottleneckDistExactFromSortedPwDist(A, B, pairwiseDist);
+}
+
+double bottleneckDistSlow(DiagramPointSet& A, DiagramPointSet& B)
+{
+ // use range search when building the layer graph
+ bool useRangeSearch { true };
+ // find maximum of min. distances for each point,
+ // use this value as lower bound for bottleneck distance
+ bool useHeurMinIdx { true };
+
+ // find matching in a greedy manner to
+ // get an upper bound for a bottleneck distance
+ bool useHeurGreedyMatching { false };
+
+ // use successive multiplication of idxMin with 2 to get idxMax
+ bool goUpToFindIdxMax { false };
+ //
+ goUpToFindIdxMax = goUpToFindIdxMax and !useHeurGreedyMatching;
+
+ if (!useHeurGreedyMatching) {
+ long int N = 3 * (A.size() / 2 ) * (B.size() / 2);
+ std::vector<double> pairwiseDist;
+ pairwiseDist.reserve(N);
+ double maxMinDist {0.0};
+ for(auto& p_A : A) {
+ double minDist { std::numeric_limits<double>::max() };
+ for(auto& p_B : B) {
+ if (p_A.type != DiagramPoint::DIAG or p_B.type != DiagramPoint::DIAG) {
+ double d = distLInf(p_A, p_B);
+ pairwiseDist.push_back(d);
+ if (useHeurMinIdx and p_A.type != DiagramPoint::DIAG) {
+ if (d < minDist)
+ minDist = d;
+ }
+ }
+ }
+ if (useHeurMinIdx and DiagramPoint::DIAG != p_A.type and minDist > maxMinDist) {
+ maxMinDist = minDist;
+ }
+ }
+ std::sort(pairwiseDist.begin(), pairwiseDist.end());
+
+ double distEpsilon = std::numeric_limits<double>::max();
+ for(size_t k = 0; k < pairwiseDist.size() - 2; ++k) {
+ auto diff = pairwiseDist[k+1]- pairwiseDist[k];
+ if ( diff > 1.0e-10 and diff < distEpsilon ) {
+ distEpsilon = diff;
+ }
+ }
+ distEpsilon /= 3.0;
+
+ BoundMatchOracle oracle(A, B, distEpsilon, useRangeSearch);
+ // binary search
+ size_t iterNum {0};
+ size_t idxMin {0}, idxMax {pairwiseDist.size() - 1};
+ if (useHeurMinIdx) {
+ auto maxMinIter = std::equal_range(pairwiseDist.begin(), pairwiseDist.end(), maxMinDist);
+ assert(maxMinIter.first != pairwiseDist.end());
+ idxMin = maxMinIter.first - pairwiseDist.begin();
+ //std::cout << "maxMinDist = " << maxMinDist << ", idxMin = " << idxMin << ", d = " << pairwiseDist[idxMin] << std::endl;
+ }
+
+ if (goUpToFindIdxMax) {
+ if ( pairwiseDist.size() == 1) {
+ return pairwiseDist[0];
+ }
+
+ idxMax = std::max<size_t>(idxMin, 1);
+ while (!oracle.isMatchLess(pairwiseDist[idxMax])) {
+ //std::cout << "entered while" << std::endl;
+ idxMin = idxMax;
+ if (2*idxMax > pairwiseDist.size() -1) {
+ idxMax = pairwiseDist.size() - 1;
+ break;
+ } else {
+ idxMax *= 2;
+ }
+ }
+ //std::cout << "size = " << pairwiseDist.size() << ", idxMax = " << idxMax << ", pw[max] = " << pairwiseDist[idxMax] << std::endl;
+ }
+
+ size_t idxMid { (idxMin + idxMax) / 2 };
+ while(idxMax > idxMin) {
+ iterNum++;
+ if (oracle.isMatchLess(pairwiseDist[idxMid])) {
+ idxMax = idxMid;
+ } else {
+ if (idxMax - idxMin == 1)
+ idxMin++;
+ else
+ idxMin = idxMid;
+ }
+ idxMid = (idxMin + idxMax) / 2;
+ }
+ return pairwiseDist[idxMid];
+ } else {
+ // with greedy matching
+ long int N = A.size() * B.size();
+ std::vector<DistVerticesPair> pairwiseDist;
+ pairwiseDist.reserve(N);
+ double maxMinDist {0.0};
+ size_t idxA{0}, idxB{0};
+ for(auto p_A : A) {
+ double minDist { std::numeric_limits<double>::max() };
+ idxB = 0;
+ for(auto p_B : B) {
+ double d = distLInf(p_A, p_B);
+ pairwiseDist.push_back( std::make_pair(d, std::make_pair(idxA, idxB) ) );
+ if (useHeurMinIdx and p_A.type != DiagramPoint::DIAG) {
+ if (d < minDist)
+ minDist = d;
+ }
+ idxB++;
+ }
+ if (useHeurMinIdx and DiagramPoint::DIAG != p_A.type and minDist > maxMinDist) {
+ maxMinDist = minDist;
+ }
+ idxA++;
+ }
+
+ auto compLambda = [](DistVerticesPair a, DistVerticesPair b)
+ { return a.first < b.first;};
+
+ std::sort(pairwiseDist.begin(),
+ pairwiseDist.end(),
+ compLambda);
+
+ double distEpsilon = std::numeric_limits<double>::max();
+ for(size_t k = 0; k < pairwiseDist.size() - 2; ++k) {
+ auto diff = pairwiseDist[k+1].first - pairwiseDist[k].first;
+ if ( diff > 1.0e-10 and diff < distEpsilon ) {
+ distEpsilon = diff;
+ }
+ }
+ distEpsilon /= 3.0;
+
+ BoundMatchOracle oracle(A, B, distEpsilon, useRangeSearch);
+
+ // construct greedy matching
+ size_t numVert { A.size() };
+ size_t numMatched { 0 };
+ std::unordered_set<size_t> aTobMatched, bToaMatched;
+ aTobMatched.reserve(numVert);
+ bToaMatched.reserve(numVert);
+ size_t distVecIdx {0};
+ while( numMatched < numVert) {
+ auto vertPair = pairwiseDist[distVecIdx++].second;
+ //std::cout << "distVecIdx = " << distVecIdx << ", matched: " << numMatched << " out of " << numVert << std::endl;
+ //std::cout << "vertex A idx = " << vertPair.first << ", B idx: " << vertPair.second << " out of " << numVert << std::endl;
+ if ( aTobMatched.count(vertPair.first) == 0 and
+ bToaMatched.count(vertPair.second) == 0 ) {
+ aTobMatched.insert(vertPair.first);
+ bToaMatched.insert(vertPair.second);
+ numMatched++;
+ }
+ }
+ size_t idxMax = distVecIdx-1;
+ //std::cout << "idxMax = " << idxMax << ", size = " << pairwiseDist.size() << std::endl;
+ // binary search
+ size_t iterNum {0};
+ size_t idxMin {0};
+ if (useHeurMinIdx) {
+ auto maxMinIter = std::equal_range(pairwiseDist.begin(),
+ pairwiseDist.end(),
+ std::make_pair(maxMinDist, std::make_pair(0,0)),
+ compLambda);
+ assert(maxMinIter.first != pairwiseDist.end());
+ idxMin = maxMinIter.first - pairwiseDist.begin();
+ //std::cout << "maxMinDist = " << maxMinDist << ", idxMin = " << idxMin << ", d = " << pairwiseDist[idxMin].first << std::endl;
+ }
+ size_t idxMid { (idxMin + idxMax) / 2 };
+ while(idxMax > idxMin) {
+ iterNum++;
+ if (oracle.isMatchLess(pairwiseDist[idxMid].first)) {
+ idxMax = idxMid;
+ } else {
+ if (idxMax - idxMin == 1)
+ idxMin++;
+ else
+ idxMin = idxMid;
+ }
+ idxMid = (idxMin + idxMax) / 2;
+ }
+ return pairwiseDist[idxMid].first;
+ }
+ // stats
+ /*
+ // count number of edges
+ // pairwiseDist is sorted, add edges of the same length
+ int edgeNumber {idxMid};
+ while(pairwiseDist[edgeNumber + 1] == pairwiseDist[edgeNumber])
+ edgeNumber++;
+ // add edges between diagonal points
+ edgeNumber += N / 3;
+ // output stats
+ std::cout << idxMid << "\t" << N;
+ std::cout << "\t" << iterNum;
+ std::cout << "\t" << A.size() + B.size();
+ std::cout << "\t" << edgeNumber << "\t";
+ std::cout << (double)(edgeNumber) / (double)(A.size() + B.size()) << std::endl;
+ */
+}
+
+bool readDiagramPointSet(const std::string& fname, std::vector<std::pair<double, double>>& result)
+{
+ return readDiagramPointSet(fname.c_str(), result);
+}
+
+bool readDiagramPointSet(const char* fname, std::vector<std::pair<double, double>>& result)
+{
+ size_t lineNumber { 0 };
+ result.clear();
+ std::ifstream f(fname);
+ if (!f.good()) {
+ std::cerr << "Cannot open file " << fname << std::endl;
+ return false;
+ }
+ std::string line;
+ while(std::getline(f, line)) {
+ lineNumber++;
+ // process comments: remove everything after hash
+ auto hashPos = line.find_first_of("#", 0);
+ if( std::string::npos != hashPos) {
+ line = std::string(line.begin(), line.begin() + hashPos);
+ }
+ if (line.empty()) {
+ continue;
+ }
+ // trim whitespaces
+ auto whiteSpaceFront = std::find_if_not(line.begin(),line.end(),isspace);
+ auto whiteSpaceBack = std::find_if_not(line.rbegin(),line.rend(),isspace).base();
+ if (whiteSpaceBack <= whiteSpaceFront) {
+ // line consists of spaces only - move to the next line
+ continue;
+ }
+ line = std::string(whiteSpaceFront,whiteSpaceBack);
+ double x, y;
+ std::istringstream iss(line);
+ if (not(iss >> x >> y)) {
+ std::cerr << "Error in file " << fname << ", line number " << lineNumber << ": cannot parse \"" << line << "\"" << std::endl;
+ return false;
+ }
+ result.push_back(std::make_pair(x,y));
+ }
+ f.close();
+ return true;
+}
+
+
+
+}
diff --git a/geom_bottleneck/bottleneck/src/bound_match.cpp b/geom_bottleneck/bottleneck/src/bound_match.cpp
new file mode 100644
index 0000000..06d3b67
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/bound_match.cpp
@@ -0,0 +1,529 @@
+/*
+Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+This file is part of GeomBottleneck.
+
+GeomBottleneck is free software: you can redistribute it and/or modify
+it under the terms of the Lesser GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+GeomBottleneck is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+Lesser GNU General Public License for more details.
+
+You should have received a copy of the Lesser GNU General Public License
+along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+#include <iostream>
+#include <assert.h>
+#include "bound_match.h"
+
+namespace geom_bt {
+/*static void printDebug(//bool isDebug, std::string s)*/
+//{
+//#ifdef DEBUG_BOUND_MATCH
+ //if (isDebug) {
+ //std::cout << s << std::endl;
+ //}
+//#endif
+//}
+
+//static void printDebug(//bool isDebug, std::string s, const Matching& m)
+//{
+//#ifdef DEBUG_BOUND_MATCH
+ //if (isDebug) {
+ //std::cout << s << std::endl;
+ //std::cout << m << std::endl;
+ //}
+//#endif
+//}
+
+//static void printDebug(//bool isDebug, std::string s, const DiagramPoint& p)
+//{
+//#ifdef DEBUG_BOUND_MATCH
+ //if (isDebug) {
+ //std::cout << s << p << std::endl;
+ //}
+//#endif
+//}
+
+//static void printDebug(//bool isDebug, std::string s, const double r)
+//{
+//#ifdef DEBUG_BOUND_MATCH
+ //if (isDebug) {
+ //std::cout << s << r << std::endl;
+ //}
+//#endif
+//}
+
+//static void printDebug(//bool isDebug, std::string s, const Path p)
+//{
+//#ifdef DEBUG_BOUND_MATCH
+ //if (isDebug) {
+ //std::cout << s;
+ //for(auto pt : p) {
+ //std::cout << pt << "; ";
+ //}
+ //std::cout << std::endl;
+ //}
+//#endif
+//}
+
+//static void printDebug(//bool isDebug, std::string s, const DiagramPointSet& dpSet)
+//{
+//#ifdef DEBUG_BOUND_MATCH
+ //if (isDebug) {
+ //std::cout << s << dpSet << std::endl;
+ //}
+//#endif
+/*}*/
+
+std::ostream& operator<<(std::ostream& output, const Matching& m)
+{
+ output << "Matching: " << m.AToB.size() << " pairs (";
+ if (!m.isPerfect()) {
+ output << "not";
+ }
+ output << " perfect)" << std::endl;
+ for(auto atob : m.AToB) {
+ output << atob.first << " <-> " << atob.second << " distance: " << distLInf(atob.first, atob.second) << std::endl;
+ }
+ return output;
+}
+
+void Matching::sanityCheck() const
+{
+#ifdef DEBUG_MATCHING
+ assert( AToB.size() == BToA.size() );
+ for(auto aToBPair : AToB) {
+ auto bToAPair = BToA.find(aToBPair.second);
+ assert(bToAPair != BToA.end());
+ if (aToBPair.first != bToAPair->second) {
+ std::cerr << "failed assertion, in aToB " << aToBPair.first;
+ std::cerr << ", in bToA " << bToAPair->second << std::endl;
+ }
+ assert( aToBPair.first == bToAPair->second);
+ }
+#endif
+}
+
+bool Matching::isPerfect() const
+{
+ //sanityCheck();
+ return AToB.size() == A.size();
+}
+
+void Matching::matchVertices(const DiagramPoint& pA, const DiagramPoint& pB)
+{
+ assert(A.hasElement(pA));
+ assert(B.hasElement(pB));
+ AToB.erase(pA);
+ AToB.insert( {{ pA, pB }} );
+ BToA.erase(pB);
+ BToA.insert( {{ pB, pA }} );
+}
+
+bool Matching::getMatchedVertex(const DiagramPoint& p, DiagramPoint& result) const
+{
+ sanityCheck();
+ auto inA = AToB.find(p);
+ if (inA != AToB.end()) {
+ result = (*inA).second;
+ return true;
+ } else {
+ auto inB = BToA.find(p);
+ if (inB != BToA.end()) {
+ result = (*inB).second;
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void Matching::checkAugPath(const Path& augPath) const
+{
+ assert(augPath.size() % 2 == 0);
+ for(size_t idx = 0; idx < augPath.size(); ++idx) {
+ bool mustBeExposed { idx == 0 or idx == augPath.size() - 1 };
+ if (isExposed(augPath[idx]) != mustBeExposed) {
+ std::cerr << "mustBeExposed = " << mustBeExposed << ", idx = " << idx << ", point " << augPath[idx] << std::endl;
+ }
+ assert( isExposed(augPath[idx]) == mustBeExposed );
+ DiagramPoint matchedVertex {0.0, 0.0, DiagramPoint::DIAG, 1};
+ if ( idx % 2 == 0 ) {
+ assert( A.hasElement(augPath[idx]));
+ if (!mustBeExposed) {
+ getMatchedVertex(augPath[idx], matchedVertex);
+ assert(matchedVertex == augPath[idx - 1]);
+ }
+ } else {
+ assert( B.hasElement(augPath[idx]));
+ if (!mustBeExposed) {
+ getMatchedVertex(augPath[idx], matchedVertex);
+ assert(matchedVertex == augPath[idx + 1]);
+ }
+ }
+ }
+}
+
+// use augmenting path to increase
+// the size of the matching
+void Matching::increase(const Path& augPath)
+{
+ //bool isDebug {false};
+ sanityCheck();
+ // check that augPath is an augmenting path
+ checkAugPath(augPath);
+ for(size_t idx = 0; idx < augPath.size() - 1; idx += 2) {
+ matchVertices( augPath[idx], augPath[idx + 1]);
+ }
+ //printDebug(isDebug, "", *this);
+ sanityCheck();
+}
+
+DiagramPointSet Matching::getExposedVertices(bool forA) const
+{
+ sanityCheck();
+ DiagramPointSet result;
+ const DiagramPointSet* setToSearch { forA ? &A : &B };
+ const std::unordered_map<DiagramPoint, DiagramPoint, DiagramPointHash>* mapToSearch { forA ? &AToB : &BToA };
+ for(auto it = setToSearch->cbegin(); it != setToSearch->cend(); ++it) {
+ if (mapToSearch->find((*it)) == mapToSearch->cend()) {
+ result.insert((*it));
+ }
+ }
+ return result;
+}
+
+void Matching::getAllAdjacentVertices(const DiagramPointSet& setIn,
+ DiagramPointSet& setOut,
+ bool forA) const
+{
+ sanityCheck();
+ //bool isDebug {false};
+ setOut.clear();
+ const std::unordered_map<DiagramPoint, DiagramPoint, DiagramPointHash>* m;
+ m = ( forA ) ? &BToA : &AToB;
+ for(auto pit = setIn.cbegin(); pit != setIn.cend(); ++pit) {
+ auto findRes = m->find(*pit);
+ if (findRes != m->cend()) {
+ setOut.insert((*findRes).second);
+ }
+ }
+ //printDebug(isDebug, "got all adjacent vertices for ", setIn);
+ //printDebug(isDebug, "the result is: ", setOut);
+ sanityCheck();
+}
+
+bool Matching::isExposed(const DiagramPoint& p) const
+{
+ return ( AToB.find(p) == AToB.end() ) && ( BToA.find(p) == BToA.end() );
+}
+
+
+BoundMatchOracle::BoundMatchOracle(DiagramPointSet psA, DiagramPointSet psB,
+ double dEps, bool useRS) :
+ A(psA), B(psB), M(A, B), distEpsilon(dEps), useRangeSearch(useRS), prevQueryValue(0.0)
+{
+ neighbOracle = new NeighbOracle(psB, 0, distEpsilon);
+}
+
+bool BoundMatchOracle::isMatchLess(double r)
+{
+ return buildMatchingForThreshold(r);
+}
+
+
+void BoundMatchOracle::removeFromLayer(const DiagramPoint& p, const int layerIdx) {
+ //bool isDebug {false};
+ //printDebug(isDebug, "entered removeFromLayer, layerIdx == " + std::to_string(layerIdx) + ", p = ", p);
+ layerGraph[layerIdx].erase(p);
+ if (layerOracles[layerIdx]) {
+ layerOracles[layerIdx]->deletePoint(p);
+ }
+}
+
+// return true, if there exists an augmenting path from startVertex
+// in this case the path is returned in result.
+// startVertex must be an exposed vertex from L_1 (layer[0])
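+// Layers alternate between vertices of A (even indices 0, 2, ...) and
+// vertices of B (odd indices 1, 3, ...); the path grows by taking an
+// unmatched edge into the next B-layer and the matched edge back to an
+// A-vertex, backtracking when a vertex has no remaining neighbours, and it
+// succeeds once it reaches an exposed vertex in the last layer.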
+bool BoundMatchOracle::buildAugmentingPath(const DiagramPoint startVertex, Path& result)
+{
+ //bool isDebug {false};
+ //printDebug(isDebug, "Entered buildAugmentingPath, startVertex: ", startVertex);
+ DiagramPoint prevVertexA = startVertex;
+ result.clear();
+ result.push_back(startVertex);
+ size_t evenLayerIdx {1};
+ while ( evenLayerIdx < layerGraph.size() ) {
+ //for(size_t evenLayerIdx = 1; evenLayerIdx < layerGraph.size(); evenLayerIdx += 2) {
+ DiagramPoint nextVertexB{0.0, 0.0, DiagramPoint::DIAG, 1}; // next vertex from even layer
+ bool neighbFound = layerOracles[evenLayerIdx]->getNeighbour(prevVertexA, nextVertexB);
+ //printDebug(isDebug, "Searched neighbours for ", prevVertexA);
+ //printDebug(isDebug, "; the result is ", nextVertexB);
+ if (neighbFound) {
+ result.push_back(nextVertexB);
+ if ( layerGraph.size() == evenLayerIdx + 1) {
+ //printDebug(isDebug, "Last layer reached, stopping; the path: ", result);
+ break;
+ } else {
+ // nextVertexB must be matched with some vertex from the next odd
+ // layer
+ DiagramPoint nextVertexA {0.0, 0.0, DiagramPoint::DIAG, 1};
+ if (!M.getMatchedVertex(nextVertexB, nextVertexA)) {
+ std::cerr << "Vertices in even layers must be matched! Unmatched: ";
+ std::cerr << nextVertexB << std::endl;
+ std::cerr << evenLayerIdx << "; " << layerGraph.size() << std::endl;
+ throw "Unmatched vertex in even layer";
+ } else {
+ assert( ! (nextVertexA.getRealX() == 0 and nextVertexA.getRealY() == 0) );
+ result.push_back(nextVertexA);
+ //printDebug(isDebug, "Matched vertex from the even layer added to the path, result: ", result);
+ prevVertexA = nextVertexA;
+ evenLayerIdx += 2;
+ continue;
+ }
+ }
+ } else {
+ // prevVertexA has no neighbours in the next layer,
+ // backtrack
+ if (evenLayerIdx == 1) {
+ // startVertex is not connected to any vertices
+ // in the next layer, augm. path doesn't exist
+ //printDebug(isDebug, "startVertex is not connected to any vertices in the next layer, augm. path doesn't exist");
+ removeFromLayer(startVertex, 0);
+ return false;
+ } else {
+ assert(evenLayerIdx >= 3);
+ assert(result.size() % 2 == 1);
+ result.pop_back();
+ DiagramPoint prevVertexB = result.back();
+ result.pop_back();
+ //printDebug(isDebug, "removing 2 previous vertices from layers, evenLayerIdx == ", evenLayerIdx);
+ removeFromLayer(prevVertexA, evenLayerIdx-1);
+ removeFromLayer(prevVertexB, evenLayerIdx-2);
+ // we should proceed from the previous odd layer
+ //printDebug(isDebug, "Here! res.size == ", result.size());
+ assert(result.size() >= 1);
+ prevVertexA = result.back();
+ evenLayerIdx -= 2;
+ continue;
+ }
+ }
+ } // finished iterating over all layers
+ // remove all vertices of the augmenting path from
+ // the corresponding layers
+ for(size_t layerIdx = 0; layerIdx < result.size(); ++layerIdx) {
+ removeFromLayer(result[layerIdx], layerIdx);
+ }
+ return true;
+}
+
+
+// remove all edges whose length is > newThreshold
+void Matching::trimMatching(const double newThreshold)
+{
+ //bool isDebug { false };
+ sanityCheck();
+ for(auto aToBIter = AToB.begin(); aToBIter != AToB.end(); ) {
+ if ( distLInf(aToBIter->first, aToBIter->second) > newThreshold ) {
+ // remove edge from AToB and BToA
+ //printDebug(isDebug, "removing edge ", aToBIter->first);
+ //printDebug(isDebug, " <-> ", aToBIter->second);
+ BToA.erase(aToBIter->second);
+ aToBIter = AToB.erase(aToBIter);
+ } else {
+ aToBIter++;
+ }
+ }
+ sanityCheck();
+}
+
+bool BoundMatchOracle::buildMatchingForThreshold(const double r)
+{
+ //bool isDebug {false};
+ //printDebug(isDebug,"Entered buildMatchingForThreshold, r = " + std::to_string(r));
+ if (prevQueryValue > r) {
+ M.trimMatching(r);
+ }
+ prevQueryValue = r;
+ while(true) {
+ buildLayerGraph(r);
+ //printDebug(isDebug,"Layer graph built");
+ if (augPathExist) {
+ std::vector<Path> augmentingPaths;
+ DiagramPointSet copyLG0;
+ for(DiagramPoint p : layerGraph[0]) {
+ copyLG0.insert(p);
+ }
+ for(DiagramPoint exposedVertex : copyLG0) {
+ Path augPath;
+ if (buildAugmentingPath(exposedVertex, augPath)) {
+ //printDebug(isDebug, "Augmenting path found", augPath);
+ augmentingPaths.push_back(augPath);
+ }
+ /*
+ else {
+ printDebug(isDebug,"augmenting paths must exist, but were not found!", M);
+ std::cerr << "augmenting paths must exist, but were not found!" << std::endl;
+ std::cout.flush();
+ std::cerr.flush();
+ printLayerGraph();
+ //throw "Something went wrong-1";
+ //return M.isPerfect();
+ // analyze: finished or no paths exist
+ // can this actually happen?
+ }
+ */
+
+ }
+ if (augmentingPaths.empty()) {
+ //printDebug(isDebug,"augmenting paths must exist, but were not found!", M);
+ std::cerr << "augmenting paths must exist, but were not found!" << std::endl;
+ throw "bad epsilon?";
+ }
+ // swap all augmenting paths with matching to increase it
+ //printDebug(isDebug,"before increase with augmenting paths:", M);
+ for(auto& augPath : augmentingPaths ) {
+ //printDebug(isDebug, "Increasing with augm. path ", augPath);
+ M.increase(augPath);
+ }
+ //printDebug(isDebug,"after increase with augmenting paths:", M);
+ } else {
+ //printDebug(isDebug,"no augmenting paths exist, matching returned is:", M);
+ return M.isPerfect();
+ }
+ }
+}
+
+void BoundMatchOracle::printLayerGraph(void)
+{
+#ifdef DEBUG_BOUND_MATCH
+ for(auto& layer : layerGraph) {
+ std::cout << "{ ";
+ for(auto& p : layer) {
+ std::cout << p << "; ";
+ }
+ std::cout << "\b\b }" << std::endl;
+ }
+#endif
+}
+
+void BoundMatchOracle::buildLayerGraph(double r)
+{
+ //bool isDebug {false};
+ //printDebug(isDebug,"Entered buildLayerGraph");
+ layerGraph.clear();
+ DiagramPointSet L1 = M.getExposedVertices();
+ //printDebug(isDebug,"Got exposed vertices");
+ layerGraph.push_back(L1);
+ neighbOracle->rebuild(B, r);
+ //printDebug(isDebug,"Oracle rebuilt");
+ size_t k = 0;
+ DiagramPointSet layerNextEven;
+ DiagramPointSet layerNextOdd;
+ bool exposedVerticesFound {false};
+ while(true) {
+ //printDebug(isDebug, "k = ", k);
+ layerNextEven.clear();
+ for( auto p : layerGraph[k]) {
+ //printDebug(isDebug,"looking for neighbours for ", p);
+ bool neighbFound;
+ DiagramPoint neighbour {0.0, 0.0, DiagramPoint::DIAG, 1};
+ if (useRangeSearch) {
+ std::vector<DiagramPoint> neighbVec;
+ neighbOracle->getAllNeighbours(p, neighbVec);
+ neighbFound = !neighbVec.empty();
+ for(auto& neighbPt : neighbVec) {
+ layerNextEven.insert(neighbPt);
+ if (!exposedVerticesFound and M.isExposed(neighbPt))
+ exposedVerticesFound = true;
+ }
+ } else {
+ while(true) {
+ neighbFound = neighbOracle->getNeighbour(p, neighbour);
+ if (neighbFound) {
+ //printDebug(isDebug,"neighbour found, ", neighbour);
+ layerNextEven.insert(neighbour);
+ neighbOracle->deletePoint(neighbour);
+ //printDebug(isDebug,"is exposed: " + std::to_string(M.isExposed(neighbour)));
+ if ((!exposedVerticesFound) && M.isExposed(neighbour)) {
+ exposedVerticesFound = true;
+ }
+ } else {
+ //printDebug(isDebug,"no neighbours found for r = ", r);
+ break;
+ }
+ }
+ } // without range search
+ } // all vertices from previous odd layer processed
+ //printDebug(isDebug,"Next even layer finished");
+ if (layerNextEven.empty()) {
+ //printDebug(isDebug,"Next even layer is empty, augPathExist = false");
+ augPathExist = false;
+ break;
+ }
+ if (exposedVerticesFound) {
+ //printDebug(isDebug,"Exposed vertices found in the even layer, aug. paths exist");
+ //printDebug(isDebug,"Deleting all non-exposed from the last layer (we do not need them).");
+ for(auto it = layerNextEven.cbegin(); it != layerNextEven.cend(); ) {
+ if ( ! M.isExposed(*it) ) {
+ layerNextEven.erase(it++);
+ } else {
+ ++it;
+ }
+
+ }
+ layerGraph.push_back(layerNextEven);
+ augPathExist = true;
+ break;
+ }
+ layerGraph.push_back(layerNextEven);
+ M.getAllAdjacentVertices(layerNextEven, layerNextOdd);
+ //printDebug(isDebug,"Next odd layer finished");
+ layerGraph.push_back(layerNextOdd);
+ k += 2;
+ }
+ buildLayerOracles(r);
+ //printDebug(isDebug,"layer oracles built, layer graph:");
+ printLayerGraph();
+}
+
+
+
+BoundMatchOracle::~BoundMatchOracle()
+{
+ for(auto& oracle : layerOracles) {
+ delete oracle;
+ }
+ delete neighbOracle;
+}
+
+// create geometric oracles for each even layer
+// odd layers have NULL in layerOracles
+void BoundMatchOracle::buildLayerOracles(double r)
+{
+ //bool isDebug {false};
+ //printDebug(isDebug,"entered buildLayerOracles");
+ // free previously constructed oracles
+ for(auto& oracle : layerOracles) {
+ delete oracle;
+ }
+ layerOracles.clear();
+ //printDebug(isDebug,"previous oracles deleted");
+ for(size_t layerIdx = 0; layerIdx < layerGraph.size(); ++layerIdx) {
+ if (layerIdx % 2 == 1) {
+ // even layer, build actual oracle
+ layerOracles.push_back(new NeighbOracle(layerGraph[layerIdx], r, distEpsilon));
+ } else {
+ // odd layer
+ layerOracles.push_back(nullptr);
+ }
+ }
+ //printDebug(isDebug,"exiting buildLayerOracles");
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/brute.cpp b/geom_bottleneck/bottleneck/src/brute.cpp
new file mode 100644
index 0000000..200bc35
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/brute.cpp
@@ -0,0 +1,110 @@
+//----------------------------------------------------------------------
+// File: brute.cpp
+// Programmer: Sunil Arya and David Mount
+// Description: Brute-force nearest neighbors
+// Last modified: 05/03/05 (Version 1.1)
+//----------------------------------------------------------------------
+// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
+// David Mount. All Rights Reserved.
+//
+// This software and related documentation is part of the Approximate
+// Nearest Neighbor Library (ANN). This software is provided under
+// the provisions of the Lesser GNU Public License (LGPL). See the
+// file ../ReadMe.txt for further information.
+//
+// The University of Maryland (U.M.) and the authors make no
+// representations about the suitability or fitness of this software for
+// any purpose. It is provided "as is" without express or implied
+// warranty.
+//----------------------------------------------------------------------
+// History:
+// Revision 0.1 03/04/98
+// Initial release
+// Revision 1.1 05/03/05
+// Added fixed-radius kNN search
+//----------------------------------------------------------------------
+
+#include <ANN/ANNx.h> // all ANN includes
+#include "pr_queue_k.h" // k element priority queue
+
+//----------------------------------------------------------------------
+// Brute-force search simply stores a pointer to the list of
+// data points and searches linearly for the nearest neighbor.
+// The k nearest neighbors are stored in a k-element priority
+// queue (which is implemented in a pretty dumb way as well).
+//
+// If ANN_ALLOW_SELF_MATCH is ANNfalse then data points at distance
+// zero are not considered.
+//
+// Note that the error bound eps is passed in, but it is ignored.
+// These routines compute exact nearest neighbors (which is needed
+// for validation purposes in ann_test.cpp).
+//----------------------------------------------------------------------
+
+ANNbruteForce::ANNbruteForce( // constructor from point array
+ ANNpointArray pa, // point array
+ int n, // number of points
+ int dd) // dimension
+{
+ dim = dd; n_pts = n; pts = pa;
+}
+
+ANNbruteForce::~ANNbruteForce() { } // destructor (empty)
+
+void ANNbruteForce::annkSearch( // approx k near neighbor search
+ ANNpoint q, // query point
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor indices (returned)
+ ANNdistArray dd, // dist to near neighbors (returned)
+ double eps) // error bound (ignored)
+{
+ ANNmin_k mk(k); // construct a k-limited priority queue
+ int i;
+
+ if (k > n_pts) { // too many near neighbors?
+ annError("Requesting more near neighbors than data points", ANNabort);
+ }
+ // run every point through queue
+ for (i = 0; i < n_pts; i++) {
+ // compute distance to point
+ ANNdist sqDist = annDist(dim, pts[i], q);
+ if (ANN_ALLOW_SELF_MATCH || sqDist != 0)
+ mk.insert(sqDist, i);
+ }
+ for (i = 0; i < k; i++) { // extract the k closest points
+ dd[i] = mk.ith_smallest_key(i);
+ nn_idx[i] = mk.ith_smallest_info(i);
+ }
+}
+
+int ANNbruteForce::annkFRSearch( // approx fixed-radius kNN search
+ ANNpoint q, // query point
+ ANNdist sqRad, // squared radius
+ int k, // number of near neighbors to return
+ ANNidxArray nn_idx, // nearest neighbor array (returned)
+ ANNdistArray dd, // dist to near neighbors (returned)
+ double eps) // error bound
+{
+ ANNmin_k mk(k); // construct a k-limited priority queue
+ int i;
+ int pts_in_range = 0; // number of points in query range
+ // run every point through queue
+ for (i = 0; i < n_pts; i++) {
+ // compute distance to point
+ ANNdist sqDist = annDist(dim, pts[i], q);
+ if (sqDist <= sqRad && // within radius bound
+ (ANN_ALLOW_SELF_MATCH || sqDist != 0)) { // ...and no self match
+ mk.insert(sqDist, i);
+ pts_in_range++;
+ }
+ }
+ for (i = 0; i < k; i++) { // extract the k closest points
+ if (dd != NULL)
+ dd[i] = mk.ith_smallest_key(i);
+ if (nn_idx != NULL)
+ nn_idx[i] = mk.ith_smallest_info(i);
+ }
+
+ return pts_in_range;
+}
+}
diff --git a/geom_bottleneck/bottleneck/src/neighb_oracle.cpp b/geom_bottleneck/bottleneck/src/neighb_oracle.cpp
new file mode 100644
index 0000000..356883f
--- /dev/null
+++ b/geom_bottleneck/bottleneck/src/neighb_oracle.cpp
@@ -0,0 +1,278 @@
+/*
+ Copyright 2015, D. Morozov, M. Kerber, A. Nigmetov
+
+ This file is part of GeomBottleneck.
+
+ GeomBottleneck is free software: you can redistribute it and/or modify
+ it under the terms of the Lesser GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ GeomBottleneck is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ Lesser GNU General Public License for more details.
+
+ You should have received a copy of the Lesser GNU General Public License
+ along with GeomBottleneck. If not, see <http://www.gnu.org/licenses/>.
+
+*/
+
+
+#include <algorithm>
+#include "neighb_oracle.h"
+#include "def_debug.h"
+
+namespace geom_bt {
+/*static void printDebug(//bool isDebug, std::string s)*/
+//{
+//#ifdef DEBUG_NEIGHBOUR_ORACLE
+ //if (isDebug) {
+ //std::cout << s << std::endl;
+ //}
+//#endif
+//}
+
+//static void printDebug(//bool isDebug, std::string s, const DiagramPoint& p)
+//{
+//#ifdef DEBUG_NEIGHBOUR_ORACLE
+ //if (isDebug) {
+ //std::cout << s << p << std::endl;
+ //}
+//#endif
+//}
+
+//static void printDebug(//bool isDebug, std::string s, const double r)
+//{
+//#ifdef DEBUG_NEIGHBOUR_ORACLE
+ //if (isDebug) {
+ //std::cout << s << r << std::endl;
+ //}
+//#endif
+//}
+
+//static void printDebug(//bool isDebug, std::string s, const DiagramPointSet& dpSet)
+//{
+//#ifdef DEBUG_NEIGHBOUR_ORACLE
+ //if (isDebug) {
+ //std::cout << s << dpSet << std::endl;
+ //}
+//#endif
+//}
+
+
+
+// simple oracle
+NeighbOracleSimple::NeighbOracleSimple()
+{
+ r = 0.0;
+}
+
+NeighbOracleSimple::NeighbOracleSimple(const DiagramPointSet& S, const double rr, const double dEps)
+{
+ r = rr;
+ distEpsilon = dEps;
+ pointSet = S;
+}
+
+void NeighbOracleSimple::rebuild(const DiagramPointSet& S, const double rr)
+{
+ pointSet = S;
+ r = rr;
+}
+
+void NeighbOracleSimple::deletePoint(const DiagramPoint& p)
+{
+ pointSet.erase(p);
+}
+
+bool NeighbOracleSimple::getNeighbour(const DiagramPoint& q, DiagramPoint& result) const
+{
+ for(auto pit = pointSet.cbegin(); pit != pointSet.cend(); ++pit) {
+ if ( distLInf(*pit, q) <= r) {
+ result = *pit;
+ return true;
+ }
+ }
+ return false;
+}
+
+void NeighbOracleSimple::getAllNeighbours(const DiagramPoint& q, std::vector<DiagramPoint>& result)
+{
+ result.clear();
+ for(const auto& point : pointSet) {
+ if ( distLInf(point, q) <= r) {
+ result.push_back(point);
+ }
+ }
+ for(auto& pt : result) {
+ deletePoint(pt);
+ }
+}
+
+// ANN oracle
+//
+
+NeighbOracleAnn::NeighbOracleAnn(const DiagramPointSet& S, const double rr, const double dEps)
+{
+ assert(dEps >= 0);
+ distEpsilon = dEps;
+ // allocate space for query point
+ // and the output of nearest neighbour search
+ // this memory will be used in getNeighbour and freed in destructor
+ annQueryPoint = annAllocPt(annDim);
+ annIndices = new ANNidx[annK];
+ annDistances = new ANNdist[annK];
+ annPoints = nullptr;
+ lo = annAllocPt(annDim);
+ hi = annAllocPt(annDim);
+ // create kd tree
+ kdTree = nullptr;
+ rebuild(S, rr);
+}
+
+void NeighbOracleAnn::rebuild(const DiagramPointSet& S, double rr)
+{
+ //bool isDebug { false };
+ //printDebug(isDebug, "Entered rebuild, r = ", rr);
+ r = rr;
+ size_t annNumPoints = S.size();
+ //printDebug(isDebug, "S = ", S);
+ if (annNumPoints > 0) {
+ //originalPointSet = S;
+ pointIdxLookup.clear();
+ pointIdxLookup.reserve(S.size());
+ allPoints.clear();
+ allPoints.reserve(S.size());
+ diagonalPoints.clear();
+ diagonalPoints.reserve(S.size() / 2);
+ for(auto pit = S.cbegin(); pit != S.cend(); ++pit) {
+ allPoints.push_back(*pit);
+ if (pit->type == DiagramPoint::DIAG) {
+ diagonalPoints.insert(*pit);
+ }
+ }
+ if (annPoints) {
+ annDeallocPts(annPoints);
+ }
+ annPoints = annAllocPts(annNumPoints, annDim);
+ auto annPointsPtr = *annPoints;
+ size_t pointIdx = 0;
+ for(auto& dataPoint : allPoints) {
+ pointIdxLookup.insert( { dataPoint, pointIdx++ } );
+ *annPointsPtr++ = dataPoint.getRealX();
+ *annPointsPtr++ = dataPoint.getRealY();
+ }
+ delete kdTree;
+ kdTree = new ANNkd_tree(annPoints,
+ annNumPoints,
+ annDim,
+ 1, // bucket size
+ ANN_KD_STD);
+ }
+}
+
+void NeighbOracleAnn::deletePoint(const DiagramPoint& p)
+{
+ //bool isDebug { true };
+ auto findRes = pointIdxLookup.find(p);
+ assert(findRes != pointIdxLookup.end());
+ //printDebug(isDebug, "Deleting point ", p);
+ size_t pointIdx { (*findRes).second };
+ //printDebug(isDebug, "pointIdx = ", pointIdx);
+ //originalPointSet.erase(p);
+ diagonalPoints.erase(p, false);
+ kdTree->delete_point(pointIdx);
+#ifdef DEBUG_NEIGHBOUR_ORACLE
+ kdTree->Print(ANNtrue, std::cout);
+#endif
+}
+
+bool NeighbOracleAnn::getNeighbour(const DiagramPoint& q, DiagramPoint& result) const
+{
+ //bool isDebug { false };
+ //printDebug(isDebug, "getNeighbour for q = ", q);
+ if (0 == kdTree->getActualNumPoints() ) {
+ //printDebug(isDebug, "annNumPoints = 0, not found ");
+ return false;
+ }
+ // distance between two diagonal points
+ // is 0
+ if (DiagramPoint::DIAG == q.type) {
+ if (!diagonalPoints.empty()) {
+ result = *diagonalPoints.cbegin();
+ //printDebug(isDebug, "Neighbour found in diagonal points, res = ", result);
+ return true;
+ }
+ }
+ // if no neighbour found among diagonal points,
+ // search in ANN kd_tree
+ annQueryPoint[0] = q.getRealX();
+ annQueryPoint[1] = q.getRealY();
+ //annIndices[0] = ANN_NULL_IDX;
+ kdTree->annkSearch(annQueryPoint, annK, annIndices, annDistances, annEpsilon);
+ //kdTree->annkFRSearch(annQueryPoint, r, annK, annIndices, annDistances, annEpsilon);
+ //std::cout << distEpsilon << " = distEpsilon " << std::endl;
+ if (annDistances[0] <= r + distEpsilon) {
+ //if (annIndices[0] != ANN_NULL_IDX) {
+ result = allPoints[annIndices[0]];
+ //printDebug(isDebug, "Neighbour found with kd-tree, index = ", annIndices[0]);
+ //printDebug(isDebug, "result = ", result);
+ return true;
+ }
+ //printDebug(isDebug, "No neighbour found for r = ", r);
+ return false;
+}
+
+void NeighbOracleAnn::getAllNeighbours(const DiagramPoint& q, std::vector<DiagramPoint>& result)
+{
+ //bool isDebug { true };
+ //printDebug(isDebug, "Entered getAllNeighbours for q = ", q);
+ result.clear();
+ // add diagonal points, if necessary
+ if ( DiagramPoint::DIAG == q.type) {
+ for( auto& diagPt : diagonalPoints ) {
+ result.push_back(diagPt);
+ }
+ }
+ // delete diagonal points we found
+ // to prevent finding them again
+ for(auto& pt : result) {
+ //printDebug(isDebug, "deleting DIAG point pt = ", pt);
+ deletePoint(pt);
+ }
+ size_t diagOffset = result.size();
+ // create the query rectangle
+ // centered at q of radius r
+ lo[0] = q.getRealX() - r;
+ lo[1] = q.getRealY() - r;
+ hi[0] = q.getRealX() + r;
+ hi[1] = q.getRealY() + r;
+ ANNorthRect annRect { annDim, lo, hi };
+ std::vector<size_t> pointIndicesOut;
+ // perform range search on kd-tree
+ kdTree->range_search(annRect, pointIndicesOut);
+ // get actual points in result
+ for(auto& ptIdx : pointIndicesOut) {
+ result.push_back(allPoints[ptIdx]);
+ }
+ // delete all points we found
+ for(auto ptIt = result.begin() + diagOffset; ptIt != result.end(); ++ptIt) {
+ //printDebug(isDebug, "deleting point pt = ", *ptIt);
+ deletePoint(*ptIt);
+ }
+}
+
+NeighbOracleAnn::~NeighbOracleAnn()
+{
+ delete [] annIndices;
+ delete [] annDistances;
+ delete kdTree;
+ annDeallocPt(annQueryPoint);
+ annDeallocPt(lo);
+ annDeallocPt(hi);
+ if (annPoints) {
+ annDeallocPts(annPoints);
+ }
+}
+}
diff --git a/geom_bottleneck/example/bottleneck_dist.cpp b/geom_bottleneck/example/bottleneck_dist.cpp
new file mode 100644
index 0000000..9a50ce2
--- /dev/null
+++ b/geom_bottleneck/example/bottleneck_dist.cpp
@@ -0,0 +1,55 @@
+#include <iostream>
+#include <iomanip>
+#include <cstdlib>
+#include <vector>
+#include "bottleneck.h"
+
+// any container of pairs of doubles can be used;
+// we use vector in this example.
+
+typedef std::vector<std::pair<double, double>> PairVector;
+
+int main(int argc, char* argv[])
+{
+ if (argc < 3 ) {
+ std::cerr << "Usage: " << argv[0] << " file1 file2 [relative_error]. If relative_error is omitted, the exact distance is computed." << std::endl;
+ return 1;
+ }
+
+ PairVector diagramA, diagramB;
+ if (!geom_bt::readDiagramPointSet(argv[1], diagramA)) {
+ std::exit(1);
+ }
+
+ if (!geom_bt::readDiagramPointSet(argv[2], diagramB)) {
+ std::exit(1);
+ }
+
+ double res;
+ if (argc >= 4) {
+ // the third parameter is epsilon,
+ // return approximate distance (faster)
+ double approxEpsilon = atof(argv[3]);
+ if (approxEpsilon > 0.0) {
+ res = geom_bt::bottleneckDistApprox(diagramA, diagramB, approxEpsilon);
+ } else if (approxEpsilon == 0.0) {
+ res = geom_bt::bottleneckDistExact(diagramA, diagramB);
+ } else {
+ std::cerr << "The third parameter (relative error) must be non-negative!" << std::endl;
+ std::exit(1);
+ }
+ } else {
+ // only filenames have been supplied, return exact distance
+ res = geom_bt::bottleneckDistExact(diagramA, diagramB);
+ }
+ std::cout << std::setprecision(15) << res << std::endl;
+
+ // Alternative could be to construct DiagramPointSet
+ // using the constructor with iterators.
+ // May be useful if the same diagram is used multiple times
+ // to avoid copying data from user's container each time.
+
+ //geom_bt::DiagramPointSet dA(diagramA.begin(), diagramA.end());
+ //geom_bt::DiagramPointSet dB(diagramB.begin(), diagramB.end());
+ //double result1 = geom_bt::bottleneckDistExact(dA, dB);
+ //std::cout << std::setprecision(15) << result1 << std::endl;
+
+ return 0;
+}
diff --git a/geom_matching/.gitignore b/geom_matching/.gitignore
new file mode 100644
index 0000000..2a1ef92
--- /dev/null
+++ b/geom_matching/.gitignore
@@ -0,0 +1,32 @@
+/*.cfg
+wasserstein/build/
+*.gitattributes
+*.opensdf
+*.sdf
+*.suo
+*.vcxproj
+*.filters
+*.log
+*.tlog
+*.lastbuildstate
+*.obj
+*.idb
+*.pdb
+*.exe
+*.ilk
+*.user
+*.out
+*.output
+*.pyc
+*.*~*
+*.swp
+*.nfs*
+*.txt
+*.pdf
+*.o
+*.d
+*.dll
+*.lib
+*.exe
+makeout
+tags
diff --git a/geom_matching/README b/geom_matching/README
new file mode 100644
index 0000000..33a5611
--- /dev/null
+++ b/geom_matching/README
@@ -0,0 +1,93 @@
+This is a program for computing Wasserstein distances between persistence
+diagrams using a geometric version of the auction algorithm.
+
+Accompanying paper: M. Kerber, D. Morozov, A. Nigmetov. Geometry Helps To Compare
+Persistence Diagrams (ALENEX 2016, http://www.geometrie.tugraz.at/nigmetov/geom_dist.pdf)
+Bug reports can be sent to "nigmetov EMAIL SIGN tugraz DOT at".
+
+The Wasserstein distance $W_{q, p}(X, Y)$ between two persistence diagrams is
+the minimum over all perfect matchings between $X$ and $Y$ (where $y(x)$ is the
+point of $Y$ matched to $x \in X$) of the expression
+$\left( \sum_{x \in X} \| x - y(x) \|_p^q \right)^{1/q}$.
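+
+A small sanity check (an illustrative example, not taken from the paper above):
+for $X = \{(0, 2)\}$, $Y = \{(0, 4)\}$, $q = 1$ and $p = \infty$, matching the two
+off-diagonal points directly costs $\|(0,2) - (0,4)\|_\infty = 2$, while matching
+each point to its diagonal projection costs $1 + 2 = 3$, so $W_{1, \infty}(X, Y) = 2$.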
+
+# Dependencies
+
+Requires boost 1.58 or higher.
+Your compiler must support C++11.
+
+# Usage:
+
+To use the standalone command-line utility wasserstein_dist:
+
+wasserstein_dist file1 file2 [wasserstein_degree] [relative_error] [internal_p].
+
+The parameter wasserstein_degree corresponds to $q$; as $q$ tends to infinity,
+the Wasserstein distance tends to the bottleneck distance.
+
+The parameter internal_p corresponds to $p$.
+
+Default values:
+wasserstein_degree = 1.0,
+relative_error = 0.01,
+internal_p = infinity.
+
+Valid values:
+wasserstein_degree must be in $[1.0, \infty)$,
+relative_error must be positive,
+internal_p must be in $[1.0, \infty]$ (to set internal_p to $\infty$ explicitly, supply inf).
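+
+For example (the file names below are placeholders):
+
+wasserstein_dist diagramA.txt diagramB.txt 2.0 0.001 2
+
+computes $W_{2, 2}$ with relative error 0.001.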
+
+file1 and file2 must contain persistence diagrams in plain text format
+(one point per line, empty lines are ignored, comments can be made with #):
+
+# this is what your input can look like
+x_1 y_1 # two real numbers per line
+...
+# empty lines or comments are ignored
+x_n y_n
+
+To use from your code:
+
+#include "wasserstein.h"
+
+// All classes and functions are in geom_ws namespace
+
+std::vector<std::pair<double, double>> diagram1, diagram2;
+// any container class that supports range-for loops will do.
+// A pair represents a single point,
+// first component = x-coordinate,
+// second component = y-coordinate.
+// ...
+// load your diagrams into diagram1, diagram2 (off-diagonal points).
+// You can use function readDiagramPointSet:
+geom_ws::readDiagramPointSet("diagram1.txt", diagram1);
+geom_ws::readDiagramPointSet("diagram2.txt", diagram1);
+// ...
+// to get the distance:
+double wsDist = geom_ws::wassersteinDist(diagram1, diagram2, q, delta, p);
+// q is wasserstein degree, delta is relative error,
+// p is the internal norm in Wasserstein distance, defaults to infinity
+
+Necessary projections (diagonal points) will be added in the wassersteinDist
+function.
+
+See also code in wasserstein/example/wasserstein_dist.cpp.
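+
+A complete, minimal version of the snippet above might look as follows (a sketch
+only; it assumes wasserstein.h is on the include path and that the five-argument
+call shown above is available):
+
+#include <iostream>
+#include <limits>
+#include <utility>
+#include <vector>
+#include "wasserstein.h"
+
+int main()
+{
+    std::vector<std::pair<double, double>> diagram1, diagram2;
+    if (!geom_ws::readDiagramPointSet("diagram1.txt", diagram1) ||
+        !geom_ws::readDiagramPointSet("diagram2.txt", diagram2)) {
+        return 1;
+    }
+    // q = 1.0 (degree), delta = 0.01 (relative error), p = infinity (internal norm)
+    double wsDist = geom_ws::wassersteinDist(diagram1, diagram2, 1.0, 0.01,
+                                             std::numeric_limits<double>::infinity());
+    std::cout << wsDist << std::endl;
+    return 0;
+}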
+
+# License
+
+See wasserstein/license.txt
+
+# Building
+
+CMakeLists.txt in the root directory can be used to build the library (contained
+in the wasserstein/src/ directory) and the command-line utility (in the
+wasserstein/example/ directory) that computes the distance between two diagrams
+stored in text files.
+
+On Linux/Mac:
+
+mkdir build
+cd build
+cmake ..
+make
+
+On Windows (checked with Visual Studio 2015, Community version)
+use cmake-gui to create the solution in the build directory and build it with VS.
diff --git a/geom_matching/wasserstein/example/wasserstein_dist.cpp b/geom_matching/wasserstein/example/wasserstein_dist.cpp
new file mode 100644
index 0000000..e92ab54
--- /dev/null
+++ b/geom_matching/wasserstein/example/wasserstein_dist.cpp
@@ -0,0 +1,89 @@
+/*
+
+Copyright (c) 2015, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#include <iostream>
+#include <iomanip>
+#include <fstream>
+#include <vector>
+#include <algorithm>
+#include <limits>
+#include <random>
+#include <cstdlib>
+
+#include "wasserstein.h"
+
+// any container of pairs of doubles can be used;
+// we use vector in this example.
+
+typedef std::vector<std::pair<double, double>> PairVector;
+
+int main(int argc, char* argv[])
+{
+ PairVector diagramA, diagramB;
+
+ if (argc < 3 ) {
+ std::cerr << "Usage: " << argv[0] << " file1 file2 [wasserstein_degree] [relative_error] [internal norm]. By default power is 1.0, relative error is 0.01, internal norm is l_infinity." << std::endl;
+ return 1;
+ }
+
+ if (!geom_ws::readDiagramPointSet(argv[1], diagramA)) {
+ std::exit(1);
+ }
+
+ if (!geom_ws::readDiagramPointSet(argv[2], diagramB)) {
+ std::exit(1);
+ }
+
+ double wasserPower = (4 <= argc) ? atof(argv[3]) : 1.0;
+ if (wasserPower < 1.0) {
+ std::cerr << "The third argument (wasserstein_degree) was \"" << argv[3] << "\", must be a number >= 1.0. Cannot proceed. " << std::endl;
+ std::exit(1);
+ }
+
+ //default relative error: 1%
+ double delta = (5 <= argc) ? atof(argv[4]) : 0.01;
+ if ( delta <= 0.0) {
+ std::cerr << "The fourth argument (relative error) was \"" << argv[4] << "\", must be a number > 0.0. Cannot proceed. " << std::endl;
+ std::exit(1);
+ }
+
+ // default for internal metric is l_infinity
+ double internal_p = ( 6 <= argc ) ? atof(argv[5]) : std::numeric_limits<double>::infinity();
+ if (internal_p < 1.0) {
+ std::cerr << "The fifth argument (internal norm) was \"" << argv[5] << "\", must be a number >= 1.0. Cannot proceed. " << std::endl;
+ std::exit(1);
+ }
+
+ // if you want to specify initial value for epsilon and the factor
+ // for epsilon-scaling
+ double initialEpsilon = ( 7 <= argc ) ? atof(argv[6]) : 0.0;
+ double epsFactor = ( 8 <= argc ) ? atof(argv[7]) : 0.0;
+
+ double res = geom_ws::wassersteinDist(diagramA, diagramB, wasserPower, delta, internal_p, initialEpsilon, epsFactor);
+ std::cout << std::setprecision(15) << res << std::endl;
+ return 0;
+}
diff --git a/geom_matching/wasserstein/include/auction_oracle.h b/geom_matching/wasserstein/include/auction_oracle.h
new file mode 100644
index 0000000..e803218
--- /dev/null
+++ b/geom_matching/wasserstein/include/auction_oracle.h
@@ -0,0 +1,305 @@
+/*
+
+Copyright (c) 2015, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#ifndef AUCTION_ORACLE_H
+#define AUCTION_ORACLE_H
+
+
+#define USE_BOOST_HEAP
+
+#include <map>
+#include <memory>
+#include <set>
+#include <list>
+#include <limits>
+#include <vector>
+
+#ifdef USE_BOOST_HEAP
+#include <boost/heap/d_ary_heap.hpp>
+#endif
+
+#include "basic_defs_ws.h"
+#include "dnn/geometry/euclidean-fixed.h"
+#include "dnn/local/kd-tree.h"
+
+namespace geom_ws {
+
+struct CompPairsBySecondStruct {
+ bool operator()(const IdxValPair& a, const IdxValPair& b) const
+ {
+ return a.second < b.second;
+ }
+};
+
+//
+struct CompPairsBySecondGreaterStruct {
+ bool operator()(const IdxValPair& a, const IdxValPair& b) const
+ {
+ return a.second > b.second;
+ }
+};
+
+struct CompPairsBySecondLexStruct {
+ bool operator()(const IdxValPair& a, const IdxValPair& b) const
+ {
+ return a.second < b.second or (a.second == b.second and a.first > b.first);
+ }
+};
+
+struct CompPairsBySecondLexGreaterStruct {
+ bool operator()(const IdxValPair& a, const IdxValPair& b) const
+ {
+ return a.second > b.second or (a.second == b.second and a.first > b.first);
+ }
+};
+
+using ItemsTimePair = std::pair<IdxType, int>;
+
+using UpdateList = std::list<ItemsTimePair>;
+using UpdateListIter = UpdateList::iterator;
+
+
+#ifdef USE_BOOST_HEAP
+using LossesHeap = boost::heap::d_ary_heap<IdxValPair, boost::heap::arity<2>, boost::heap::mutable_<true>, boost::heap::compare<CompPairsBySecondGreaterStruct>>;
+#else
+template<class ComparisonStruct>
+class IdxValHeap {
+public:
+ using InternalKeeper = std::set<IdxValPair, ComparisonStruct>;
+ using handle_type = typename InternalKeeper::iterator;
+ // methods
+ handle_type push(const IdxValPair& val)
+ {
+ auto resPair = _heap.insert(val);
+ assert(resPair.second);
+ assert(resPair.first != _heap.end());
+ return resPair.first;
+ }
+
+ void decrease(handle_type& handle, const IdxValPair& newVal)
+ {
+ _heap.erase(handle);
+ handle = push(newVal);
+ }
+
+ size_t size() const
+ {
+ return _heap.size();
+ }
+
+ handle_type ordered_begin()
+ {
+ return _heap.begin();
+ }
+
+ handle_type ordered_end()
+ {
+ return _heap.end();
+ }
+
+
+private:
+ std::set<IdxValPair, ComparisonStruct> _heap;
+};
+
+// if we store losses, the minimal value should come first
+using LossesHeap = IdxValHeap<CompPairsBySecondLexStruct>;
+#endif
+
+struct DebugOptimalBid {
+ DebugOptimalBid() : bestItemIdx(-1), bestItemValue(-666.666), secondBestItemIdx(-1), secondBestItemValue(-666.666) {};
+ IdxType bestItemIdx;
+ double bestItemValue;
+ IdxType secondBestItemIdx;
+ double secondBestItemValue;
+};
+
+struct AuctionOracleAbstract {
+ AuctionOracleAbstract(const std::vector<DiagramPoint>& _bidders, const std::vector<DiagramPoint>& _items, const double _wassersteinPower, const double _internal_p = std::numeric_limits<double>::infinity());
+ ~AuctionOracleAbstract() {}
+ virtual IdxValPair getOptimalBid(const IdxType bidderIdx) = 0;
+ virtual void setPrice(const IdxType itemsIdx, const double newPrice) = 0;
+ virtual void adjustPrices(void) = 0;
+ double getEpsilon() { return epsilon; };
+ virtual void setEpsilon(double newEpsilon) { assert(newEpsilon >= 0.0); epsilon = newEpsilon; };
+ std::vector<double> getPrices() { return prices; }
+protected:
+ const std::vector<DiagramPoint>& bidders;
+ const std::vector<DiagramPoint>& items;
+ std::vector<double> prices;
+ double wassersteinPower;
+ double epsilon;
+ double internal_p;
+ double getValueForBidder(size_t bidderIdx, size_t itemsIdx);
+};
+
+struct AuctionOracleLazyHeap final : AuctionOracleAbstract {
+ AuctionOracleLazyHeap(const std::vector<DiagramPoint>& bidders, const std::vector<DiagramPoint>& items, const double wassersteinPower, const double _internal_p = std::numeric_limits<double>::infinity());
+ ~AuctionOracleLazyHeap();
+ // data members
+ // temporarily make everything public
+ std::vector<std::vector<double>> weightMatrix;
+ //double weightAdjConst;
+ double maxVal;
+ // vector of heaps to find the best items
+ std::vector<LossesHeap*> lossesHeap;
+ std::vector<std::vector<LossesHeap::handle_type>> lossesHeapHandles;
+ // methods
+ void fillInLossesHeap(void);
+ void setPrice(const IdxType itemsIdx, const double newPrice) override final;
+ IdxValPair getOptimalBid(const IdxType bidderIdx) override final;
+ double getMatchingWeight(const std::vector<IdxType>& biddersToItems) const;
+ void adjustPrices(void) override final;
+ // to update the queue in lazy fashion
+ std::vector<UpdateListIter> itemsIterators;
+ UpdateList updateList;
+ std::vector<int> biddersUpdateMoments;
+ int updateCounter;
+ void updateQueueForBidder(const IdxType bidderIdx);
+ // debug
+ DebugOptimalBid getOptimalBidDebug(const IdxType bidderIdx);
+};
+
+struct AuctionOracleLazyHeapRestricted final : AuctionOracleAbstract {
+ AuctionOracleLazyHeapRestricted(const std::vector<DiagramPoint>& bidders, const std::vector<DiagramPoint>& items, const double wassersteinPower, const double _internal_p = std::numeric_limits<double>::infinity());
+ ~AuctionOracleLazyHeapRestricted();
+ // data members
+ // temporarily make everything public
+ std::vector<std::vector<double>> weightMatrix;
+ //double weightAdjConst;
+ double maxVal;
+ // vector of heaps to find the best items
+ std::vector<LossesHeap*> lossesHeap;
+ std::vector<std::vector<size_t>> itemsIndicesForHeapHandles;
+ std::vector<std::vector<LossesHeap::handle_type>> lossesHeapHandles;
+ // methods
+ void fillInLossesHeap(void);
+ void setPrice(const IdxType itemsIdx, const double newPrice) override final;
+ IdxValPair getOptimalBid(const IdxType bidderIdx) override final;
+ double getMatchingWeight(const std::vector<IdxType>& biddersToItems) const;
+ void adjustPrices(void) override final;
+ // to update the queue in lazy fashion
+ std::vector<UpdateListIter> itemsIterators;
+ UpdateList updateList;
+ std::vector<int> biddersUpdateMoments;
+ int updateCounter;
+ void updateQueueForBidder(const IdxType bidderIdx);
+ LossesHeap diagItemsHeap;
+ std::vector<LossesHeap::handle_type> diagHeapHandles;
+ std::vector<size_t> heapHandlesIndices;
+ // debug
+
+ DebugOptimalBid getOptimalBidDebug(const IdxType bidderIdx);
+
+ // for diagonal points
+ bool bestDiagonalItemsComputed;
+ size_t bestDiagonalItemIdx;
+ double bestDiagonalItemValue;
+ size_t secondBestDiagonalItemIdx;
+ double secondBestDiagonalItemValue;
+};
+
+struct AuctionOracleKDTree final : AuctionOracleAbstract {
+ typedef dnn::Point<2, double> DnnPoint;
+ typedef dnn::PointTraits<DnnPoint> DnnTraits;
+
+ AuctionOracleKDTree(const std::vector<DiagramPoint>& bidders, const std::vector<DiagramPoint>& items, const double wassersteinPower, const double _internal_p = std::numeric_limits<double>::infinity());
+ ~AuctionOracleKDTree();
+ // data members
+ // temporarily make everything public
+ double maxVal;
+ double weightAdjConst;
+ dnn::KDTree<DnnTraits>* kdtree;
+ std::vector<DnnPoint> dnnPoints;
+ std::vector<DnnPoint*> dnnPointHandles;
+ dnn::KDTree<DnnTraits>* kdtreeAll;
+ std::vector<DnnPoint> dnnPointsAll;
+ std::vector<DnnPoint*> dnnPointHandlesAll;
+ LossesHeap diagItemsHeap;
+ std::vector<LossesHeap::handle_type> diagHeapHandles;
+ std::vector<size_t> heapHandlesIndices;
+ std::vector<size_t> kdtreeItems;
+ // vector of heaps to find the best items
+ void setPrice(const IdxType itemsIdx, const double newPrice) override final;
+ IdxValPair getOptimalBid(const IdxType bidderIdx) override final;
+ void adjustPrices(void) override final;
+ // debug routines
+ DebugOptimalBid getOptimalBidDebug(IdxType bidderIdx);
+ void setEpsilon(double newVal) override final;
+};
+
+struct AuctionOracleKDTreeRestricted final : AuctionOracleAbstract {
+ typedef dnn::Point<2, double> DnnPoint;
+ typedef dnn::PointTraits<DnnPoint> DnnTraits;
+
+ AuctionOracleKDTreeRestricted(const std::vector<DiagramPoint>& bidders, const std::vector<DiagramPoint>& items, const double wassersteinPower, const double _internal_p = std::numeric_limits<double>::infinity());
+ ~AuctionOracleKDTreeRestricted();
+ // data members
+ // temporarily make everything public
+ double maxVal;
+ double weightAdjConst;
+ dnn::KDTree<DnnTraits>* kdtree;
+ std::vector<DnnPoint> dnnPoints;
+ std::vector<DnnPoint*> dnnPointHandles;
+ std::vector<DnnPoint> dnnPointsAll;
+ std::vector<DnnPoint*> dnnPointHandlesAll;
+ LossesHeap diagItemsHeap;
+ std::vector<LossesHeap::handle_type> diagHeapHandles;
+ std::vector<size_t> heapHandlesIndices;
+ std::vector<size_t> kdtreeItems;
+ // vector of heaps to find the best items
+ void setPrice(const IdxType itemsIdx, const double newPrice) override final;
+ IdxValPair getOptimalBid(const IdxType bidderIdx) override final;
+ void adjustPrices(void) override final;
+ // debug routines
+ DebugOptimalBid getOptimalBidDebug(IdxType bidderIdx);
+ void setEpsilon(double newVal) override final;
+
+
+ bool bestDiagonalItemsComputed;
+ size_t bestDiagonalItemIdx;
+ double bestDiagonalItemValue;
+ size_t secondBestDiagonalItemIdx;
+ double secondBestDiagonalItemValue;
+};
+
+struct AuctionOracleRestricted final : AuctionOracleAbstract {
+ AuctionOracleRestricted(const std::vector<DiagramPoint>& bidders, const std::vector<DiagramPoint>& items, const double wassersteinPower, const double _internal_p = std::numeric_limits<double>::infinity());
+ IdxValPair getOptimalBid(const IdxType bidderIdx) override;
+ void setPrice(const IdxType itemsIdx, const double newPrice) override;
+ void adjustPrices(void) override {};
+ void setEpsilon(double newEpsilon) override { assert(newEpsilon >= 0.0); epsilon = newEpsilon; };
+ // data
+ std::vector<std::vector<double>> weightMatrix;
+ double maxVal;
+ constexpr static bool isRestricted = true;
+};
+
+std::ostream& operator<< (std::ostream& output, const DebugOptimalBid& db);
+
+} // end of namespace geom_ws
+
+#endif
diff --git a/geom_matching/wasserstein/include/auction_runner_gs.h b/geom_matching/wasserstein/include/auction_runner_gs.h
new file mode 100644
index 0000000..34a91e8
--- /dev/null
+++ b/geom_matching/wasserstein/include/auction_runner_gs.h
@@ -0,0 +1,122 @@
+/*
+
+Copyright (c) 2016, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#ifndef AUCTION_RUNNER_GS_H
+#define AUCTION_RUNNER_GS_H
+
+#include <unordered_set>
+
+#include "auction_oracle.h"
+
+//#define KEEP_UNASSIGNED_ORDERED
+// if this symbol is defined,
+// unassigned bidders are processed in a lexicographic order.
+// See LexicogrCompDiagramPoint comparator.
+
+
+namespace geom_ws {
+
+//using AuctionOracle = AuctionOracleLazyHeapRestricted;
+using AuctionOracle = AuctionOracleKDTreeRestricted;
+
+#ifdef KEEP_UNASSIGNED_ORDERED
+using IdxPointPair = std::pair<size_t, DiagramPoint>;
+
+struct LexicogrCompDiagramPoint {
+ bool operator ()(const IdxPointPair& a, const IdxPointPair& b) {
+ const auto& p1 = a.second;
+ const auto& p2 = b.second;
+
+ return ( (not p1.isDiagonal() and p2.isDiagonal()) or
+ ( p1.isDiagonal() == p2.isDiagonal() and p1.getRealX() < p2.getRealX() ) or
+ ( p1.isDiagonal() == p2.isDiagonal() and p1.getRealX() == p2.getRealX() and p1.getRealY() < p2.getRealY() ) or
+ ( p1.isDiagonal() == p2.isDiagonal() and p1.getRealX() == p2.getRealX() and p1.getRealY() == p2.getRealY() and a.first < b.first ) );
+ }
+};
+
+using OrderedUnassignedKeeper = std::set<IdxPointPair, LexicogrCompDiagramPoint>;
+#endif
+
+// the two parameters that you can tweak in auction algorithm are:
+// 1. epsilonCommonRatio
+// 2. maxIterNum
+
+class AuctionRunnerGS {
+public:
+ AuctionRunnerGS(const std::vector<DiagramPoint>& A,
+ const std::vector<DiagramPoint>& B,
+ const double q,
+ const double _delta,
+ const double _internal_p,
+ const double _initialEpsilon,
+ const double _epsFactor);
+ void setEpsilon(double newVal) { assert(newVal > 0.0); epsilon = newVal; };
+ double getEpsilon(void) const { return epsilon; }
+ double getWassersteinDistance(void);
+ static constexpr int maxIterNum { 25 }; // maximal number of iterations of epsilon-scaling
+private:
+ // private data
+ std::vector<DiagramPoint> bidders, items;
+ const size_t numBidders;
+ const size_t numItems;
+ std::vector<IdxType> itemsToBidders;
+ std::vector<IdxType> biddersToItems;
+ double wassersteinPower;
+ double epsilon;
+ double delta;
+ double internal_p;
+ double initialEpsilon;
+ double epsilonCommonRatio; // next epsilon = current epsilon / epsilonCommonRatio
+ double weightAdjConst;
+ double wassersteinDistance;
+ // to get the 2 best items
+ std::unique_ptr<AuctionOracle> oracle;
+#ifdef KEEP_UNASSIGNED_ORDERED
+ OrderedUnassignedKeeper unassignedBidders;
+#else
+ std::unordered_set<size_t> unassignedBidders;
+#endif
+ // private methods
+ void assignItemToBidder(const IdxType bidderIdx, const IdxType itemsIdx);
+ void clearBidTable(void);
+ void runAuction(void);
+ void runAuctionPhase(void);
+ void flushAssignment(void);
+
+ // for debug only
+ void sanityCheck(void);
+ void printDebug(void);
+ int countUnhappy(void);
+ void printMatching(void);
+ double getDistanceToQthPowerInternal(void);
+ int numRounds { 0 };
+};
+
+} // end of namespace geom_ws
+
+#endif
diff --git a/geom_matching/wasserstein/include/auction_runner_jac.h b/geom_matching/wasserstein/include/auction_runner_jac.h
new file mode 100644
index 0000000..ae0cb56
--- /dev/null
+++ b/geom_matching/wasserstein/include/auction_runner_jac.h
@@ -0,0 +1,97 @@
+/*
+
+Copyright (c) 2016, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#ifndef AUCTION_RUNNER_JAK_H
+#define AUCTION_RUNNER_JAK_H
+
+#include <unordered_set>
+
+#include "auction_oracle.h"
+
+namespace geom_ws {
+
+using AuctionOracle = AuctionOracleKDTreeRestricted;
+
+// the two parameters that you can tweak in auction algorithm are:
+// 1. epsilonCommonRatio
+// 2. maxIterNum
+
+
+class AuctionRunnerJak {
+public:
+ AuctionRunnerJak(const std::vector<DiagramPoint>& A, const std::vector<DiagramPoint>& B, const double q, const double _delta, const double _internal_p);
+ void setEpsilon(double newVal) { assert(newVal > 0.0); epsilon = newVal; };
+ double getEpsilon(void) const { return epsilon; }
+ double getWassersteinDistance(void);
+ static constexpr double epsilonCommonRatio { 5 }; // next epsilon = current epsilon / epsilonCommonRatio
+ static constexpr int maxIterNum { 25 }; // maximal number of iterations of epsilon-scaling
+private:
+ // private data
+ std::vector<DiagramPoint> bidders, items;
+ const size_t numBidders;
+ const size_t numItems;
+ std::vector<IdxType> itemsToBidders;
+ std::vector<IdxType> biddersToItems;
+ double wassersteinPower;
+ double epsilon;
+ double delta;
+ double internal_p;
+ double weightAdjConst;
+ double wassersteinDistance;
+ std::vector<IdxValPair> bidTable;
+ // to get the 2 best items
+ std::unique_ptr<AuctionOracle> oracle;
+ std::list<size_t> unassignedBidders;
+ std::vector< std::list<size_t>::iterator > unassignedBiddersIterators;
+ std::vector< short > itemReceivedBidVec;
+ std::list<size_t> itemsWithBids;
+ // private methods
+ void assignGoodToBidder(const IdxType bidderIdx, const IdxType itemsIdx);
+ void assignToBestBidder(const IdxType itemsIdx);
+ void clearBidTable(void);
+ void runAuction(void);
+ void runAuctionPhase(void);
+ void submitBid(IdxType bidderIdx, const IdxValPair& itemsBidValuePair);
+ void flushAssignment(void);
+
+ // for debug only
+ void sanityCheck(void);
+ void printDebug(void);
+ int countUnhappy(void);
+ void printMatching(void);
+ double getDistanceToQthPowerInternal(void);
+};
+
+
+
+} // end of namespace geom_ws
+
+#endif
diff --git a/geom_matching/wasserstein/include/basic_defs_ws.h b/geom_matching/wasserstein/include/basic_defs_ws.h
new file mode 100644
index 0000000..20cd2a0
--- /dev/null
+++ b/geom_matching/wasserstein/include/basic_defs_ws.h
@@ -0,0 +1,114 @@
+/*
+
+Copyright (c) 2015, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#ifndef BASIC_DEFS_WS_H
+#define BASIC_DEFS_WS_H
+
+#include <vector>
+#include <math.h>
+#include <cstddef>
+#include <unordered_map>
+#include <unordered_set>
+#include <iostream>
+#include <string>
+#include <assert.h>
+
+#ifdef _WIN32
+#include <ciso646>
+#endif
+
+
+#include "def_debug.h"
+
+#define MIN_VALID_ID 10
+
+namespace geom_ws {
+
+using IdxType = int;
+using IdxValPair = std::pair<IdxType, double>;
+
+
+struct Point {
+ double x, y;
+ bool operator==(const Point& other) const;
+ bool operator!=(const Point& other) const;
+ Point(double ax, double ay) : x(ax), y(ay) {}
+ Point() : x(0.0), y(0.0) {}
+ friend std::ostream& operator<<(std::ostream& output, const Point p);
+};
+
+struct DiagramPoint
+{
+ // data members
+ // Points above the diagonal have type NORMAL
+ // Projections onto the diagonal have type DIAG
+ // for DIAG points only x-coordinate is relevant
+ enum Type { NORMAL, DIAG};
+ double x, y;
+ Type type;
+ // methods
+ DiagramPoint(double xx, double yy, Type ttype);
+ bool isDiagonal(void) const { return type == DIAG; }
+ bool isNormal(void) const { return type == NORMAL; }
+ double getRealX() const; // return the x-coord
+ double getRealY() const; // return the y-coord
+ friend std::ostream& operator<<(std::ostream& output, const DiagramPoint p);
+
+ struct LexicographicCmp
+ {
+ bool operator()(const DiagramPoint& p1, const DiagramPoint& p2) const
+ { return p1.type < p2.type || (p1.type == p2.type && (p1.x < p2.x || (p1.x == p2.x && p1.y < p2.y))); }
+ };
+};
+
+double sqrDist(const Point& a, const Point& b);
+double dist(const Point& a, const Point& b);
+double distLInf(const DiagramPoint& a, const DiagramPoint& b);
+double distLp(const DiagramPoint& a, const DiagramPoint& b, const double p);
+
+template<typename DiagPointContainer>
+double getFurthestDistance3Approx(DiagPointContainer& A, DiagPointContainer& B)
+{
+ double result { 0.0 };
+ DiagramPoint begA = *(A.begin());
+ DiagramPoint optB = *(B.begin());
+ for(const auto& pointB : B) {
+ if (distLInf(begA, pointB) > result) {
+ result = distLInf(begA, pointB);
+ optB = pointB;
+ }
+ }
+ for(const auto& pointA : A) {
+ if (distLInf(pointA, optB) > result) {
+ result = distLInf(pointA, optB);
+ }
+ }
+ return result;
+}
+
+} // end of namespace geom_ws
+#endif
diff --git a/geom_matching/wasserstein/include/def_debug.h b/geom_matching/wasserstein/include/def_debug.h
new file mode 100644
index 0000000..7323c18
--- /dev/null
+++ b/geom_matching/wasserstein/include/def_debug.h
@@ -0,0 +1,36 @@
+/*
+
+Copyright (c) 2015, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#ifndef DEF_DEBUG_H
+#define DEF_DEBUG_H
+
+//#define DEBUG_BOUND_MATCH
+//#define DEBUG_NEIGHBOUR_ORACLE
+//#define DEBUG_MATCHING
+//#define DEBUG_AUCTION
+
+#endif
diff --git a/geom_matching/wasserstein/include/dnn/geometry/euclidean-fixed.h b/geom_matching/wasserstein/include/dnn/geometry/euclidean-fixed.h
new file mode 100644
index 0000000..a6ccef7
--- /dev/null
+++ b/geom_matching/wasserstein/include/dnn/geometry/euclidean-fixed.h
@@ -0,0 +1,190 @@
+#ifndef DNN_GEOMETRY_EUCLIDEAN_FIXED_H
+#define DNN_GEOMETRY_EUCLIDEAN_FIXED_H
+
+#include <boost/operators.hpp>
+#include <boost/array.hpp>
+#include <boost/range/value_type.hpp>
+#include <boost/serialization/access.hpp>
+#include <boost/serialization/base_object.hpp>
+
+#include <iostream>
+#include <fstream>
+#include <string>
+#include <sstream>
+#include <cmath>
+#include <cassert>
+#include <limits>
+
+#include "../parallel/tbb.h" // for dnn::vector<...>
+
+namespace dnn
+{
+ // TODO: wrap in another namespace (e.g., euclidean)
+
+ template<size_t D, typename Real = double>
+ struct Point:
+ boost::addable< Point<D,Real>,
+ boost::subtractable< Point<D,Real>,
+ boost::dividable2< Point<D, Real>, Real,
+ boost::multipliable2< Point<D, Real>, Real > > > >,
+ public boost::array<Real, D>
+ {
+ public:
+ typedef Real Coordinate;
+ typedef Real DistanceType;
+
+
+ public:
+ Point(size_t id = 0): id_(id) {}
+ template<size_t DD>
+ Point(const Point<DD,Real>& p, size_t id = 0):
+ id_(id) { *this = p; }
+
+ static size_t dimension() { return D; }
+
+ // Assign a point of different dimension
+ template<size_t DD>
+ Point& operator=(const Point<DD,Real>& p) { for (size_t i = 0; i < (D < DD ? D : DD); ++i) (*this)[i] = p[i]; if (DD < D) for (size_t i = DD; i < D; ++i) (*this)[i] = 0; return *this; }
+
+ Point& operator+=(const Point& p) { for (size_t i = 0; i < D; ++i) (*this)[i] += p[i]; return *this; }
+ Point& operator-=(const Point& p) { for (size_t i = 0; i < D; ++i) (*this)[i] -= p[i]; return *this; }
+ Point& operator/=(Real r) { for (size_t i = 0; i < D; ++i) (*this)[i] /= r; return *this; }
+ Point& operator*=(Real r) { for (size_t i = 0; i < D; ++i) (*this)[i] *= r; return *this; }
+
+ Real norm2() const { Real n = 0; for (size_t i = 0; i < D; ++i) n += (*this)[i] * (*this)[i]; return n; }
+ Real max_norm() const
+ {
+ Real res = std::fabs((*this)[0]);
+ for (size_t i = 1; i < D; ++i)
+ if (std::fabs((*this)[i]) > res)
+ res = std::fabs((*this)[i]);
+ return res;
+ }
+
+ Real l1_norm() const
+ {
+ Real res = std::fabs((*this)[0]);
+ for (size_t i = 1; i < D; ++i)
+ res += std::fabs((*this)[i]);
+ return res;
+ }
+
+ Real lp_norm(const Real p) const
+ {
+ assert( !std::isinf(p) );
+ if ( p == 1.0 )
+ return l1_norm();
+ Real res = std::pow(std::fabs((*this)[0]), p);
+ for (size_t i = 1; i < D; ++i)
+ res += std::pow(std::fabs((*this)[i]), p);
+ return std::pow(res, 1.0 / p);
+ }
+
+ // quick and dirty for now; make generic later
+ //DistanceType distance(const Point& other) const { return sqrt(sq_distance(other)); }
+ //DistanceType sq_distance(const Point& other) const { return (other - *this).norm2(); }
+
+ DistanceType distance(const Point& other) const { return (other - *this).max_norm(); }
+ DistanceType p_distance(const Point& other, const double p) const { return (other - *this).lp_norm(p); }
+
+ size_t id() const { return id_; }
+ size_t& id() { return id_; }
+
+ private:
+ friend class boost::serialization::access;
+
+ template<class Archive>
+ void serialize(Archive& ar, const unsigned int version) { ar & boost::serialization::base_object< boost::array<Real,D> >(*this) & id_; }
+
+ private:
+ size_t id_;
+ };
+
+ template<size_t D, typename Real>
+ std::ostream&
+ operator<<(std::ostream& out, const Point<D,Real>& p)
+ { out << p[0]; for (size_t i = 1; i < D; ++i) out << " " << p[i]; return out; }
+
+
+ template<class Point>
+ struct PointTraits; // intentionally undefined; should be specialized for each type
+
+
+ template<size_t D, typename Real>
+ struct PointTraits< Point<D, Real> > // specialization for dnn::Point
+ {
+ typedef Point<D,Real> PointType;
+ typedef const PointType* PointHandle;
+ typedef std::vector<PointType> PointContainer;
+
+ typedef typename PointType::Coordinate Coordinate;
+ typedef typename PointType::DistanceType DistanceType;
+
+
+ static DistanceType
+ distance(const PointType& p1, const PointType& p2) { if (std::isinf(internal_p)) return p1.distance(p2); else return p1.p_distance(p2, internal_p); }
+
+ static DistanceType
+ distance(PointHandle p1, PointHandle p2) { return distance(*p1,*p2); }
+
+ static size_t dimension() { return D; }
+ static Real coordinate(const PointType& p, size_t i) { return p[i]; }
+ static Real& coordinate(PointType& p, size_t i) { return p[i]; }
+ static Real coordinate(PointHandle p, size_t i) { return coordinate(*p,i); }
+
+ static size_t id(const PointType& p) { return p.id(); }
+ static size_t& id(PointType& p) { return p.id(); }
+ static size_t id(PointHandle p) { return id(*p); }
+
+ static PointHandle
+ handle(const PointType& p) { return &p; }
+ static const PointType&
+ point(PointHandle ph) { return *ph; }
+
+ void swap(PointType& p1, PointType& p2) const { return std::swap(p1, p2); }
+
+ static PointContainer
+ container(size_t n = 0, const PointType& p = PointType()) { return PointContainer(n, p); }
+ static typename PointContainer::iterator
+ iterator(PointContainer& c, PointHandle ph) { return c.begin() + (ph - &c[0]); }
+ static typename PointContainer::const_iterator
+ iterator(const PointContainer& c, PointHandle ph) { return c.begin() + (ph - &c[0]); }
+
+ // internal_p determines which norm is used inside the Wasserstein metric
+ // (not to be confused with the wassersteinPower parameter):
+ // we raise \| p - q \|_{internal_p} to the power wassersteinPower.
+ static Real internal_p;
+
+ private:
+
+ friend class boost::serialization::access;
+
+ template<class Archive>
+ void serialize(Archive& ar, const unsigned int version) {}
+
+ };
+
+ template<size_t D, typename Real>
+ Real PointTraits< Point<D, Real> >::internal_p = std::numeric_limits<Real>::infinity();
+
+
+ template<class PointContainer>
+ void read_points(const std::string& filename, PointContainer& points)
+ {
+ typedef typename boost::range_value<PointContainer>::type Point;
+ typedef typename PointTraits<Point>::Coordinate Coordinate;
+
+ std::ifstream in(filename.c_str());
+ std::string line;
+ while(std::getline(in, line))
+ {
+ if (line[0] == '#') continue; // comment line in the file
+ std::stringstream linestream(line);
+ Coordinate x;
+ points.push_back(Point());
+ size_t i = 0;
+ while (linestream >> x)
+ points.back()[i++] = x;
+ }
+ }
+}
+
+#endif
diff --git a/geom_matching/wasserstein/include/dnn/local/kd-tree.h b/geom_matching/wasserstein/include/dnn/local/kd-tree.h
new file mode 100644
index 0000000..7e01072
--- /dev/null
+++ b/geom_matching/wasserstein/include/dnn/local/kd-tree.h
@@ -0,0 +1,90 @@
+#ifndef DNN_LOCAL_KD_TREE_H
+#define DNN_LOCAL_KD_TREE_H
+
+#include "../utils.h"
+#include "search-functors.h"
+
+#include <unordered_map>
+#include <tuple>
+#include <vector>
+
+#include <boost/tuple/tuple.hpp>
+#include <boost/shared_ptr.hpp>
+#include <boost/range/value_type.hpp>
+
+#include <boost/static_assert.hpp>
+#include <boost/type_traits.hpp>
+
+namespace dnn
+{
+ // Weighted KDTree
+ // Traits_ provides Coordinate, DistanceType, PointType, dimension(), distance(p1,p2), coordinate(p,i)
+ template< class Traits_ >
+ class KDTree
+ {
+ public:
+ typedef Traits_ Traits;
+ typedef dnn::HandleDistance<KDTree> HandleDistance;
+
+ typedef typename Traits::PointType Point;
+ typedef typename Traits::PointHandle PointHandle;
+ typedef typename Traits::Coordinate Coordinate;
+ typedef typename Traits::DistanceType DistanceType;
+ typedef std::vector<PointHandle> HandleContainer;
+ typedef std::vector<HandleDistance> HDContainer; // TODO: use tbb::scalable_allocator
+ typedef HDContainer Result;
+ typedef std::vector<DistanceType> DistanceContainer;
+ typedef std::unordered_map<PointHandle, size_t> HandleMap;
+
+ BOOST_STATIC_ASSERT_MSG(has_coordinates<Traits, PointHandle, int>::value, "KDTree requires coordinates");
+
+ public:
+ KDTree(const Traits& traits):
+ traits_(traits) {}
+
+ KDTree(const Traits& traits, HandleContainer&& handles, double _wassersteinPower = 1.0);
+
+ template<class Range>
+ KDTree(const Traits& traits, const Range& range, double _wassersteinPower = 1.0);
+
+ template<class Range>
+ void init(const Range& range);
+
+ DistanceType weight(PointHandle p) { return weights_[indices_[p]]; }
+ void increase_weight(PointHandle p, DistanceType w);
+
+ HandleDistance find(PointHandle q) const;
+ Result findR(PointHandle q, DistanceType r) const; // all neighbors within r
+ Result findK(PointHandle q, size_t k) const; // k nearest neighbors
+
+ HandleDistance find(const Point& q) const { return find(traits().handle(q)); }
+ Result findR(const Point& q, DistanceType r) const { return findR(traits().handle(q), r); }
+ Result findK(const Point& q, size_t k) const { return findK(traits().handle(q), k); }
+
+ template<class ResultsFunctor>
+ void search(PointHandle q, ResultsFunctor& rf) const;
+
+ const Traits& traits() const { return traits_; }
+
+ void printWeights(void);
+
+ private:
+ void init();
+
+ typedef typename HandleContainer::iterator HCIterator;
+ typedef std::tuple<HCIterator, HCIterator, size_t> KDTreeNode;
+
+ struct CoordinateComparison;
+ struct OrderTree;
+
+ private:
+ Traits traits_;
+ HandleContainer tree_;
+ DistanceContainer weights_; // point weight
+ DistanceContainer subtree_weights_; // min weight in the subtree
+ HandleMap indices_;
+ double wassersteinPower;
+ };
+}
+
+#include "kd-tree.hpp"
+
+#endif
diff --git a/geom_matching/wasserstein/include/dnn/local/kd-tree.hpp b/geom_matching/wasserstein/include/dnn/local/kd-tree.hpp
new file mode 100644
index 0000000..151a4ad
--- /dev/null
+++ b/geom_matching/wasserstein/include/dnn/local/kd-tree.hpp
@@ -0,0 +1,303 @@
+#include <boost/range/counting_range.hpp>
+#include <boost/range/algorithm_ext/push_back.hpp>
+#include <boost/range.hpp>
+
+#include <queue>
+#include <stack>
+#include <algorithm>
+#include <cassert>
+#include <cmath>
+#include <limits>
+
+#include "../parallel/tbb.h"
+
+template<class T>
+dnn::KDTree<T>::
+KDTree(const Traits& traits, HandleContainer&& handles, double _wassersteinPower):
+ traits_(traits), tree_(std::move(handles)), wassersteinPower(_wassersteinPower)
+{ assert(wassersteinPower >= 1.0); weights_.resize(tree_.size(), 0); subtree_weights_.resize(tree_.size(), 0); init(); }
+
+template<class T>
+template<class Range>
+dnn::KDTree<T>::
+KDTree(const Traits& traits, const Range& range, double _wassersteinPower):
+ traits_(traits), wassersteinPower(_wassersteinPower)
+{
+ assert( wassersteinPower >= 1.0);
+ init(range);
+}
+
+template<class T>
+template<class Range>
+void
+dnn::KDTree<T>::
+init(const Range& range)
+{
+ size_t sz = std::distance(std::begin(range), std::end(range));
+ tree_.reserve(sz);
+ weights_.resize(sz, 0);
+ subtree_weights_.resize(sz, 0);
+ for (PointHandle h : range)
+ tree_.push_back(h);
+ init();
+}
+
+template<class T>
+void
+dnn::KDTree<T>::
+init()
+{
+ if (tree_.empty())
+ return;
+
+#if defined(TBB)
+ task_group g;
+ g.run(OrderTree(tree_.begin(), tree_.end(), 0, traits()));
+ g.wait();
+#else
+ OrderTree(tree_.begin(), tree_.end(), 0, traits()).serial();
+#endif
+
+ for (size_t i = 0; i < tree_.size(); ++i)
+ indices_[tree_[i]] = i;
+}
+
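+// OrderTree arranges the handles in place: nth_element puts the median (w.r.t. the
+// current splitting coordinate) of each subrange at its middle position, and the two
+// halves are then processed with the next coordinate, so the array itself encodes the kd-tree.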
+template<class T>
+struct
+dnn::KDTree<T>::OrderTree
+{
+ OrderTree(HCIterator b_, HCIterator e_, size_t i_, const Traits& traits_):
+ b(b_), e(e_), i(i_), traits(traits_) {}
+
+ void operator()() const
+ {
+ if (e - b < 1000)
+ {
+ serial();
+ return;
+ }
+
+ HCIterator m = b + (e - b)/2;
+ CoordinateComparison cmp(i, traits);
+ std::nth_element(b,m,e, cmp);
+ size_t next_i = (i + 1) % traits.dimension();
+
+ task_group g;
+ if (b < m - 1) g.run(OrderTree(b, m, next_i, traits));
+ if (e > m + 2) g.run(OrderTree(m+1, e, next_i, traits));
+ g.wait();
+ }
+
+ void serial() const
+ {
+ std::queue<KDTreeNode> q;
+ q.push(KDTreeNode(b,e,i));
+ while (!q.empty())
+ {
+ HCIterator b, e; size_t i;
+ std::tie(b,e,i) = q.front();
+ q.pop();
+ HCIterator m = b + (e - b)/2;
+
+ CoordinateComparison cmp(i, traits);
+ std::nth_element(b,m,e, cmp);
+ size_t next_i = (i + 1) % traits.dimension();
+
+ // Replace with a size condition instead?
+ if (b < m - 1) q.push(KDTreeNode(b, m, next_i));
+ if (e - m > 2) q.push(KDTreeNode(m+1, e, next_i));
+ }
+ }
+
+ HCIterator b, e;
+ size_t i;
+ const Traits& traits;
+};
+
+template<class T>
+template<class ResultsFunctor>
+void
+dnn::KDTree<T>::
+search(PointHandle q, ResultsFunctor& rf) const
+{
+ typedef typename HandleContainer::const_iterator HCIterator;
+ typedef std::tuple<HCIterator, HCIterator, size_t> KDTreeNode;
+
+ if (tree_.empty())
+ return;
+
+ DistanceType D = std::numeric_limits<DistanceType>::infinity();
+
+ // TODO: use tbb::scalable_allocator for the queue
+ std::queue<KDTreeNode> nodes;
+ nodes.push(KDTreeNode(tree_.begin(), tree_.end(), 0));
+
+ //std::cout << "started kdtree::search" << std::endl;
+
+ while (!nodes.empty())
+ {
+ HCIterator b, e; size_t i;
+ std::tie(b,e,i) = nodes.front();
+ nodes.pop();
+
+ CoordinateComparison cmp(i, traits());
+ i = (i + 1) % traits().dimension();
+
+ HCIterator m = b + (e - b)/2;
+ DistanceType dist = pow(traits().distance(q, *m), wassersteinPower) + weights_[m - tree_.begin()];
+
+
+ D = rf(*m, dist);
+
+ // we are really searching w.r.t L_\infty ball; could prune better with an L_2 ball
+ Coordinate diff = cmp.diff(q, *m); // diff returns signed distance
+ DistanceType diffToWasserPower = (diff > 0 ? 1.0 : -1.0) * pow(fabs(diff), wassersteinPower);
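+ // each subtree is visited only if a lower bound on (powered distance + weight) over its
+ // points (the subtree's minimal weight adjusted by the signed powered gap to the splitting
+ // coordinate) can still beat the current best value D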
+
+ size_t lm = m + 1 + (e - (m+1))/2 - tree_.begin();
+ if (e > m + 1 && diffToWasserPower - subtree_weights_[lm] >= -D) {
+ nodes.push(KDTreeNode(m+1, e, i));
+ }
+
+ size_t rm = b + (m - b) / 2 - tree_.begin();
+ if (b < m && diffToWasserPower + subtree_weights_[rm] <= D) {
+ nodes.push(KDTreeNode(b, m, i));
+ }
+ }
+ //std::cout << "exited kdtree::search" << std::endl;
+}
+
+template<class T>
+void
+dnn::KDTree<T>::
+increase_weight(PointHandle p, DistanceType w)
+{
+ size_t idx = indices_[p];
+ // weight should only increase
+ assert( weights_[idx] <= w );
+ weights_[idx] = w;
+
+ typedef std::tuple<HCIterator, HCIterator> KDTreeNode;
+
+ // find the path down the tree to this node
+ // not an ideal strategy, but it's not clear how to move up from the node in general
+ std::stack<KDTreeNode> s;
+ s.push(KDTreeNode(tree_.begin(),tree_.end()));
+
+ do
+ {
+ HCIterator b,e;
+ std::tie(b,e) = s.top();
+
+ size_t im = b + (e - b)/2 - tree_.begin();
+
+ if (idx == im)
+ break;
+ else if (idx < im)
+ s.push(KDTreeNode(b, tree_.begin() + im));
+ else // idx > im
+ s.push(KDTreeNode(tree_.begin() + im + 1, e));
+ } while(1);
+
+ // update subtree_weights_ on the path to the root
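+ // weights only increase, so subtree minima can only increase; recompute each ancestor's
+ // minimum from its own weight and its children's stored minima, and stop as soon as an
+ // ancestor's stored minimum is already up to date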
+ DistanceType min_w = w;
+ while (!s.empty())
+ {
+ HCIterator b,e;
+ std::tie(b,e) = s.top();
+ HCIterator m = b + (e - b)/2;
+ size_t im = m - tree_.begin();
+ s.pop();
+
+
+ // left and right children
+ if (b < m)
+ {
+ size_t lm = b + (m - b)/2 - tree_.begin();
+ if (subtree_weights_[lm] < min_w)
+ min_w = subtree_weights_[lm];
+ }
+
+ if (e > m + 1)
+ {
+ size_t rm = m + 1 + (e - (m+1))/2 - tree_.begin();
+ if (subtree_weights_[rm] < min_w)
+ min_w = subtree_weights_[rm];
+ }
+
+ if (weights_[im] < min_w) {
+ min_w = weights_[im];
+ }
+
+ if (subtree_weights_[im] < min_w ) // increase weight
+ subtree_weights_[im] = min_w;
+ else
+ break;
+ }
+}
+
+template<class T>
+typename dnn::KDTree<T>::HandleDistance
+dnn::KDTree<T>::
+find(PointHandle q) const
+{
+ dnn::NNRecord<HandleDistance> nn;
+ search(q, nn);
+ return nn.result;
+}
+
+template<class T>
+typename dnn::KDTree<T>::Result
+dnn::KDTree<T>::
+findR(PointHandle q, DistanceType r) const
+{
+ dnn::rNNRecord<HandleDistance> rnn(r);
+ search(q, rnn);
+ std::sort(rnn.result.begin(), rnn.result.end());
+ return rnn.result;
+}
+
+template<class T>
+typename dnn::KDTree<T>::Result
+dnn::KDTree<T>::
+findK(PointHandle q, size_t k) const
+{
+ dnn::kNNRecord<HandleDistance> knn(k);
+ search(q, knn);
+ std::sort(knn.result.begin(), knn.result.end());
+ return knn.result;
+}
+
+
+template<class T>
+struct dnn::KDTree<T>::CoordinateComparison
+{
+ CoordinateComparison(size_t i, const Traits& traits):
+ i_(i), traits_(traits) {}
+
+ bool operator()(PointHandle p1, PointHandle p2) const { return coordinate(p1) < coordinate(p2); }
+ Coordinate diff(PointHandle p1, PointHandle p2) const { return coordinate(p1) - coordinate(p2); }
+
+ Coordinate coordinate(PointHandle p) const { return traits_.coordinate(p, i_); }
+ size_t axis() const { return i_; }
+
+ private:
+ size_t i_;
+ const Traits& traits_;
+};
+
+template<class T>
+void
+dnn::KDTree<T>::
+printWeights(void)
+{
+ std::cout << "weights_:" << std::endl;
+ for(const auto ph : indices_) {
+ std::cout << "idx = " << ph.second << ": (" << (ph.first)->at(0) << ", " << (ph.first)->at(1) << ") weight = " << weights_[ph.second] << std::endl;
+ }
+ std::cout << "subtree_weights_:" << std::endl;
+ for(size_t idx = 0; idx < subtree_weights_.size(); ++idx) {
+ std::cout << idx << " : " << subtree_weights_[idx] << std::endl;
+ }
+}
+
+
diff --git a/geom_matching/wasserstein/include/dnn/local/search-functors.h b/geom_matching/wasserstein/include/dnn/local/search-functors.h
new file mode 100644
index 0000000..f257d0c
--- /dev/null
+++ b/geom_matching/wasserstein/include/dnn/local/search-functors.h
@@ -0,0 +1,89 @@
+#ifndef DNN_LOCAL_SEARCH_FUNCTORS_H
+#define DNN_LOCAL_SEARCH_FUNCTORS_H
+
+#include <boost/range/algorithm/heap_algorithm.hpp>
+
+#include <cassert>
+#include <limits>
+
+namespace dnn
+{
+
+template<class NN>
+struct HandleDistance
+{
+ typedef typename NN::PointHandle PointHandle;
+ typedef typename NN::DistanceType DistanceType;
+ typedef typename NN::HDContainer HDContainer;
+
+ HandleDistance() {}
+ HandleDistance(PointHandle pp, DistanceType dd):
+ p(pp), d(dd) {}
+ bool operator<(const HandleDistance& other) const { return d < other.d; }
+
+ PointHandle p;
+ DistanceType d;
+};
+
+template<class HandleDistance>
+struct NNRecord
+{
+ typedef typename HandleDistance::PointHandle PointHandle;
+ typedef typename HandleDistance::DistanceType DistanceType;
+
+ NNRecord() { result.d = std::numeric_limits<DistanceType>::infinity(); }
+ DistanceType operator()(PointHandle p, DistanceType d) { if (d < result.d) { result.p = p; result.d = d; } return result.d; }
+ HandleDistance result;
+};
+
+template<class HandleDistance>
+struct rNNRecord
+{
+ typedef typename HandleDistance::PointHandle PointHandle;
+ typedef typename HandleDistance::DistanceType DistanceType;
+ typedef typename HandleDistance::HDContainer HDContainer;
+
+ rNNRecord(DistanceType r_): r(r_) {}
+ DistanceType operator()(PointHandle p, DistanceType d)
+ {
+ if (d <= r)
+ result.push_back(HandleDistance(p,d));
+ return r;
+ }
+
+ DistanceType r;
+ HDContainer result;
+};
+
+template<class HandleDistance>
+struct kNNRecord
+{
+ typedef typename HandleDistance::PointHandle PointHandle;
+ typedef typename HandleDistance::DistanceType DistanceType;
+ typedef typename HandleDistance::HDContainer HDContainer;
+
+ kNNRecord(unsigned k_): k(k_) {}
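+ // result is kept as a max-heap on distance, so result[0] is the current k-th nearest
+ // (the worst neighbor kept); returning its distance lets the caller prune the search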
+ DistanceType operator()(PointHandle p, DistanceType d)
+ {
+ if (result.size() < k)
+ {
+ result.push_back(HandleDistance(p,d));
+ boost::push_heap(result);
+ if (result.size() < k)
+ return std::numeric_limits<DistanceType>::infinity();
+ } else if (d < result[0].d)
+ {
+ boost::pop_heap(result);
+ result.back() = HandleDistance(p,d);
+ boost::push_heap(result);
+ }
+ if ( result.size() > 1 ) {
+ assert( result[0].d >= result[1].d );
+ }
+ return result[0].d;
+ }
+
+ unsigned k;
+ HDContainer result;
+};
+
+}
+
+#endif // DNN_LOCAL_SEARCH_FUNCTORS_H
diff --git a/geom_matching/wasserstein/include/dnn/parallel/tbb.h b/geom_matching/wasserstein/include/dnn/parallel/tbb.h
new file mode 100644
index 0000000..4aa6805
--- /dev/null
+++ b/geom_matching/wasserstein/include/dnn/parallel/tbb.h
@@ -0,0 +1,220 @@
+#ifndef PARALLEL_H
+#define PARALLEL_H
+
+#include <iostream>
+#include <vector>
+
+#include <boost/range.hpp>
+#include <boost/bind.hpp>
+#include <boost/foreach.hpp>
+
+#ifdef TBB
+
+#include <tbb/tbb.h>
+#include <tbb/concurrent_hash_map.h>
+#include <tbb/scalable_allocator.h>
+
+#include <boost/serialization/split_free.hpp>
+#include <boost/serialization/collections_load_imp.hpp>
+#include <boost/serialization/collections_save_imp.hpp>
+
+namespace dnn
+{
+ using tbb::mutex;
+ using tbb::task_scheduler_init;
+ using tbb::task_group;
+ using tbb::task;
+
+ template<class T>
+ struct vector
+ {
+ typedef tbb::concurrent_vector<T> type;
+ };
+
+ template<class T>
+ struct atomic
+ {
+ typedef tbb::atomic<T> type;
+ static T compare_and_swap(type& v, T n, T o) { return v.compare_and_swap(n,o); }
+ };
+
+ template<class Iterator, class F>
+ void do_foreach(Iterator begin, Iterator end, const F& f) { tbb::parallel_do(begin, end, f); }
+
+ template<class Range, class F>
+ void for_each_range_(const Range& r, const F& f)
+ {
+ for (typename Range::iterator cur = r.begin(); cur != r.end(); ++cur)
+ f(*cur);
+ }
+
+ template<class F>
+ void for_each_range(size_t from, size_t to, const F& f)
+ {
+ //static tbb::affinity_partitioner ap;
+ //tbb::parallel_for(c.range(), boost::bind(&for_each_range_<typename Container::range_type, F>, _1, f), ap);
+ tbb::parallel_for(from, to, f);
+ }
+
+ template<class Container, class F>
+ void for_each_range(const Container& c, const F& f)
+ {
+ //static tbb::affinity_partitioner ap;
+ //tbb::parallel_for(c.range(), boost::bind(&for_each_range_<typename Container::range_type, F>, _1, f), ap);
+ tbb::parallel_for(c.range(), boost::bind(&for_each_range_<typename Container::const_range_type, F>, _1, f));
+ }
+
+ template<class Container, class F>
+ void for_each_range(Container& c, const F& f)
+ {
+ //static tbb::affinity_partitioner ap;
+ //tbb::parallel_for(c.range(), boost::bind(&for_each_range_<typename Container::range_type, F>, _1, f), ap);
+ tbb::parallel_for(c.range(), boost::bind(&for_each_range_<typename Container::range_type, F>, _1, f));
+ }
+
+ template<class ID, class NodePointer, class IDTraits, class Allocator>
+ struct map_traits
+ {
+ typedef tbb::concurrent_hash_map<ID, NodePointer, IDTraits, Allocator> type;
+ typedef typename type::range_type range;
+ };
+
+ struct progress_timer
+ {
+ progress_timer(): start(tbb::tick_count::now()) {}
+ ~progress_timer()
+ { std::cout << (tbb::tick_count::now() - start).seconds() << " s" << std::endl; }
+
+ tbb::tick_count start;
+ };
+}
+
+// Serialization for tbb::concurrent_vector<...>
+namespace boost
+{
+ namespace serialization
+ {
+ template<class Archive, class T, class A>
+ void save(Archive& ar, const tbb::concurrent_vector<T,A>& v, const unsigned int file_version)
+ { stl::save_collection(ar, v); }
+
+ template<class Archive, class T, class A>
+ void load(Archive& ar, tbb::concurrent_vector<T,A>& v, const unsigned int file_version)
+ {
+ stl::load_collection<Archive,
+ tbb::concurrent_vector<T,A>,
+ stl::archive_input_seq< Archive, tbb::concurrent_vector<T,A> >,
+ stl::reserve_imp< tbb::concurrent_vector<T,A> >
+ >(ar, v);
+ }
+
+ template<class Archive, class T, class A>
+ void serialize(Archive& ar, tbb::concurrent_vector<T,A>& v, const unsigned int file_version)
+ { split_free(ar, v, file_version); }
+
+ template<class Archive, class T>
+ void save(Archive& ar, const tbb::atomic<T>& v, const unsigned int file_version)
+ { T v_ = v; ar << v_; }
+
+ template<class Archive, class T>
+ void load(Archive& ar, tbb::atomic<T>& v, const unsigned int file_version)
+ { T v_; ar >> v_; v = v_; }
+
+ template<class Archive, class T>
+ void serialize(Archive& ar, tbb::atomic<T>& v, const unsigned int file_version)
+ { split_free(ar, v, file_version); }
+ }
+}
+
+#else
+
+#include <algorithm>
+#include <map>
+#include <boost/progress.hpp>
+
+namespace dnn
+{
+ template<class T>
+ struct vector
+ {
+ typedef ::std::vector<T> type;
+ };
+
+ template<class T>
+ struct atomic
+ {
+ typedef T type;
+ static T compare_and_swap(type& v, T n, T o) { if (v != o) return v; v = n; return o; }
+ };
+
+ template<class Iterator, class F>
+ void do_foreach(Iterator begin, Iterator end, const F& f) { std::for_each(begin, end, f); }
+
+ template<class F>
+ void for_each_range(size_t from, size_t to, const F& f)
+ {
+ for (size_t i = from; i < to; ++i)
+ f(i);
+ }
+
+ template<class Container, class F>
+ void for_each_range(Container& c, const F& f)
+ {
+ BOOST_FOREACH(const typename Container::value_type& i, c)
+ f(i);
+ }
+
+ template<class Container, class F>
+ void for_each_range(const Container& c, const F& f)
+ {
+ BOOST_FOREACH(const typename Container::value_type& i, c)
+ f(i);
+ }
+
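+ // serial no-op stand-ins that mirror the TBB interfaces used above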
+ struct mutex
+ {
+ struct scoped_lock
+ {
+ scoped_lock() {}
+ scoped_lock(mutex& ) {}
+ void acquire(mutex& ) const {}
+ void release() const {}
+ };
+ };
+
+ struct task_scheduler_init
+ {
+ task_scheduler_init(unsigned) {}
+ void initialize(unsigned) {}
+ static const unsigned automatic = 0;
+ static const unsigned deferred = 0;
+ };
+
+ struct task_group
+ {
+ template<class Functor>
+ void run(const Functor& f) const { f(); }
+ void wait() const {}
+ };
+
+ template<class ID, class NodePointer, class IDTraits, class Allocator>
+ struct map_traits
+ {
+ typedef std::map<ID, NodePointer,
+ typename IDTraits::Comparison,
+ Allocator> type;
+ typedef type range;
+ };
+
+ using boost::progress_timer;
+}
+
+#endif // TBB
+
+namespace dnn
+{
+ template<class Range, class F>
+ void do_foreach(const Range& range, const F& f) { do_foreach(boost::begin(range), boost::end(range), f); }
+}
+
+#endif
diff --git a/geom_matching/wasserstein/include/dnn/parallel/utils.h b/geom_matching/wasserstein/include/dnn/parallel/utils.h
new file mode 100644
index 0000000..ba73814
--- /dev/null
+++ b/geom_matching/wasserstein/include/dnn/parallel/utils.h
@@ -0,0 +1,94 @@
+#ifndef PARALLEL_UTILS_H
+#define PARALLEL_UTILS_H
+
+#include "../utils.h"
+
+namespace dnn
+{
+ // Assumes rng is synchronized across ranks
+ template<class DataVector, class RNGType, class SwapFunctor>
+ void shuffle(mpi::communicator& world, DataVector& data, RNGType& rng, const SwapFunctor& swap, DataVector empty = DataVector());
+
+ template<class DataVector, class RNGType>
+ void shuffle(mpi::communicator& world, DataVector& data, RNGType& rng)
+ {
+ typedef decltype(data[0]) T;
+ shuffle(world, data, rng, [](T& x, T& y) { std::swap(x,y); });
+ }
+}
+
+template<class DataVector, class RNGType, class SwapFunctor>
+void
+dnn::shuffle(mpi::communicator& world, DataVector& data, RNGType& rng, const SwapFunctor& swap, DataVector empty)
+{
+ // This is not a perfect shuffle: it dishes out data in chunks of 1/size.
+ // (It can be interpreted as generating a bistochastic matrix by taking the
+ // sum of size random permutation matrices.) Hopefully, it works for our purposes.
+
+ typedef typename RNGType::result_type RNGResult;
+
+ int size = world.size();
+ int rank = world.rank();
+
+ // Generate local seeds
+ boost::uniform_int<RNGResult> uniform;
+ RNGResult seed;
+ for (size_t i = 0; i < size; ++i)
+ {
+ RNGResult v = uniform(rng);
+ if (i == rank)
+ seed = v;
+ }
+ RNGType local_rng(seed);
+
+ // Shuffle local data
+ dnn::random_shuffle(data.begin(), data.end(), local_rng, swap);
+
+ // Decide how much of our data goes to i-th processor
+ std::vector<size_t> out_counts(size);
+ std::vector<int> ranks(boost::counting_iterator<int>(0),
+ boost::counting_iterator<int>(size));
+ for (size_t i = 0; i < size; ++i)
+ {
+ dnn::random_shuffle(ranks.begin(), ranks.end(), rng);
+ ++out_counts[ranks[rank]];
+ }
+
+ // Fill the outgoing array
+ size_t total = 0;
+ std::vector< DataVector > outgoing(size, empty);
+ for (size_t i = 0; i < size; ++i)
+ {
+ size_t count = data.size()*out_counts[i]/size;
+ if (total + count > data.size())
+ count = data.size() - total;
+
+ outgoing[i].reserve(count);
+ for (size_t j = total; j < total + count; ++j)
+ outgoing[i].push_back(data[j]);
+
+ total += count;
+ }
+
+ boost::uniform_int<size_t> uniform_outgoing(0,size-1); // in range [0,size-1]
+ while(total < data.size()) // send leftover to random processes
+ {
+ outgoing[uniform_outgoing(local_rng)].push_back(data[total]);
+ ++total;
+ }
+ data.clear();
+
+ // Exchange the data
+ std::vector< DataVector > incoming(size, empty);
+ mpi::all_to_all(world, outgoing, incoming);
+ outgoing.clear();
+
+ // Assemble our data
+ for(const DataVector& vec : incoming)
+ for (size_t i = 0; i < vec.size(); ++i)
+ data.push_back(vec[i]);
+ dnn::random_shuffle(data.begin(), data.end(), local_rng, swap);
+ // XXX: the final shuffle is irrelevant for our purposes. But it's also cheap.
+}
+
+#endif
diff --git a/geom_matching/wasserstein/include/dnn/utils.h b/geom_matching/wasserstein/include/dnn/utils.h
new file mode 100644
index 0000000..83c2865
--- /dev/null
+++ b/geom_matching/wasserstein/include/dnn/utils.h
@@ -0,0 +1,41 @@
+#ifndef DNN_UTILS_H
+#define DNN_UTILS_H
+
+#include <type_traits>
+#include <utility>
+
+#include <boost/random/uniform_int.hpp>
+#include <boost/foreach.hpp>
+#include <boost/typeof/typeof.hpp>
+
+namespace dnn
+{
+
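+// expression-SFINAE detector: value is true exactly when T has a member coordinate(...)
+// callable with arguments of types Args...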
+template <typename T, typename... Args>
+struct has_coordinates
+{
+ template <typename C, typename = decltype( std::declval<C>().coordinate(std::declval<Args>()...) )>
+ static std::true_type test(int);
+
+ template <typename C>
+ static std::false_type test(...);
+
+ static constexpr bool value = decltype(test<T>(0))::value;
+};
+
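+// Fisher-Yates shuffle that uses a caller-supplied swap functor, so elements that need
+// non-trivial swapping can be handled as well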
+template<class RandomIt, class UniformRandomNumberGenerator, class SwapFunctor>
+void random_shuffle(RandomIt first, RandomIt last, UniformRandomNumberGenerator& g, const SwapFunctor& swap)
+{
+ size_t n = last - first;
+ boost::uniform_int<size_t> uniform(0,n);
+ for (size_t i = n-1; i > 0; --i)
+ swap(first[i], first[uniform(g,i+1)]); // picks a random number in [0,i] range
+}
+
+template<class RandomIt, class UniformRandomNumberGenerator>
+void random_shuffle(RandomIt first, RandomIt last, UniformRandomNumberGenerator& g)
+{
+ typedef decltype(*first) T;
+ random_shuffle(first, last, g, [](T& x, T& y) { std::swap(x,y); });
+}
+
+}
+
+#endif
diff --git a/geom_matching/wasserstein/include/wasserstein.h b/geom_matching/wasserstein/include/wasserstein.h
new file mode 100644
index 0000000..38ac6bd
--- /dev/null
+++ b/geom_matching/wasserstein/include/wasserstein.h
@@ -0,0 +1,110 @@
+/*
+
+Copyright (c) 2015, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#ifndef WASSERSTEIN_H
+#define WASSERSTEIN_H
+
+#include <vector>
+#include <map>
+#include <limits>
+#include <math.h>
+
+#include "basic_defs_ws.h"
+
+// use Gauss-Seidel version; comment out to switch to Jacobi (not recommended)
+#define GAUSS_SEIDEL_AUCTION
+
+namespace geom_ws {
+
+// get Wasserstein distance between two persistence diagrams
+double wassersteinDistVec(const std::vector<DiagramPoint>& A,
+ const std::vector<DiagramPoint>& B,
+ const double q,
+ const double delta,
+ const double _internal_p = std::numeric_limits<double>::infinity(),
+ const double _initialEpsilon = 0.0,
+ const double _epsFactor = 0.0);
+
+
+// compare as multisets
+template<class PairContainer>
+bool areEqual(PairContainer& dgm1, PairContainer& dgm2)
+{
+ if (dgm1.size() != dgm2.size()) {
+ return false;
+ }
+
+ std::map<std::pair<double, double>, int> m1, m2;
+
+ for(const auto& pair1 : dgm1) {
+ m1[pair1]++;
+ }
+
+ for(const auto& pair2 : dgm2) {
+ m2[pair2]++;
+ }
+
+ return m1 == m2;
+}
+
+template<class PairContainer>
+double wassersteinDist(PairContainer& A, PairContainer& B, const double q, const double delta, const double _internal_p = std::numeric_limits<double>::infinity(), const double _initialEpsilon = 0.0, const double _epsFactor = 0.0)
+{
+ if (areEqual(A, B)) {
+ return 0.0;
+ }
+
+ std::vector<DiagramPoint> dgmA, dgmB;
+ // each point of A enters dgmA as a normal point and its diagonal counterpart enters dgmB
+ // (and symmetrically for B below), so both augmented diagrams get the same cardinality
+ for(auto& pairA : A) {
+ double x = pairA.first;
+ double y = pairA.second;
+ dgmA.push_back(DiagramPoint(x, y, DiagramPoint::NORMAL));
+ dgmB.push_back(DiagramPoint(x, y, DiagramPoint::DIAG));
+ }
+ // the same for B
+ for(auto& pairB : B) {
+ double x = pairB.first;
+ double y = pairB.second;
+ dgmA.push_back(DiagramPoint(x, y, DiagramPoint::DIAG));
+ dgmB.push_back(DiagramPoint(x, y, DiagramPoint::NORMAL));
+ }
+
+ return wassersteinDistVec(dgmA, dgmB, q, delta, _internal_p, _initialEpsilon, _epsFactor);
+}
+
+
+// fill in result with points from file fname;
+// return false if the file cannot be opened
+// or an error occurs while reading
+bool readDiagramPointSet(const char* fname, std::vector<std::pair<double, double>>& result);
+bool readDiagramPointSet(const std::string& fname, std::vector<std::pair<double, double>>& result);
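+
+// usage sketch (illustrative only; file names and parameter values are made up):
+//   std::vector<std::pair<double, double>> A, B;
+//   geom_ws::readDiagramPointSet("dgmA.txt", A);
+//   geom_ws::readDiagramPointSet("dgmB.txt", B);
+//   double d = geom_ws::wassersteinDist(A, B, 2.0, 0.01);  // degree q = 2, relative error delta = 0.01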
+
+} // end of namespace geom_ws
+
+#endif
diff --git a/geom_matching/wasserstein/src/auction_oracle.cpp b/geom_matching/wasserstein/src/auction_oracle.cpp
new file mode 100644
index 0000000..f906d26
--- /dev/null
+++ b/geom_matching/wasserstein/src/auction_oracle.cpp
@@ -0,0 +1,1310 @@
+/*
+
+Copyright (c) 2015, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#include <assert.h>
+#include <algorithm>
+#include <functional>
+#include <iterator>
+
+#include "def_debug.h"
+#include "auction_oracle.h"
+
+namespace geom_ws {
+
+AuctionOracleAbstract::AuctionOracleAbstract(const std::vector<DiagramPoint>& _bidders, const std::vector<DiagramPoint>& _items, const double _wassersteinPower, const double _internal_p) :
+ bidders(_bidders),
+ items(_items),
+ prices(items.size(), 0.0),
+ wassersteinPower(_wassersteinPower),
+ internal_p(_internal_p)
+{
+}
+
+double AuctionOracleAbstract::getValueForBidder(size_t bidderIdx, size_t itemIdx)
+{
+ return pow(distLp(bidders[bidderIdx], items[itemIdx], internal_p), wassersteinPower) + prices[itemIdx];
+}
+
+// *****************************
+// AuctionOracleLazyHeap
+// *****************************
+
+AuctionOracleLazyHeap::AuctionOracleLazyHeap(const std::vector<DiagramPoint>& b,
+ const std::vector<DiagramPoint>& g,
+ double _wassersteinPower,
+ const double internal_p) :
+ AuctionOracleAbstract(b, g, _wassersteinPower, internal_p),
+ maxVal(std::numeric_limits<double>::lowest()),
+ biddersUpdateMoments(b.size(), 0),
+ updateCounter(0)
+{
+ assert(b.size() == g.size() );
+ assert(b.size() > 1);
+
+ weightMatrix.reserve(b.size());
+ //const double maxDistUpperBound = 3 * getFurthestDistance3Approx(b, g);
+ //weightAdjConst = pow(maxDistUpperBound, wassersteinPower);
+ //std::cout << "3getFurthestDistance3Approx = " << getFurthestDistance3Approx(b, g) << std::endl;
+ //std::cout << "in AuctionOracleLazyHeap weightAdjConst = " << weightAdjConst << std::endl;
+ // init weight matrix
+ for(const auto& pointA : bidders) {
+ std::vector<double> weightVec;
+ weightVec.clear();
+ weightVec.reserve(b.size());
+ for(const auto& pointB : items) {
+ double val = pow(distLp(pointA, pointB, internal_p), wassersteinPower);
+ if (val > maxVal) {
+ maxVal = val;
+ }
+ weightVec.push_back( val );
+ }
+ weightMatrix.push_back(weightVec);
+ }
+ fillInLossesHeap();
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ updateList.push_back(std::make_pair(static_cast<IdxType>(itemIdx), 0));
+ }
+ for(auto updateListIter = updateList.begin(); updateListIter != updateList.end(); ++updateListIter) {
+ itemsIterators.push_back(updateListIter);
+ }
+}
+
+void AuctionOracleLazyHeap::updateQueueForBidder(IdxType bidderIdx)
+{
+ assert(0 <= bidderIdx and bidderIdx < static_cast<int>(bidders.size()));
+ assert(bidderIdx < static_cast<int>(biddersUpdateMoments.size()));
+
+ int bidderLastUpdateTime = biddersUpdateMoments[bidderIdx];
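+ // updateList is kept ordered by price-update time (most recent first), so we can stop
+ // at the first item that has not been re-priced since this bidder's last refresh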
+ auto iter = updateList.begin();
+ while (iter != updateList.end() and iter->second >= bidderLastUpdateTime) {
+ IdxType itemIdx = iter->first;
+ IdxValPair newVal { itemIdx, weightMatrix[bidderIdx][itemIdx] + prices[itemIdx]};
+ // to-do: change indexing of lossesHeapHandles
+ lossesHeap[bidderIdx]->decrease(lossesHeapHandles[bidderIdx][itemIdx], newVal);
+ iter++;
+ }
+ biddersUpdateMoments[bidderIdx] = updateCounter;
+}
+
+void AuctionOracleLazyHeap::fillInLossesHeap(void)
+{
+ for(size_t bidderIdx = 0; bidderIdx < bidders.size(); ++bidderIdx) {
+ lossesHeap.push_back( new LossesHeap() );
+ std::vector<LossesHeap::handle_type> handlesVec;
+ lossesHeapHandles.push_back(handlesVec);
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ IdxValPair vp { itemIdx, weightMatrix[bidderIdx][itemIdx] + prices[itemIdx] };
+ lossesHeapHandles[bidderIdx].push_back( lossesHeap[bidderIdx]->push(vp) );
+ }
+ }
+}
+
+AuctionOracleLazyHeap::~AuctionOracleLazyHeap()
+{
+ for(auto h : lossesHeap) {
+ delete h;
+ }
+}
+
+void AuctionOracleLazyHeap::setPrice(IdxType itemIdx, double newPrice)
+{
+ assert( prices.at(itemIdx) < newPrice );
+#ifdef DEBUG_AUCTION
+ std::cout << "price incremented by " << prices.at(itemIdx) - newPrice << std::endl;
+#endif
+ prices[itemIdx] = newPrice;
+ // lazy: record the moment we updated the price of the items,
+ // do not update queues.
+ // 1. move the items with updated price to the front of the updateList,
+ updateList.splice(updateList.begin(), updateList, itemsIterators[itemIdx]);
+ // 2. record the moment we updated the price and increase the counter
+ updateList.front().second = updateCounter++;
+}
+
+// subtract min. price from all prices
+void AuctionOracleLazyHeap::adjustPrices(void)
+{
+ double minPrice = *(std::min_element(prices.begin(), prices.end()));
+ std::transform(prices.begin(), prices.end(), prices.begin(), [minPrice](double a) { return a - minPrice; });
+}
+
+DebugOptimalBid AuctionOracleLazyHeap::getOptimalBidDebug(IdxType bidderIdx)
+{
+ assert(bidderIdx >=0 and bidderIdx < static_cast<IdxType>(bidders.size()) );
+ assert(lossesHeap.at(bidderIdx) != nullptr);
+ assert(lossesHeap[bidderIdx]->size() >= 2);
+
+ updateQueueForBidder(bidderIdx);
+ DebugOptimalBid result;
+
+ auto pHeap = lossesHeap[bidderIdx];
+ auto topIter = pHeap->ordered_begin();
+ result.bestItemIdx = topIter->first;
+ result.bestItemValue = topIter->second;
+ ++topIter; // now points to the second-best items
+ result.secondBestItemValue = topIter->second;
+ result.secondBestItemIdx = topIter->first;
+
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemValue = " << bestItemValue << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestValue = " << secondBestItemValue << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemsDist= " << (weightAdjConst - bestItemValue) << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestDist= " << (weightAdjConst - secondBestItemValue) << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+
+ return result;
+}
+
+IdxValPair AuctionOracleLazyHeap::getOptimalBid(const IdxType bidderIdx)
+{
+ assert(bidderIdx >=0 and bidderIdx < static_cast<IdxType>(bidders.size()) );
+ assert(lossesHeap.at(bidderIdx) != nullptr);
+ assert(lossesHeap[bidderIdx]->size() >= 2);
+
+ updateQueueForBidder(bidderIdx);
+
+ auto pHeap = lossesHeap[bidderIdx];
+ auto topIter = pHeap->ordered_begin();
+ IdxType bestItemIdx = topIter->first;
+ double bestItemValue = topIter->second;
+ ++topIter; // now points to the second-best items
+ double secondBestItemValue = topIter->second;
+
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemValue = " << bestItemValue << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestValue = " << secondBestItemValue << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemsDist= " << (weightAdjConst - bestItemValue) << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestDist= " << (weightAdjConst - secondBestItemValue) << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+
+ // bid value: price + value difference + epsilon
+ return std::make_pair(bestItemIdx,
+ prices[bestItemIdx] +
+ ( secondBestItemValue - bestItemValue ) +
+ epsilon );
+}
+
+// *****************************
+// AuctionOracleLazyHeapRestricted
+// *****************************
+
+AuctionOracleLazyHeapRestricted::AuctionOracleLazyHeapRestricted(const std::vector<DiagramPoint>& b,
+ const std::vector<DiagramPoint>& g,
+ double _wassersteinPower,
+ double internal_p) :
+ AuctionOracleAbstract(b, g, _wassersteinPower, internal_p),
+ maxVal(std::numeric_limits<double>::lowest()),
+ biddersUpdateMoments(b.size(), 0),
+ updateCounter(0),
+ heapHandlesIndices(items.size(), std::numeric_limits<size_t>::max()),
+ bestDiagonalItemsComputed(false)
+{
+ assert(b.size() == g.size() );
+ assert(b.size() > 1);
+
+ weightMatrix.reserve(b.size());
+ //const double maxDistUpperBound = 3 * getFurthestDistance3Approx(b, g);
+ //weightAdjConst = pow(maxDistUpperBound, wassersteinPower);
+ //std::cout << "3getFurthestDistance3Approx = " << getFurthestDistance3Approx(b, g) << std::endl;
+ //std::cout << "in AuctionOracleLazyHeapRestricted weightAdjConst = " << weightAdjConst << std::endl;
+ // init weight matrix
+ for(const auto& pointA : bidders) {
+ std::vector<double> weightVec;
+ weightVec.clear();
+ weightVec.reserve(b.size());
+ for(const auto& pointB : items) {
+ double val = pow(distLp(pointA, pointB, internal_p), wassersteinPower);
+ weightVec.push_back( val );
+ if ( val > maxVal )
+ maxVal = val;
+ }
+ weightMatrix.push_back(weightVec);
+ }
+ fillInLossesHeap();
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ updateList.push_back(std::make_pair(static_cast<IdxType>(itemIdx), 0));
+ }
+ for(auto updateListIter = updateList.begin(); updateListIter != updateList.end(); ++updateListIter) {
+ itemsIterators.push_back(updateListIter);
+ }
+
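+ // all diagonal items live in one shared heap keyed by price;
+ // heapHandlesIndices maps an item index to its handle in diagItemsHeap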
+ size_t handleIdx {0};
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ if (items[itemIdx].isDiagonal() ) {
+ heapHandlesIndices[itemIdx] = handleIdx++;
+ diagHeapHandles.push_back(diagItemsHeap.push(std::make_pair(itemIdx, 0)));
+ }
+ }
+ // todo: this must be done in readFiles procedure
+}
+
+void AuctionOracleLazyHeapRestricted::updateQueueForBidder(IdxType bidderIdx)
+{
+ assert(0 <= bidderIdx and bidderIdx < static_cast<int>(bidders.size()));
+ assert(bidderIdx < static_cast<int>(biddersUpdateMoments.size()));
+ assert(lossesHeap[bidderIdx] != nullptr );
+
+ int bidderLastUpdateTime = biddersUpdateMoments[bidderIdx];
+ auto iter = updateList.begin();
+ while (iter != updateList.end() and iter->second >= bidderLastUpdateTime) {
+ IdxType itemIdx = iter->first;
+ size_t handleIdx = itemsIndicesForHeapHandles[bidderIdx][itemIdx];
+ if (handleIdx < items.size() ) {
+ IdxValPair newVal { itemIdx, weightMatrix[bidderIdx][itemIdx] + prices[itemIdx]};
+ // to-do: change indexing of lossesHeapHandles
+ lossesHeap[bidderIdx]->decrease(lossesHeapHandles[bidderIdx][handleIdx], newVal);
+ }
+ iter++;
+ }
+ biddersUpdateMoments[bidderIdx] = updateCounter;
+}
+
+void AuctionOracleLazyHeapRestricted::fillInLossesHeap(void)
+{
+ for(size_t bidderIdx = 0; bidderIdx < bidders.size(); ++bidderIdx) {
+ DiagramPoint bidder { bidders[bidderIdx] };
+ // no heap for diagonal bidders
+ if ( bidder.isDiagonal() ) {
+ lossesHeap.push_back( nullptr );
+ lossesHeapHandles.push_back(std::vector<LossesHeap::handle_type>());
+ itemsIndicesForHeapHandles.push_back( std::vector<size_t>() );
+ continue;
+ } else {
+ lossesHeap.push_back( new LossesHeap() );
+ assert( lossesHeap.at(bidderIdx) != nullptr );
+ itemsIndicesForHeapHandles.push_back( std::vector<size_t>(items.size(), std::numeric_limits<size_t>::max() ) );
+
+ std::vector<LossesHeap::handle_type> handlesVec;
+ lossesHeapHandles.push_back(handlesVec);
+ size_t handleIdx { 0 };
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ assert( itemsIndicesForHeapHandles.at(bidderIdx).at(itemIdx) > 0 );
+ DiagramPoint item { items[itemIdx] };
+ if ( item.isNormal() ) {
+ // item can be assigned to bidder, store in heap
+ IdxValPair vp { itemIdx, weightMatrix[bidderIdx][itemIdx] + prices[itemIdx] };
+ lossesHeapHandles[bidderIdx].push_back( lossesHeap[bidderIdx]->push(vp) );
+ // keep corresponding index in itemsIndicesForHeapHandles
+ itemsIndicesForHeapHandles[bidderIdx][itemIdx] = handleIdx++;
+ }
+ }
+ }
+ }
+}
+
+AuctionOracleLazyHeapRestricted::~AuctionOracleLazyHeapRestricted()
+{
+ for(auto h : lossesHeap) {
+ delete h;
+ }
+}
+
+void AuctionOracleLazyHeapRestricted::setPrice(IdxType itemIdx, double newPrice)
+{
+ assert( prices.at(itemIdx) < newPrice );
+#ifdef DEBUG_AUCTION
+ std::cout << "price incremented by " << prices.at(itemIdx) - newPrice << std::endl;
+#endif
+ prices[itemIdx] = newPrice;
+ if (items[itemIdx].isNormal() ) {
+ // lazy: record the moment we updated the price of the items,
+ // do not update queues.
+ // 1. move the items with updated price to the front of the updateList,
+ updateList.splice(updateList.begin(), updateList, itemsIterators[itemIdx]);
+ // 2. record the moment we updated the price and increase the counter
+ updateList.front().second = updateCounter++;
+ } else {
+ // diagonal items are stored in one heap
+ diagItemsHeap.decrease(diagHeapHandles[heapHandlesIndices[itemIdx]], std::make_pair(itemIdx, newPrice));
+ bestDiagonalItemsComputed = false;
+ }
+}
+
+// subtract min. price from all prices (currently a no-op for this oracle)
+void AuctionOracleLazyHeapRestricted::adjustPrices(void)
+{
+}
+
+DebugOptimalBid AuctionOracleLazyHeapRestricted::getOptimalBidDebug(IdxType bidderIdx)
+{
+ DebugOptimalBid result;
+ assert(bidderIdx >=0 and bidderIdx < static_cast<IdxType>(bidders.size()) );
+
+ DiagramPoint bidder = bidders[bidderIdx];
+ std::vector<IdxValPair> candItems;
+ // corresponding point is always considered as a candidate
+
+ size_t projItemIdx = bidderIdx;
+ assert( 0 <= projItemIdx and projItemIdx < items.size() );
+ DiagramPoint projItem = items[projItemIdx];
+ assert(projItem.type != bidder.type);
+ //assert(projItem.projId == bidder.id);
+ //assert(projItem.id == bidder.projId);
+ // todo: store precomputed distance?
+ double projItemValue = pow(distLp(bidder, projItem, internal_p), wassersteinPower) + prices[projItemIdx];
+ candItems.push_back( std::make_pair(projItemIdx, projItemValue) );
+
+ if (bidder.isNormal()) {
+ assert(lossesHeap.at(bidderIdx) != nullptr);
+ assert(lossesHeap[bidderIdx]->size() >= 2);
+ updateQueueForBidder(bidderIdx);
+ auto pHeap = lossesHeap[bidderIdx];
+ assert( pHeap != nullptr );
+ auto topIter = pHeap->ordered_begin();
+ candItems.push_back( *topIter );
+ ++topIter; // now points to the second-best items
+ candItems.push_back( *topIter );
+ std::sort(candItems.begin(), candItems.end(), CompPairsBySecondStruct());
+ assert(candItems[1].second >= candItems[0].second);
+ } else {
+ // for diagonal bidder the only normal point has already been added
+ // the other 2 candidates are diagonal items only, get from the heap
+ // with prices
+ assert(diagItemsHeap.size() > 1);
+ auto topDiagIter = diagItemsHeap.ordered_begin();
+ auto topDiag1 = *topDiagIter++;
+ auto topDiag2 = *topDiagIter;
+ candItems.push_back(topDiag1);
+ candItems.push_back(topDiag2);
+ std::sort(candItems.begin(), candItems.end(), CompPairsBySecondStruct());
+ assert(candItems.size() == 3);
+ assert(candItems[2].second >= candItems[1].second);
+ assert(candItems[1].second >= candItems[0].second);
+ }
+
+ result.bestItemIdx = candItems[0].first;
+ result.secondBestItemIdx = candItems[1].first;
+ result.bestItemValue = candItems[0].second;
+ result.secondBestItemValue = candItems[1].second;
+
+ // checking code
+
+ //DebugOptimalBid debugMyResult(result);
+ //DebugOptimalBid debugNaiveResult;
+ //debugNaiveResult.bestItemValue = 1e20;
+ //debugNaiveResult.secondBestItemValue = 1e20;
+ //double currItemValue;
+ //for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ //if ( bidders[bidderIdx].type != items[itemIdx].type and
+ //bidders[bidderIdx].projId != items[itemIdx].id)
+ //continue;
+
+ //currItemValue = pow(distLp(bidders[bidderIdx], items[itemIdx]), wassersteinPower) + prices[itemIdx];
+ //if (currItemValue < debugNaiveResult.bestItemValue) {
+ //debugNaiveResult.bestItemValue = currItemValue;
+ //debugNaiveResult.bestItemIdx = itemIdx;
+ //}
+ //}
+
+ //for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ //if (itemIdx == debugNaiveResult.bestItemIdx) {
+ //continue;
+ //}
+ //if ( bidders[bidderIdx].type != items[itemIdx].type and
+ //bidders[bidderIdx].projId != items[itemIdx].id)
+ //continue;
+
+ //currItemValue = pow(distLp(bidders[bidderIdx], items[itemIdx]), wassersteinPower) + prices[itemIdx];
+ //if (currItemValue < debugNaiveResult.secondBestItemValue) {
+ //debugNaiveResult.secondBestItemValue = currItemValue;
+ //debugNaiveResult.secondBestItemIdx = itemIdx;
+ //}
+ //}
+ ////std::cout << "got naive result" << std::endl;
+
+ //if ( fabs( debugMyResult.bestItemValue - debugNaiveResult.bestItemValue ) > 1e-6 or
+ //fabs( debugNaiveResult.secondBestItemValue - debugMyResult.secondBestItemValue) > 1e-6 ) {
+ //std::cerr << "bidderIdx = " << bidderIdx << "; ";
+ //std::cerr << bidders[bidderIdx] << std::endl;
+ //for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ //std::cout << itemIdx << ": " << items[itemIdx] << "; price = " << prices[itemIdx] << std::endl;
+ //}
+ //std::cerr << "debugMyResult: " << debugMyResult << std::endl;
+ //std::cerr << "debugNaiveResult: " << debugNaiveResult << std::endl;
+ //auto pHeap = lossesHeap[bidderIdx];
+ //assert( pHeap != nullptr );
+ //for(auto topIter = pHeap->ordered_begin(); topIter != pHeap->ordered_end(); ++topIter) {
+ //std::cerr << "in heap: " << topIter->first << ": " << topIter->second << "; real value = " << distLp(bidder, items[topIter->first]) + prices[topIter->first] << std::endl;
+ //}
+ //for(auto ci : candItems) {
+ //std::cout << "ci.idx = " << ci.first << ", value = " << ci.second << std::endl;
+ //}
+
+ ////std::cerr << "twoBestItems: " << twoBestItems[0].d << " " << twoBestItems[1].d << std::endl;
+ //assert(false);
+ //}
+
+
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemValue = " << bestItemValue << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestValue = " << secondBestItemValue << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemsDist= " << (weightAdjConst - bestItemValue) << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestDist= " << (weightAdjConst - secondBestItemValue) << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+
+ return result;
+}
+
+IdxValPair AuctionOracleLazyHeapRestricted::getOptimalBid(const IdxType bidderIdx)
+{
+ IdxType bestItemIdx;
+ //IdxType secondBestItemIdx;
+ double bestItemValue;
+ double secondBestItemValue;
+
+ auto& bidder = bidders[bidderIdx];
+ IdxType projItemIdx = bidderIdx;
+ assert( 0 <= projItemIdx and projItemIdx < items.size() );
+ DiagramPoint projItem = items[projItemIdx];
+ assert(projItem.type != bidder.type);
+ //assert(projItem.projId == bidder.id);
+ //assert(projItem.id == bidder.projId);
+ // todo: store precomputed distance?
+ double projItemValue = pow(distLp(bidder, projItem, internal_p), wassersteinPower) + prices[projItemIdx];
+
+ if (bidder.isDiagonal()) {
+ // for diagonal bidder the only normal point has already been added
+ // the other 2 candidates are diagonal items only, get from the heap
+ // with prices
+ assert(diagItemsHeap.size() > 1);
+ if (!bestDiagonalItemsComputed) {
+ auto topDiagIter = diagItemsHeap.ordered_begin();
+ bestDiagonalItemIdx = topDiagIter->first;
+ bestDiagonalItemValue = topDiagIter->second;
+ topDiagIter++;
+ secondBestDiagonalItemIdx = topDiagIter->first;
+ secondBestDiagonalItemValue = topDiagIter->second;
+ bestDiagonalItemsComputed = true;
+ }
+
+ if ( projItemValue < bestDiagonalItemValue) {
+ bestItemIdx = projItemIdx;
+ bestItemValue = projItemValue;
+ secondBestItemValue = bestDiagonalItemValue;
+ //secondBestItemIdx = bestDiagonalItemIdx;
+ } else if (projItemValue < secondBestDiagonalItemValue) {
+ bestItemIdx = bestDiagonalItemIdx;
+ bestItemValue = bestDiagonalItemValue;
+ secondBestItemValue = projItemValue;
+ //secondBestItemIdx = projItemIdx;
+ } else {
+ bestItemIdx = bestDiagonalItemIdx;
+ bestItemValue = bestDiagonalItemValue;
+ secondBestItemValue = secondBestDiagonalItemValue;
+ //secondBestItemIdx = secondBestDiagonalItemIdx;
+ }
+ } else {
+ // for normal bidder get 2 best items among non-diagonal (=normal) points
+ // from the corresponding heap
+ assert(diagItemsHeap.size() > 1);
+ updateQueueForBidder(bidderIdx);
+ auto topNormIter = lossesHeap[bidderIdx]->ordered_begin();
+ IdxType bestNormalItemIdx { topNormIter->first };
+ double bestNormalItemValue { topNormIter->second };
+ topNormIter++;
+ double secondBestNormalItemValue { topNormIter->second };
+ //IdxType secondBestNormalItemIdx { topNormIter->first };
+
+ if ( projItemValue < bestNormalItemValue) {
+ bestItemIdx = projItemIdx;
+ bestItemValue = projItemValue;
+ secondBestItemValue = bestNormalItemValue;
+ //secondBestItemIdx = bestNormalItemIdx;
+ } else if (projItemValue < secondBestNormalItemValue) {
+ bestItemIdx = bestNormalItemIdx;
+ bestItemValue = bestNormalItemValue;
+ secondBestItemValue = projItemValue;
+ //secondBestItemIdx = projItemIdx;
+ } else {
+ bestItemIdx = bestNormalItemIdx;
+ bestItemValue = bestNormalItemValue;
+ secondBestItemValue = secondBestNormalItemValue;
+ //secondBestItemIdx = secondBestNormalItemIdx;
+ }
+ }
+
+ IdxValPair result;
+
+ assert( secondBestItemValue >= bestItemValue );
+
+ result.first = bestItemIdx;
+ result.second = ( secondBestItemValue - bestItemValue ) + prices[bestItemIdx] + epsilon;
+
+
+ // checking code
+
+ //DebugOptimalBid debugMyResult;
+ //debugMyResult.bestItemIdx = bestItemIdx;
+ //debugMyResult.bestItemValue = bestItemValue;
+ //debugMyResult.secondBestItemIdx = secondBestItemIdx;
+ //debugMyResult.secondBestItemValue = secondBestItemValue;
+ //DebugOptimalBid debugNaiveResult;
+ //debugNaiveResult.bestItemValue = 1e20;
+ //debugNaiveResult.secondBestItemValue = 1e20;
+ //double currItemValue;
+ //for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ //if ( bidders[bidderIdx].type != items[itemIdx].type and
+ //bidders[bidderIdx].projId != items[itemIdx].id)
+ //continue;
+
+ //currItemValue = pow(distLp(bidders[bidderIdx], items[itemIdx]), wassersteinPower) + prices[itemIdx];
+ //if (currItemValue < debugNaiveResult.bestItemValue) {
+ //debugNaiveResult.bestItemValue = currItemValue;
+ //debugNaiveResult.bestItemIdx = itemIdx;
+ //}
+ //}
+
+ //for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ //if (itemIdx == debugNaiveResult.bestItemIdx) {
+ //continue;
+ //}
+ //if ( bidders[bidderIdx].type != items[itemIdx].type and
+ //bidders[bidderIdx].projId != items[itemIdx].id)
+ //continue;
+
+ //currItemValue = pow(distLp(bidders[bidderIdx], items[itemIdx]), wassersteinPower) + prices[itemIdx];
+ //if (currItemValue < debugNaiveResult.secondBestItemValue) {
+ //debugNaiveResult.secondBestItemValue = currItemValue;
+ //debugNaiveResult.secondBestItemIdx = itemIdx;
+ //}
+ //}
+ ////std::cout << "got naive result" << std::endl;
+
+ //if ( fabs( debugMyResult.bestItemValue - debugNaiveResult.bestItemValue ) > 1e-6 or
+ //fabs( debugNaiveResult.secondBestItemValue - debugMyResult.secondBestItemValue) > 1e-6 ) {
+ //std::cerr << "bidderIdx = " << bidderIdx << "; ";
+ //std::cerr << bidders[bidderIdx] << std::endl;
+ //for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ //std::cout << itemIdx << ": " << items[itemIdx] << "; price = " << prices[itemIdx] << std::endl;
+ //}
+ //std::cerr << "debugMyResult: " << debugMyResult << std::endl;
+ //std::cerr << "debugNaiveResult: " << debugNaiveResult << std::endl;
+ //auto pHeap = lossesHeap[bidderIdx];
+ //if ( pHeap != nullptr ) {
+ //for(auto topIter = pHeap->ordered_begin(); topIter != pHeap->ordered_end(); ++topIter) {
+ //std::cerr << "in heap: " << topIter->first << ": " << topIter->second << "; real value = " << distLp(bidder, items[topIter->first]) + prices[topIter->first] << std::endl;
+ //}
+ //}
+ ////for(auto ci : candItems) {
+ ////std::cout << "ci.idx = " << ci.first << ", value = " << ci.second << std::endl;
+ ////}
+
+ ////std::cerr << "twoBestItems: " << twoBestItems[0].d << " " << twoBestItems[1].d << std::endl;
+ //assert(false);
+ // }
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemValue = " << bestItemValue << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestValue = " << secondBestItemValue << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemsDist= " << (weightAdjConst - bestItemValue) << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestDist= " << (weightAdjConst - secondBestItemValue) << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+
+ return result;
+}
+
+
+// *****************************
+// AuctionOracleKDTree
+// *****************************
+
+AuctionOracleKDTree::AuctionOracleKDTree(const std::vector<DiagramPoint>& _bidders,
+ const std::vector<DiagramPoint>& _items,
+ double _wassersteinPower,
+ double internal_p) :
+ AuctionOracleAbstract(_bidders, _items, _wassersteinPower, internal_p),
+ heapHandlesIndices(items.size(), std::numeric_limits<size_t>::max()),
+ kdtreeItems(items.size(), std::numeric_limits<size_t>::max())
+{
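+ // this oracle keeps two kd-trees: 'kdtree' over normal (off-diagonal) items only, queried
+ // for diagonal bidders, and 'kdtreeAll' over all items, queried for normal bidders (see
+ // getOptimalBidDebug below); diagonal items additionally sit in a price-keyed heap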
+ //assert(wassersteinPower == 1.0); // temporarily, to-do: update dnn to search with any q
+ size_t dnnItemIdx { 0 };
+ size_t trueIdx { 0 };
+ dnnPoints.clear();
+ // store normal items in kd-tree
+ for(const auto& g : items) {
+ if (g.isNormal()) {
+ kdtreeItems[trueIdx] = dnnItemIdx;
+ // index of items is id of dnn-point
+ DnnPoint p(trueIdx);
+ p[0] = g.getRealX();
+ p[1] = g.getRealY();
+ dnnPoints.push_back(p);
+ assert(dnnItemIdx == dnnPoints.size() - 1);
+ dnnItemIdx++;
+ }
+ trueIdx++;
+ }
+
+ assert(dnnPoints.size() < items.size() );
+ for(size_t i = 0; i < dnnPoints.size(); ++i) {
+ dnnPointHandles.push_back(&dnnPoints[i]);
+ }
+ DnnTraits traits;
+ traits.internal_p = internal_p;
+ //std::cout << "kdtree: " << dnnPointHandles.size() << " points" << std::endl;
+ kdtree = new dnn::KDTree<DnnTraits>(traits, dnnPointHandles, wassersteinPower);
+
+ size_t dnnItemIdxAll { 0 };
+ dnnPointsAll.clear();
+ // store all items in kd-tree
+ for(const auto& g : items) {
+ DnnPoint p(dnnItemIdxAll++);
+ p[0] = g.getRealX();
+ p[1] = g.getRealY();
+ //std::cout << "to all tree: " << p[0] << ", " << p[1] << std::endl;
+ dnnPointsAll.push_back(p);
+ assert(dnnItemIdxAll == dnnPointsAll.size());
+ }
+
+ for(size_t i = 0; i < dnnPointsAll.size(); ++i) {
+ dnnPointHandlesAll.push_back(&dnnPointsAll[i]);
+ }
+ //std::cout << "kdtreeAll: " << dnnPointHandlesAll.size() << " points" << std::endl;
+ kdtreeAll = new dnn::KDTree<DnnTraits>(traits, dnnPointHandlesAll, wassersteinPower);
+
+ size_t handleIdx {0};
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ if (items[itemIdx].isDiagonal() ) {
+ heapHandlesIndices[itemIdx] = handleIdx++;
+ diagHeapHandles.push_back(diagItemsHeap.push(std::make_pair(itemIdx, 0)));
+ }
+ }
+ //to-do: remove maxVal from
+ //std::cout << "3getFurthestDistance3Approx = " << getFurthestDistance3Approx(_bidders, _items) << std::endl;
+ maxVal = 3*getFurthestDistance3Approx(_bidders, _items);
+ maxVal = pow(maxVal, wassersteinPower);
+ weightAdjConst = maxVal;
+ //std::cout << "AuctionOracleKDTree: weightAdjConst = " << weightAdjConst << std::endl;
+ //std::cout << "AuctionOracleKDTree constructor done" << std::endl;
+ // for debug
+}
+
+DebugOptimalBid AuctionOracleKDTree::getOptimalBidDebug(IdxType bidderIdx)
+{
+ DebugOptimalBid result;
+ DiagramPoint bidder = bidders[bidderIdx];
+ DnnPoint bidderDnn;
+ bidderDnn[0] = bidder.getRealX();
+ bidderDnn[1] = bidder.getRealY();
+
+ //std::cout << "bidder.x = " << bidderDnn[0] << std::endl;
+ //std::cout << "bidder.y = " << bidderDnn[1] << std::endl;
+
+ std::vector<IdxValPair> candItems;
+
+
+ if ( bidder.isDiagonal() ) {
+ //
+ auto twoBestItems = kdtree->findK(bidderDnn, 2);
+ //std::cout << "twoBestItems for non-diag: " << twoBestItems[0].d << " " << twoBestItems[1].d << std::endl;
+ candItems.push_back( std::make_pair(twoBestItems[0].p->id(), twoBestItems[0].d) );
+ candItems.push_back( std::make_pair(twoBestItems[1].p->id(), twoBestItems[1].d) );
+ assert(diagItemsHeap.size() > 1);
+ auto topDiagIter = diagItemsHeap.ordered_begin();
+ auto topDiag1 = *topDiagIter++;
+ auto topDiag2 = *topDiagIter;
+ candItems.push_back(topDiag1);
+ candItems.push_back(topDiag2);
+ assert(candItems.size() == 4);
+ std::sort(candItems.begin(), candItems.end(), CompPairsBySecondStruct());
+ assert(candItems[3].second >= candItems[2].second);
+ assert(candItems[2].second >= candItems[1].second);
+ assert(candItems[1].second >= candItems[0].second);
+ } else {
+ auto twoBestItems = kdtreeAll->findK(bidderDnn, 2);
+ //std::cout << "twoBestItems for all: " << twoBestItems[0].d << " " << twoBestItems[1].d << std::endl;
+ candItems.push_back( std::make_pair(twoBestItems[0].p->id(), twoBestItems[0].d) );
+ candItems.push_back( std::make_pair(twoBestItems[1].p->id(), twoBestItems[1].d) );
+ //size_t projItemIdx { biddersProjIndices.at(bidderIdx) };
+ //assert(items[projItemIdx].projId == bidder.id);
+ //double projItemValue { pow(distLp(bidder, items[projItemIdx]), wassersteinPower) + prices.at(projItemIdx) };
+ //candItems.push_back( std::make_pair(projItemIdx, projItemValue) );
+ assert(candItems.size() == 2);
+ assert(candItems[1].second >= candItems[0].second);
+ }
+
+ result.bestItemIdx = candItems[0].first;
+ result.secondBestItemIdx = candItems[1].first;
+ result.bestItemValue = candItems[0].second;
+ result.secondBestItemValue = candItems[1].second;
+ //std::cout << "got result: " << result << std::endl;
+ //double bestItemsPrice = prices[bestItemIdx];
+ //if (items[result.bestItemIdx].type == DiagramPoint::DIAG) {
+ //double bestItemValue1 = pow( distLp(bidder, items[result.bestItemIdx]), q) + prices[result.bestItemIdx];
+ //if ( fabs(result.bestItemValue - bestItemValue1) > 1e-6 ) {
+ //std::cerr << "XXX: " << result.bestItemValue << " vs " << bestItemValue1 << std::endl;
+ //result.bestItemValue = bestItemValue1;
+ //}
+
+ //}
+
+
+ // checking code
+ /*
+
+ DebugOptimalBid debugMyResult(result);
+ DebugOptimalBid debugNaiveResult;
+ debugNaiveResult.bestItemValue = 1e20;
+ debugNaiveResult.secondBestItemValue = 1e20;
+ double currItemValue;
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ //if ( bidders[bidderIdx].type == DiagramPoint::NORMAL and
+ //items[itemIdx].type == DiagramPoint::DIAG and
+ //bidders[bidderIdx].projId != items[itemIdx].id)
+ //continue;
+
+ currItemValue = pow(distLp(bidders[bidderIdx], items[itemIdx]), wassersteinPower) + prices[itemIdx];
+ if (currItemValue < debugNaiveResult.bestItemValue) {
+ debugNaiveResult.bestItemValue = currItemValue;
+ debugNaiveResult.bestItemIdx = itemIdx;
+ }
+ }
+
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ if (itemIdx == debugNaiveResult.bestItemIdx) {
+ continue;
+ }
+ currItemValue = pow(distLp(bidders[bidderIdx], items[itemIdx]), wassersteinPower) + prices[itemIdx];
+ if (currItemValue < debugNaiveResult.secondBestItemValue) {
+ debugNaiveResult.secondBestItemValue = currItemValue;
+ debugNaiveResult.secondBestItemIdx = itemIdx;
+ }
+ }
+ //std::cout << "got naive result" << std::endl;
+
+ if ( fabs( debugMyResult.bestItemValue - debugNaiveResult.bestItemValue ) > 1e-6 or
+ fabs( debugNaiveResult.secondBestItemValue - debugMyResult.secondBestItemValue) > 1e-6 ) {
+ kdtreeAll->printWeights();
+ std::cerr << "bidderIdx = " << bidderIdx << "; ";
+ std::cerr << bidders[bidderIdx] << std::endl;
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ std::cout << itemIdx << ": " << items[itemIdx] << "; price = " << prices[itemIdx] << std::endl;
+ }
+ std::cerr << "debugMyResult: " << debugMyResult << std::endl;
+ std::cerr << "debugNaiveResult: " << debugNaiveResult << std::endl;
+ //std::cerr << "twoBestItems: " << twoBestItems[0].d << " " << twoBestItems[1].d << std::endl;
+ assert(false);
+ }
+ //std::cout << "returning" << std::endl;
+
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemValue = " << bestItemValue << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << secondBestItemIdx << "; secondBestValue = " << secondBestItemValue << "; secondBestPrice = " << prices[secondBestItemIdx] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemsDist= " << (weightAdjConst - bestItemValue) << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << secondBestItemIdx << "; secondBestDist= " << (weightAdjConst - secondBestItemValue) << "; secondBestPrice = " << prices[secondBestItemIdx] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ */
+
+ return result;
+}
+
+IdxValPair AuctionOracleKDTree::getOptimalBid(IdxType bidderIdx)
+{
+ IdxValPair result;
+ DebugOptimalBid debugMyResult = getOptimalBidDebug(bidderIdx);
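+ // standard auction bid: price of the best item plus the value gap between the
+ // best and the second-best item plus epsilon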
+ result.first = debugMyResult.bestItemIdx;
+ result.second = ( debugMyResult.secondBestItemValue - debugMyResult.bestItemValue ) + prices[debugMyResult.bestItemIdx] + epsilon;
+ return result;
+}
+/*
+a_{ij} = d_{ij}
+value_{ij} = a_{ij} + price_j
+*/
+void AuctionOracleKDTree::setPrice(IdxType itemIdx, double newPrice)
+{
+ assert(prices.size() == items.size());
+ assert( 0 < diagHeapHandles.size() and diagHeapHandles.size() <= items.size());
+ assert(newPrice > prices.at(itemIdx));
+ prices[itemIdx] = newPrice;
+ if ( items[itemIdx].isNormal() ) {
+ //std::cout << "before increasing weight in kdtree " << std::endl;
+ //std::cout << kdtreeItems.at(itemIdx) << std::endl;
+ assert(0 <= itemIdx and itemIdx < kdtreeItems.size());
+ assert(0 <= kdtreeItems[itemIdx] and kdtreeItems[itemIdx] < dnnPointHandles.size());
+ kdtree->increase_weight( dnnPointHandles[kdtreeItems[itemIdx]], newPrice);
+ kdtreeAll->increase_weight( dnnPointHandlesAll[itemIdx], newPrice);
+ //std::cout << "after increasing weight in kdtree" << std::endl;
+ } else {
+ //std::cout << "before decreasing weight in heap" << std::endl;
+ //std::cout << "diagHeapHandles.size = " << diagHeapHandles.size() << std::endl;
+ kdtreeAll->increase_weight( dnnPointHandlesAll[itemIdx], newPrice);
+ //std::cout << "after increasing weight in kdtree" << std::endl;
+ assert(diagHeapHandles.size() > heapHandlesIndices.at(itemIdx));
+ diagItemsHeap.decrease(diagHeapHandles[heapHandlesIndices[itemIdx]], std::make_pair(itemIdx, newPrice));
+ }
+}
+
+void AuctionOracleKDTree::adjustPrices(void)
+{
+}
+
+AuctionOracleKDTree::~AuctionOracleKDTree()
+{
+ delete kdtree;
+ delete kdtreeAll;
+}
+
+void AuctionOracleKDTree::setEpsilon(double newVal)
+{
+ assert(newVal >= 0.0);
+ epsilon = newVal;
+}
+
+// *****************************
+// AuctionOracleRestricted
+// *****************************
+AuctionOracleRestricted::AuctionOracleRestricted(const std::vector<DiagramPoint>& b,
+ const std::vector<DiagramPoint>& g,
+ double _wassersteinPower,
+ double internal_p) :
+ AuctionOracleAbstract(b, g, _wassersteinPower, internal_p),
+ maxVal(0.0)
+{
+ assert(b.size() == g.size() );
+ assert(b.size() > 1);
+
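+ // precompute all pairwise costs distLp(bidder, item)^q; maxVal tracks the largest cost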
+ weightMatrix.reserve(b.size());
+ for(const auto& pointA : bidders) {
+ std::vector<double> weightVec;
+ weightVec.clear();
+ weightVec.reserve(b.size());
+ for(const auto& pointB : items) {
+ double val = pow(distLp(pointA, pointB, internal_p), wassersteinPower);
+ if (val > maxVal) {
+ maxVal = val;
+ }
+ weightVec.push_back( val );
+ }
+ weightMatrix.push_back(weightVec);
+ }
+}
+
+IdxValPair AuctionOracleRestricted::getOptimalBid(const IdxType bidderIdx)
+{
+ assert(bidderIdx >=0 and bidderIdx < static_cast<IdxType>(bidders.size()) );
+
+ const auto bidder = bidders[bidderIdx];
+
+ IdxType bestItemIdx { -1 };
+ double bestItemValue { std::numeric_limits<double>::max() };
+ //IdxType secondBestItemIdx { -1 };
+ double secondBestItemValue { std::numeric_limits<double>::max() };
+
+ // find best items idx
+ for(IdxType itemIdx = 0; itemIdx < static_cast<IdxType>(items.size()); ++itemIdx) {
+ // non-diagonal point should be matched either to another
+ // non-diagonal point or to its own projection
+ if (isRestricted and bidder.isNormal() ) {
+ auto item = items[itemIdx];
+ if (item.isDiagonal() and itemIdx != bidderIdx)
+ continue;
+ }
+ auto currItemValue = weightMatrix[bidderIdx][itemIdx] + prices[itemIdx];
+ if ( currItemValue < bestItemValue ) {
+ bestItemValue = currItemValue;
+ bestItemIdx = itemIdx;
+ }
+ }
+
+ // find second best items idx and value
+
+ for(IdxType itemIdx = 0; itemIdx < static_cast<IdxType>(items.size()); ++itemIdx) {
+ // non-diagonal point should be matched either to another
+ // non-diagonal point or to its own projection
+ if (isRestricted and bidder.isNormal() ) {
+ auto itemsItem = items[itemIdx];
+ if (itemsItem.isDiagonal() and itemIdx != bidderIdx)
+ continue;
+ }
+
+ if (static_cast<IdxType>(itemIdx) == bestItemIdx)
+ continue;
+
+ auto currItemValue = weightMatrix[bidderIdx][itemIdx] + prices[itemIdx];
+ if ( currItemValue < secondBestItemValue ) {
+ secondBestItemValue = currItemValue;
+ //secondBestItemIdx = itemIdx;
+ }
+ }
+
+ assert(bestItemValue <= secondBestItemValue);
+
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemValue = " << bestItemValue << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestValue = " << secondBestItemValue << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemValue = " << bestItemValue << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << secondBestItemIdx << "; secondBestValue = " << secondBestItemValue << "; secondBestPrice = " << prices[secondBestItemIdx] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemsDist= " << (weightAdjConst - bestItemValue) << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << topIter->first << "; secondBestDist= " << (weightAdjConst - secondBestItemValue) << "; secondBestPrice = " << prices[topIter->first] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+
+ // bid value: price + value difference + epsilon
+
+ return std::make_pair(bestItemIdx,
+ prices[bestItemIdx] +
+ ( -bestItemValue + secondBestItemValue ) +
+ epsilon );
+}
+
+void AuctionOracleRestricted::setPrice(const IdxType itemIdx, const double newPrice)
+{
+ assert(prices.at(itemIdx) < newPrice );
+ prices[itemIdx] = newPrice;
+}
+
+// *****************************
+// AuctionOracleKDTreeRestricted
+// *****************************
+
+AuctionOracleKDTreeRestricted::AuctionOracleKDTreeRestricted(const std::vector<DiagramPoint>& _bidders,
+ const std::vector<DiagramPoint>& _items,
+ const double _wassersteinPower,
+ const double internal_p) :
+ AuctionOracleAbstract(_bidders, _items, _wassersteinPower, internal_p),
+ heapHandlesIndices(items.size(), std::numeric_limits<size_t>::max()),
+ kdtreeItems(items.size(), std::numeric_limits<size_t>::max()),
+ bestDiagonalItemsComputed(false)
+{
+ size_t dnnItemIdx { 0 };
+ size_t trueIdx { 0 };
+ dnnPoints.clear();
+ // store normal items in kd-tree
+ for(const auto& g : items) {
+ if (g.isNormal() ) {
+ kdtreeItems[trueIdx] = dnnItemIdx;
+ // the index in items is used as the id of the dnn point
+ DnnPoint p(trueIdx);
+ p[0] = g.getRealX();
+ p[1] = g.getRealY();
+ dnnPoints.push_back(p);
+ assert(dnnItemIdx == dnnPoints.size() - 1);
+ dnnItemIdx++;
+ }
+ trueIdx++;
+ }
+
+ assert(dnnPoints.size() < items.size() );
+ for(size_t i = 0; i < dnnPoints.size(); ++i) {
+ dnnPointHandles.push_back(&dnnPoints[i]);
+ }
+ DnnTraits traits;
+ traits.internal_p = internal_p;
+ //std::cout << "kdtree: " << dnnPointHandles.size() << " points" << std::endl;
+ kdtree = new dnn::KDTree<DnnTraits>(traits, dnnPointHandles, wassersteinPower);
+
+ size_t handleIdx {0};
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ if (items[itemIdx].isDiagonal()) {
+ heapHandlesIndices[itemIdx] = handleIdx++;
+ diagHeapHandles.push_back(diagItemsHeap.push(std::make_pair(itemIdx, 0)));
+ }
+ }
+ //to-do: remove maxVal from
+ //std::cout << "3getFurthestDistance3Approx = " << getFurthestDistance3Approx(_bidders, _items) << std::endl;
+ maxVal = 3*getFurthestDistance3Approx(_bidders, _items);
+ maxVal = pow(maxVal, wassersteinPower);
+ weightAdjConst = maxVal;
+ //std::cout << "AuctionOracleKDTreeRestricted: weightAdjConst = " << weightAdjConst << std::endl;
+ //std::cout << "AuctionOracleKDTreeRestricted constructor done" << std::endl;
+}
+
+DebugOptimalBid AuctionOracleKDTreeRestricted::getOptimalBidDebug(IdxType bidderIdx)
+{
+ DebugOptimalBid result;
+ DiagramPoint bidder = bidders[bidderIdx];
+
+ //std::cout << "bidder.x = " << bidderDnn[0] << std::endl;
+ //std::cout << "bidder.y = " << bidderDnn[1] << std::endl;
+
+ // corresponding point is always considered as a candidate
+ // if bidder is a diagonal point, projItem is a normal point,
+ // and vice versa.
+
+ size_t projItemIdx = bidderIdx;
+ assert( 0 <= projItemIdx and projItemIdx < items.size() );
+ DiagramPoint projItem = items[projItemIdx];
+ assert(projItem.type != bidder.type);
+ //assert(projItem.projId == bidder.id);
+ //assert(projItem.id == bidder.projId);
+ // todo: store precomputed distance?
+ double projItemValue = pow(distLp(bidder, projItem, internal_p), wassersteinPower) + prices[projItemIdx];
+
+ if (bidder.isDiagonal()) {
+ // for a diagonal bidder the only normal candidate is its projection,
+ // whose value was computed above; the other 2 candidates are diagonal
+ // items, taken together with their prices from the heap
+ assert(diagItemsHeap.size() > 1);
+ if (!bestDiagonalItemsComputed) {
+ auto topDiagIter = diagItemsHeap.ordered_begin();
+ bestDiagonalItemIdx = topDiagIter->first;
+ bestDiagonalItemValue = topDiagIter->second;
+ topDiagIter++;
+ secondBestDiagonalItemIdx = topDiagIter->first;
+ secondBestDiagonalItemValue = topDiagIter->second;
+ bestDiagonalItemsComputed = true;
+ }
+
+ if ( projItemValue < bestDiagonalItemValue) {
+ result.bestItemIdx = projItemIdx;
+ result.bestItemValue = projItemValue;
+ result.secondBestItemIdx = bestDiagonalItemIdx;
+ result.secondBestItemValue = bestDiagonalItemValue;
+ } else if (projItemValue < secondBestDiagonalItemValue) {
+ result.bestItemIdx = bestDiagonalItemIdx;
+ result.bestItemValue = bestDiagonalItemValue;
+ result.secondBestItemIdx = projItemIdx;
+ result.secondBestItemValue = projItemValue;
+ } else {
+ result.bestItemIdx = bestDiagonalItemIdx;
+ result.bestItemValue = bestDiagonalItemValue;
+ result.secondBestItemIdx = secondBestDiagonalItemIdx;
+ result.secondBestItemValue = secondBestDiagonalItemValue;
+ }
+ } else {
+ // for normal bidder get 2 best items among non-diagonal points from
+ // kdtree
+ DnnPoint bidderDnn;
+ bidderDnn[0] = bidder.getRealX();
+ bidderDnn[1] = bidder.getRealY();
+ auto twoBestItems = kdtree->findK(bidderDnn, 2);
+ //std::cout << "twoBestItems for all: " << twoBestItems[0].d << " " << twoBestItems[1].d << std::endl;
+ size_t bestNormalItemIdx { twoBestItems[0].p->id() };
+ double bestNormalItemValue { twoBestItems[0].d };
+ size_t secondBestNormalItemIdx { twoBestItems[1].p->id() };
+ double secondBestNormalItemValue { twoBestItems[1].d };
+
+ if ( projItemValue < bestNormalItemValue) {
+ result.bestItemIdx = projItemIdx;
+ result.bestItemValue = projItemValue;
+ result.secondBestItemIdx = bestNormalItemIdx;
+ result.secondBestItemValue = bestNormalItemValue;
+ } else if (projItemValue < secondBestNormalItemValue) {
+ result.bestItemIdx = bestNormalItemIdx;
+ result.bestItemValue = bestNormalItemValue;
+ result.secondBestItemIdx = projItemIdx;
+ result.secondBestItemValue = projItemValue;
+ } else {
+ result.bestItemIdx = bestNormalItemIdx;
+ result.bestItemValue = bestNormalItemValue;
+ result.secondBestItemIdx = secondBestNormalItemIdx;
+ result.secondBestItemValue = secondBestNormalItemValue;
+ }
+ }
+
+ return result;
+
+ //std::cout << "got result: " << result << std::endl;
+ //double bestItemsPrice = prices[bestItemIdx];
+ //if (items[result.bestItemIdx].type == DiagramPoint::DIAG) {
+ //double bestItemValue1 = pow( distLp(bidder, items[result.bestItemIdx]), wassersteinPower) + prices[result.bestItemIdx];
+ //if ( fabs(result.bestItemValue - bestItemValue1) > 1e-6 ) {
+ //std::cerr << "XXX: " << result.bestItemValue << " vs " << bestItemValue1 << std::endl;
+ //result.bestItemValue = bestItemValue1;
+ //}
+
+ //}
+
+
+ // checking code
+
+ /*
+ DebugOptimalBid debugMyResult(result);
+ DebugOptimalBid debugNaiveResult;
+ debugNaiveResult.bestItemValue = 1e20;
+ debugNaiveResult.secondBestItemValue = 1e20;
+ double currItemValue;
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ //if ( bidders[bidderIdx].type == DiagramPoint::NORMAL and
+ //items[itemIdx].type == DiagramPoint::DIAG and
+ //bidders[bidderIdx].projId != items[itemIdx].id)
+ //continue;
+
+ currItemValue = pow(distLp(bidders[bidderIdx], items[itemIdx]), wassersteinPower) + prices[itemIdx];
+ if (currItemValue < debugNaiveResult.bestItemValue) {
+ debugNaiveResult.bestItemValue = currItemValue;
+ debugNaiveResult.bestItemIdx = itemIdx;
+ }
+ }
+
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ if (itemIdx == debugNaiveResult.bestItemIdx) {
+ continue;
+ }
+ currItemValue = pow(distLp(bidders[bidderIdx], items[itemIdx]), wassersteinPower) + prices[itemIdx];
+ if (currItemValue < debugNaiveResult.secondBestItemValue) {
+ debugNaiveResult.secondBestItemValue = currItemValue;
+ debugNaiveResult.secondBestItemIdx = itemIdx;
+ }
+ }
+ //std::cout << "got naive result" << std::endl;
+
+ if ( fabs( debugMyResult.bestItemValue - debugNaiveResult.bestItemValue ) > 1e-6 or
+ fabs( debugNaiveResult.secondBestItemValue - debugMyResult.secondBestItemValue) > 1e-6 ) {
+ kdtreeAll->printWeights();
+ std::cerr << "bidderIdx = " << bidderIdx << "; ";
+ std::cerr << bidders[bidderIdx] << std::endl;
+ for(size_t itemIdx = 0; itemIdx < items.size(); ++itemIdx) {
+ std::cout << itemIdx << ": " << items[itemIdx] << "; price = " << prices[itemIdx] << std::endl;
+ }
+ std::cerr << "debugMyResult: " << debugMyResult << std::endl;
+ std::cerr << "debugNaiveResult: " << debugNaiveResult << std::endl;
+ //std::cerr << "twoBestItems: " << twoBestItems[0].d << " " << twoBestItems[1].d << std::endl;
+ assert(false);
+ }
+ //std::cout << "returning" << std::endl;
+
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemValue = " << bestItemValue << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << secondBestItemIdx << "; secondBestValue = " << secondBestItemValue << "; secondBestPrice = " << prices[secondBestItemIdx] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ //std::cout << "getOptimalBid: bidderIdx = " << bidderIdx << "; bestItemIdx = " << bestItemIdx << "; bestItemsDist= " << (weightAdjConst - bestItemValue) << "; bestItemsPrice = " << prices[bestItemIdx] << "; secondBestItemIdx = " << secondBestItemIdx << "; secondBestDist= " << (weightAdjConst - secondBestItemValue) << "; secondBestPrice = " << prices[secondBestItemIdx] << "; bid = " << prices[bestItemIdx] + ( bestItemValue - secondBestItemValue ) + epsilon << "; epsilon = " << epsilon << std::endl;
+ */
+ return result;
+}
+
+IdxValPair AuctionOracleKDTreeRestricted::getOptimalBid(IdxType bidderIdx)
+{
+
+
+ DiagramPoint bidder = bidders[bidderIdx];
+
+ //std::cout << "bidder.x = " << bidderDnn[0] << std::endl;
+ //std::cout << "bidder.y = " << bidderDnn[1] << std::endl;
+
+ // corresponding point is always considered as a candidate
+ // if bidder is a diagonal point, projItem is a normal point,
+ // and vice versa.
+
+ size_t bestItemIdx;
+ double bestItemValue;
+ double secondBestItemValue;
+
+
+ size_t projItemIdx = bidderIdx;
+ assert( 0 <= projItemIdx and projItemIdx < items.size() );
+ DiagramPoint projItem = items[projItemIdx];
+ assert(projItem.type != bidder.type);
+ //assert(projItem.projId == bidder.id);
+ //assert(projItem.id == bidder.projId);
+ // todo: store precomputed distance?
+ double projItemValue = pow(distLp(bidder, projItem, internal_p), wassersteinPower) + prices[projItemIdx];
+
+ if (bidder.isDiagonal()) {
+ // for a diagonal bidder the only normal candidate is its projection,
+ // whose value was computed above; the other 2 candidates are diagonal
+ // items, taken together with their prices from the heap
+
+ if (not bestDiagonalItemsComputed) {
+ auto topDiagIter = diagItemsHeap.ordered_begin();
+ bestDiagonalItemIdx = topDiagIter->first;
+ bestDiagonalItemValue = topDiagIter->second;
+ if (diagItemsHeap.size() > 1) {
+ topDiagIter++;
+ secondBestDiagonalItemIdx = topDiagIter->first;
+ secondBestDiagonalItemValue = topDiagIter->second;
+ } else {
+ // there is only one diagonal item; make sure the second-best
+ // diagonal value always loses to the projection item
+ secondBestDiagonalItemValue = std::numeric_limits<double>::max();
+ secondBestDiagonalItemIdx = -1;
+ }
+ bestDiagonalItemsComputed = true;
+ }
+
+ if ( projItemValue < bestDiagonalItemValue) {
+ bestItemIdx = projItemIdx;
+ bestItemValue = projItemValue;
+ secondBestItemValue = bestDiagonalItemValue;
+ } else if (projItemValue < secondBestDiagonalItemValue) {
+ bestItemIdx = bestDiagonalItemIdx;
+ bestItemValue = bestDiagonalItemValue;
+ secondBestItemValue = projItemValue;
+ } else {
+ bestItemIdx = bestDiagonalItemIdx;
+ bestItemValue = bestDiagonalItemValue;
+ secondBestItemValue = secondBestDiagonalItemValue;
+ }
+ } else {
+ // for normal bidder get 2 best items among non-diagonal points from
+ // kdtree
+ DnnPoint bidderDnn;
+ bidderDnn[0] = bidder.getRealX();
+ bidderDnn[1] = bidder.getRealY();
+ auto twoBestItems = kdtree->findK(bidderDnn, 2);
+ //std::cout << "twoBestItems for all: " << twoBestItems[0].d << " " << twoBestItems[1].d << std::endl;
+ size_t bestNormalItemIdx { twoBestItems[0].p->id() };
+ double bestNormalItemValue { twoBestItems[0].d };
+ // if there is only one off-diagonal point in the second diagram,
+ // kd-tree will not return the second candidate.
+ // Set its value to inf, so it will always lose to the value of the projection
+ double secondBestNormalItemValue { twoBestItems.size() == 1 ? std::numeric_limits<double>::max() : twoBestItems[1].d };
+
+ if ( projItemValue < bestNormalItemValue) {
+ bestItemIdx = projItemIdx;
+ bestItemValue = projItemValue;
+ secondBestItemValue = bestNormalItemValue;
+ } else if (projItemValue < secondBestNormalItemValue) {
+ bestItemIdx = bestNormalItemIdx;
+ bestItemValue = bestNormalItemValue;
+ secondBestItemValue = projItemValue;
+ } else {
+ bestItemIdx = bestNormalItemIdx;
+ bestItemValue = bestNormalItemValue;
+ secondBestItemValue = secondBestNormalItemValue;
+ }
+ }
+
+ IdxValPair result;
+
+ assert( secondBestItemValue >= bestItemValue );
+
+ result.first = bestItemIdx;
+ result.second = ( secondBestItemValue - bestItemValue ) + prices[bestItemIdx] + epsilon;
+ return result;
+}
+/*
+a_{ij} = d_{ij}
+value_{ij} = a_{ij} + price_j
+*/
+void AuctionOracleKDTreeRestricted::setPrice(IdxType itemIdx, double newPrice)
+{
+ assert(prices.size() == items.size());
+ assert( 0 < diagHeapHandles.size() and diagHeapHandles.size() <= items.size());
+ assert(newPrice > prices.at(itemIdx));
+ prices[itemIdx] = newPrice;
+ if ( items[itemIdx].isNormal() ) {
+ //std::cout << "before increasing weight in kdtree " << std::endl;
+ //std::cout << kdtreeItems.at(itemIdx) << std::endl;
+ assert(0 <= itemIdx and itemIdx < kdtreeItems.size());
+ assert(0 <= kdtreeItems[itemIdx] and kdtreeItems[itemIdx] < dnnPointHandles.size());
+ kdtree->increase_weight( dnnPointHandles[kdtreeItems[itemIdx]], newPrice);
+ //std::cout << "after increasing weight in kdtree" << std::endl;
+ } else {
+ //std::cout << "before decreasing weight in heap" << std::endl;
+ //std::cout << "diagHeapHandles.size = " << diagHeapHandles.size() << std::endl;
+ assert(diagHeapHandles.size() > heapHandlesIndices.at(itemIdx));
+ diagItemsHeap.decrease(diagHeapHandles[heapHandlesIndices[itemIdx]], std::make_pair(itemIdx, newPrice));
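+ // invalidate the cached two cheapest diagonal items; they will be recomputed
+ // on the next call to getOptimalBid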
+ bestDiagonalItemsComputed = false;
+ }
+}
+
+void AuctionOracleKDTreeRestricted::adjustPrices(void)
+{
+}
+
+AuctionOracleKDTreeRestricted::~AuctionOracleKDTreeRestricted()
+{
+ delete kdtree;
+}
+
+void AuctionOracleKDTreeRestricted::setEpsilon(double newVal)
+{
+ assert(newVal >= 0.0);
+ epsilon = newVal;
+}
+
+std::ostream& operator<< (std::ostream& output, const DebugOptimalBid& db)
+{
+ std::cout << "bestItemValue = " << db.bestItemValue;
+ std::cout << "; bestItemIdx = " << db.bestItemIdx;
+ std::cout << "; secondBestItemValue = " << db.secondBestItemValue;
+ std::cout << "; secondBestItemIdx = " << db.secondBestItemIdx;
+ return output;
+}
+
+} // end of namespace geom_ws
diff --git a/geom_matching/wasserstein/src/auction_runner_gs.cpp b/geom_matching/wasserstein/src/auction_runner_gs.cpp
new file mode 100644
index 0000000..10d37c9
--- /dev/null
+++ b/geom_matching/wasserstein/src/auction_runner_gs.cpp
@@ -0,0 +1,341 @@
+/*
+
+Copyright (c) 2016, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+
+#include <assert.h>
+#include <algorithm>
+#include <functional>
+#include <iterator>
+#include <chrono>
+
+#include "def_debug.h"
+#include "auction_runner_gs.h"
+#include "wasserstein.h"
+
+//#define PRINT_DETAILED_TIMING
+
+namespace geom_ws {
+
+// *****************************
+// AuctionRunnerGS
+// *****************************
+
+AuctionRunnerGS::AuctionRunnerGS(const std::vector<DiagramPoint>& A, const std::vector<DiagramPoint>& B, const double q, const double _delta, const double _internal_p, const double _initialEpsilon, const double _epsFactor) :
+ bidders(A),
+ items(B),
+ numBidders(A.size()),
+ numItems(A.size()),
+ itemsToBidders(A.size(), -1),
+ biddersToItems(A.size(), -1),
+ wassersteinPower(q),
+ delta(_delta),
+ internal_p(_internal_p),
+ initialEpsilon(_initialEpsilon),
+ epsilonCommonRatio(_epsFactor == 0.0 ? 5.0 : _epsFactor)
+{
+ assert(initialEpsilon >= 0.0 );
+ assert(epsilonCommonRatio >= 0.0 );
+ assert(A.size() == B.size());
+ oracle = std::unique_ptr<AuctionOracle>(new AuctionOracle(bidders, items, wassersteinPower, internal_p));
+}
+
+void AuctionRunnerGS::assignItemToBidder(IdxType itemIdx, IdxType bidderIdx)
+{
+ numRounds++;
+ //sanityCheck();
+ // only unassigned bidders should submit bids and get items
+ assert(biddersToItems[bidderIdx] == -1);
+ IdxType oldItemOwner = itemsToBidders[itemIdx];
+
+ // set new owner
+ biddersToItems[bidderIdx] = itemIdx;
+ itemsToBidders[itemIdx] = bidderIdx;
+ // remove bidder from the list of unassigned bidders
+#ifdef KEEP_UNASSIGNED_ORDERED
+ unassignedBidders.erase(std::make_pair(bidderIdx, bidders[bidderIdx]));
+#else
+ unassignedBidders.erase(bidderIdx);
+#endif
+
+ // old owner becomes unassigned
+ if (oldItemOwner != -1) {
+ biddersToItems[oldItemOwner] = -1;
+#ifdef KEEP_UNASSIGNED_ORDERED
+ unassignedBidders.insert(std::make_pair(oldItemOwner, bidders[oldItemOwner]));
+#else
+ unassignedBidders.insert(oldItemOwner);
+#endif
+ }
+}
+
+
+void AuctionRunnerGS::flushAssignment(void)
+{
+ for(auto& b2i : biddersToItems) {
+ b2i = -1;
+ }
+ for(auto& i2b : itemsToBidders) {
+ i2b = -1;
+ }
+ // we must flush assignment only after we got perfect matching
+ assert(unassignedBidders.empty());
+ // all bidders become unassigned
+ for(size_t bidderIdx = 0; bidderIdx < numBidders; ++bidderIdx) {
+#ifdef KEEP_UNASSIGNED_ORDERED
+ unassignedBidders.insert(std::make_pair(bidderIdx, bidders[bidderIdx]));
+#else
+ unassignedBidders.insert(bidderIdx);
+#endif
+ }
+ assert(unassignedBidders.size() == bidders.size());
+ //oracle->adjustPrices();
+}
+
+void AuctionRunnerGS::runAuction(void)
+{
+#ifdef PRINT_DETAILED_TIMING
+ std::chrono::high_resolution_clock hrClock;
+ std::chrono::time_point<std::chrono::high_resolution_clock> startMoment;
+ startMoment = hrClock.now();
+ std::vector<double> iterResults;
+ std::vector<double> iterEstRelErrors;
+ std::vector<std::chrono::time_point<std::chrono::high_resolution_clock>> iterTimes;
+#endif
+ // choose some initial epsilon
+ if (initialEpsilon == 0.0)
+ oracle->setEpsilon(oracle->maxVal / 4.0);
+ else
+ oracle->setEpsilon(initialEpsilon);
+ assert( oracle->getEpsilon() > 0 );
+ int iterNum { 0 };
+ bool notDone { false };
+ double currentResult;
+ do {
+ flushAssignment();
+ runAuctionPhase();
+ iterNum++;
+ //std::cout << "Iteration " << iterNum << " completed. " << std::endl;
+ // result is d^q
+ currentResult = getDistanceToQthPowerInternal();
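+ // under the epsilon-scaling analysis, d^q - n*epsilon is a lower bound on
+ // the optimal cost; it yields the relative error estimate used below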
+ double denominator = currentResult - numBidders * oracle->getEpsilon();
+ currentResult = pow(currentResult, 1.0 / wassersteinPower);
+#ifdef PRINT_DETAILED_TIMING
+ iterResults.push_back(currentResult);
+ iterTimes.push_back(hrClock.now());
+ std::cout << "Iteration " << iterNum << " finished. ";
+ std::cout << "Current result is " << currentResult << ", epsilon = " << oracle->getEpsilon() << std::endl;
+ std::cout << "Number of rounds (cumulative): " << numRounds << std::endl;
+#endif
+ if ( denominator <= 0 ) {
+ //std::cout << "Epsilon is too big." << std::endl;
+ notDone = true;
+ } else {
+ denominator = pow(denominator, 1.0 / wassersteinPower);
+ double numerator = currentResult - denominator;
+#ifdef PRINT_DETAILED_TIMING
+ std::cout << " numerator: " << numerator << " denominator: " << denominator;
+ std::cout << "; error bound: " << numerator / denominator << std::endl;
+#endif
+ // if relative error is greater than delta, continue
+ notDone = ( numerator / denominator > delta );
+ }
+ // decrease epsilon for the next iteration
+ oracle->setEpsilon( oracle->getEpsilon() / epsilonCommonRatio );
+ if (iterNum > maxIterNum) {
+ std::cerr << "Maximum iteration number exceeded, exiting. Current result is:";
+ std::cerr << wassersteinDistance << std::endl;
+ std::exit(1);
+ }
+ } while ( notDone );
+ //printMatching();
+#ifdef PRINT_DETAILED_TIMING
+ for(size_t iterIdx = 0; iterIdx < iterResults.size(); ++iterIdx) {
+ double trueRelError = ( iterResults.at(iterIdx) - currentResult ) / currentResult;
+ auto iterCumulativeTime = iterTimes.at(iterIdx) - startMoment;
+ std::chrono::duration<double, std::milli> iterTime = ( iterIdx > 0) ? iterTimes[iterIdx] - iterTimes[iterIdx - 1] : iterTimes[iterIdx] - startMoment;
+ std::cout << "iteration " << iterIdx << ", true rel. error " <<
+ trueRelError << ", elapsed time " <<
+ std::chrono::duration<double, std::milli>(iterCumulativeTime).count() <<
+ ", iteration time " << iterTime.count() << std::endl;
+ }
+#endif
+}
+
+void AuctionRunnerGS::runAuctionPhase(void)
+{
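+ // Gauss-Seidel style phase: unassigned bidders bid one at a time, and each
+ // bid takes effect immediately before the next bidder is processed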
+ //std::cout << "Entered runAuctionPhase" << std::endl;
+ do {
+#ifdef KEEP_UNASSIGNED_ORDERED
+ size_t bidderIdx = unassignedBidders.begin()->first;
+#else
+ size_t bidderIdx = *unassignedBidders.begin();
+#endif
+ auto optimalBid = oracle->getOptimalBid(bidderIdx);
+ auto optimalItemIdx = optimalBid.first;
+ auto bidValue = optimalBid.second;
+ assignItemToBidder(optimalBid.first, bidderIdx);
+ oracle->setPrice(optimalItemIdx, bidValue);
+ //printDebug();
+ } while (not unassignedBidders.empty());
+ //std::cout << "runAuctionPhase finished" << std::endl;
+
+#ifdef DEBUG_AUCTION
+ for(size_t bidderIdx = 0; bidderIdx < numBidders; ++bidderIdx) {
+ if ( biddersToItems[bidderIdx] < 0) {
+ std::cerr << "After auction terminated bidder " << bidderIdx;
+ std::cerr << " has no items assigned" << std::endl;
+ throw "Auction did not give a perfect matching";
+ }
+ }
+#endif
+
+}
+
+double AuctionRunnerGS::getDistanceToQthPowerInternal(void)
+{
+ sanityCheck();
+ double result = 0.0;
+ for(size_t bIdx = 0; bIdx < numBidders; ++bIdx) {
+ auto pA = bidders[bIdx];
+ assert( 0 <= biddersToItems[bIdx] and biddersToItems[bIdx] < static_cast<int>(items.size()) );
+ auto pB = items[biddersToItems[bIdx]];
+ result += pow(distLp(pA, pB, internal_p), wassersteinPower);
+ }
+ wassersteinDistance = pow(result, 1.0 / wassersteinPower);
+ return result;
+}
+
+double AuctionRunnerGS::getWassersteinDistance(void)
+{
+ runAuction();
+ return wassersteinDistance;
+}
+
+// Debug routines
+
+void AuctionRunnerGS::printDebug(void)
+{
+#ifdef DEBUG_AUCTION
+ sanityCheck();
+ std::cout << "**********************" << std::endl;
+ std::cout << "Current assignment:" << std::endl;
+ for(size_t idx = 0; idx < biddersToItems.size(); ++idx) {
+ std::cout << idx << " <--> " << biddersToItems[idx] << std::endl;
+ }
+ std::cout << "Weights: " << std::endl;
+ //for(size_t i = 0; i < numBidders; ++i) {
+ //for(size_t j = 0; j < numItems; ++j) {
+ //std::cout << oracle->weightMatrix[i][j] << " ";
+ //}
+ //std::cout << std::endl;
+ //}
+ std::cout << "Prices: " << std::endl;
+ for(const auto price : oracle->getPrices()) {
+ std::cout << price << std::endl;
+ }
+ std::cout << "**********************" << std::endl;
+#endif
+}
+
+
+void AuctionRunnerGS::sanityCheck(void)
+{
+#ifdef DEBUG_AUCTION
+ if (biddersToItems.size() != numBidders) {
+ std::cerr << "Wrong size of biddersToItems, must be " << numBidders << ", is " << biddersToItems.size() << std::endl;
+ throw "Wrong size of biddersToItems";
+ }
+
+ if (itemsToBidders.size() != numBidders) {
+ std::cerr << "Wrong size of itemsToBidders, must be " << numBidders << ", is " << itemsToBidders.size() << std::endl;
+ throw "Wrong size of itemsToBidders";
+ }
+
+ for(size_t bidderIdx = 0; bidderIdx < numBidders; ++bidderIdx) {
+ if ( biddersToItems[bidderIdx] >= 0) {
+
+ if ( std::count(biddersToItems.begin(),
+ biddersToItems.end(),
+ biddersToItems[bidderIdx]) > 1 ) {
+ std::cerr << "Item " << biddersToItems[bidderIdx];
+ std::cerr << " appears in biddersToItems more than once" << std::endl;
+ throw "Duplicate in biddersToItems";
+ }
+
+ if (itemsToBidders.at(biddersToItems[bidderIdx]) != static_cast<int>(bidderIdx)) {
+ std::cerr << "Inconsitency: bidderIdx = " << bidderIdx;
+ std::cerr << ", itemIdx in biddersToItems = ";
+ std::cerr << biddersToItems[bidderIdx];
+ std::cerr << ", bidderIdx in itemsToBidders = ";
+ std::cerr << itemsToBidders[biddersToItems[bidderIdx]] << std::endl;
+ throw "inconsistent mapping";
+ }
+ }
+ }
+
+ for(IdxType itemIdx = 0; itemIdx < static_cast<IdxType>(numBidders); ++itemIdx) {
+ if ( itemsToBidders[itemIdx] >= 0) {
+
+ // check for uniqueness
+ if ( std::count(itemsToBidders.begin(),
+ itemsToBidders.end(),
+ itemsToBidders[itemIdx]) > 1 ) {
+ std::cerr << "Bidder " << itemsToBidders[itemIdx];
+ std::cerr << " appears in itemsToBidders more than once" << std::endl;
+ throw "Duplicate in itemsToBidders";
+ }
+ // check for consistency
+ if (biddersToItems.at(itemsToBidders[itemIdx]) != static_cast<int>(itemIdx)) {
+ std::cerr << "Inconsitency: itemIdx = " << itemIdx;
+ std::cerr << ", bidderIdx in itemsToBidders = ";
+ std::cerr << itemsToBidders[itemIdx];
+ std::cerr << ", itemIdx in biddersToItems= ";
+ std::cerr << biddersToItems[itemsToBidders[itemIdx]] << std::endl;
+ throw "inconsistent mapping";
+ }
+ }
+ }
+#endif
+}
+
+void AuctionRunnerGS::printMatching(void)
+{
+//#ifdef DEBUG_AUCTION
+ sanityCheck();
+ for(size_t bIdx = 0; bIdx < biddersToItems.size(); ++bIdx) {
+ if (biddersToItems[bIdx] >= 0) {
+ auto pA = bidders[bIdx];
+ auto pB = items[biddersToItems[bIdx]];
+ std::cout << pA << " <-> " << pB << "+" << pow(distLp(pA, pB, internal_p), wassersteinPower) << std::endl;
+ } else {
+ assert(false);
+ }
+ }
+//#endif
+}
+
+} // end of namespace geom_ws
diff --git a/geom_matching/wasserstein/src/auction_runner_jac.cpp b/geom_matching/wasserstein/src/auction_runner_jac.cpp
new file mode 100644
index 0000000..dcade94
--- /dev/null
+++ b/geom_matching/wasserstein/src/auction_runner_jac.cpp
@@ -0,0 +1,365 @@
+/*
+
+Copyright (c) 2016, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#include <assert.h>
+#include <algorithm>
+#include <functional>
+#include <iterator>
+
+#include "def_debug.h"
+#include "auction_runner_jac.h"
+#include "wasserstein.h"
+
+namespace geom_ws {
+
+// *****************************
+// AuctionRunnerJak
+// *****************************
+
+AuctionRunnerJak::AuctionRunnerJak(const std::vector<DiagramPoint>& A, const std::vector<DiagramPoint>& B, const double q, const double _delta, const double _internal_p) :
+ bidders(A),
+ items(B),
+ numBidders(A.size()),
+ numItems(A.size()),
+ itemsToBidders(A.size(), -1),
+ biddersToItems(A.size(), -1),
+ wassersteinPower(q),
+ delta(_delta),
+ internal_p(_internal_p),
+ bidTable(A.size(), std::make_pair(-1, std::numeric_limits<double>::lowest()) ),
+ itemReceivedBidVec(B.size(), 0 )
+{
+ assert(A.size() == B.size());
+ oracle = std::unique_ptr<AuctionOracle>(new AuctionOracle(bidders, items, wassersteinPower, internal_p));
+}
+
+void AuctionRunnerJak::assignGoodToBidder(IdxType itemIdx, IdxType bidderIdx)
+{
+ //sanityCheck();
+ IdxType myOldItem = biddersToItems[bidderIdx];
+ IdxType currItemOwner = itemsToBidders[itemIdx];
+
+ // set new owner
+ biddersToItems[bidderIdx] = itemIdx;
+ itemsToBidders[itemIdx] = bidderIdx;
+
+
+ // remove bidder from the list of unassigned bidders
+ unassignedBidders.erase( unassignedBiddersIterators[bidderIdx] );
+ assert( 0 <= bidderIdx and bidderIdx < unassignedBiddersIterators.size() );
+ unassignedBiddersIterators[bidderIdx] = unassignedBidders.end();
+
+ if (-1 == currItemOwner) {
+ // the item we want to assign does not belong to anybody,
+ // just free myOldItem, if necessary
+ // RE: this cannot be necessary. I submitted the best bid, hence I was
+ // an unassigned bidder.
+ if (myOldItem != -1) {
+ std::cout << "This is not happening" << std::endl;
+ assert(false);
+ itemsToBidders[myOldItem] = -1;
+ }
+ } else {
+ // the current owner of itemIdx gets my old item (OK if it's -1)
+ biddersToItems[currItemOwner] = myOldItem;
+ // add the old owner of bids to the list of
+ if ( -1 != myOldItem ) {
+ std::cout << "This is not happening" << std::endl;
+ assert(false);
+ // if I had something, update itemsToBidders, too
+ // RE: nonsense: if I had something, I am not unassigned and did not
+ // submit any bid
+ itemsToBidders[myOldItem] = currItemOwner;
+ }
+ unassignedBidders.push_back(currItemOwner);
+ assert( unassignedBiddersIterators[currItemOwner] == unassignedBidders.end() );
+ unassignedBiddersIterators[currItemOwner] = std::prev( unassignedBidders.end() );
+ }
+ //sanityCheck();
+}
+
+
+void AuctionRunnerJak::assignToBestBidder(IdxType itemIdx)
+{
+ assert( itemIdx >= 0 and itemIdx < static_cast<IdxType>(numItems) );
+ assert( bidTable[itemIdx].first != -1);
+ IdxValPair bestBid { bidTable[itemIdx] };
+ assignGoodToBidder(itemIdx, bestBid.first);
+ //std::cout << "About to call setPrice" << std::endl;
+ oracle->setPrice(itemIdx, bestBid.second);
+ //dynamic_cast<AuctionOracleKDTree*>(oracle)->setNai
+}
+
+void AuctionRunnerJak::clearBidTable(void)
+{
+ for(auto& itemWithBidIdx : itemsWithBids) {
+ itemReceivedBidVec[itemWithBidIdx] = 0;
+ bidTable[itemWithBidIdx].first = -1;
+ bidTable[itemWithBidIdx].second = std::numeric_limits<double>::lowest();
+ }
+ itemsWithBids.clear();
+}
+
+void AuctionRunnerJak::submitBid(IdxType bidderIdx, const IdxValPair& itemsBidValuePair)
+{
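+ // keep only the highest bid per item; remember which items received at least
+ // one bid so that the assignment phase can visit only those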
+ IdxType itemIdx = itemsBidValuePair.first;
+ double bidValue = itemsBidValuePair.second;
+ assert( itemIdx >= 0 );
+ if ( bidTable[itemIdx].second < itemsBidValuePair.second ) {
+ bidTable[itemIdx].first = bidderIdx;
+ bidTable[itemIdx].second = bidValue;
+ }
+ if (0 == itemReceivedBidVec[itemIdx]) {
+ itemReceivedBidVec[itemIdx] = 1;
+ itemsWithBids.push_back(itemIdx);
+ }
+}
+
+void AuctionRunnerJak::printDebug(void)
+{
+#ifdef DEBUG_AUCTION
+ sanityCheck();
+ std::cout << "**********************" << std::endl;
+ std::cout << "Current assignment:" << std::endl;
+ for(size_t idx = 0; idx < biddersToItems.size(); ++idx) {
+ std::cout << idx << " <--> " << biddersToItems[idx] << std::endl;
+ }
+ std::cout << "Weights: " << std::endl;
+ //for(size_t i = 0; i < numBidders; ++i) {
+ //for(size_t j = 0; j < numItems; ++j) {
+ //std::cout << oracle->weightMatrix[i][j] << " ";
+ //}
+ //std::cout << std::endl;
+ //}
+ std::cout << "Prices: " << std::endl;
+ for(const auto price : oracle->getPrices()) {
+ std::cout << price << std::endl;
+ }
+ //std::cout << "Value matrix: " << std::endl;
+ //for(size_t i = 0; i < numBidders; ++i) {
+ //for(size_t j = 0; j < numItems; ++j) {
+ //std::cout << oracle->weightMatrix[i][j] - oracle->prices[j] << " ";
+ //}
+ //std::cout << std::endl;
+ //}
+ std::cout << "**********************" << std::endl;
+#endif
+}
+
+void AuctionRunnerJak::flushAssignment(void)
+{
+ for(auto& b2g : biddersToItems) {
+ b2g = -1;
+ }
+ for(auto& g2b : itemsToBidders) {
+ g2b = -1;
+ }
+ //oracle->adjustPrices();
+}
+
+void AuctionRunnerJak::runAuction(void)
+{
+ // relative error
+ // choose some initial epsilon
+ oracle->setEpsilon(oracle->maxVal / 4.0);
+ assert( oracle->getEpsilon() > 0 );
+ int iterNum { 0 };
+ bool notDone { false };
+ do {
+ flushAssignment();
+ runAuctionPhase();
+ iterNum++;
+ //std::cout << "Iteration " << iterNum << " completed. " << std::endl;
+ // result is d^q
+ double currentResult = getDistanceToQthPowerInternal();
+ double denominator = currentResult - numBidders * oracle->getEpsilon();
+ currentResult = pow(currentResult, 1.0 / wassersteinPower);
+ //std::cout << "Current result is " << currentResult << std::endl;
+ if ( denominator <= 0 ) {
+ //std::cout << "Epsilon is too big." << std::endl;
+ notDone = true;
+ } else {
+ denominator = pow(denominator, 1.0 / wassersteinPower);
+ double numerator = currentResult - denominator;
+ //std::cout << " numerator: " << numerator << " denominator: " << denominator << std::endl;
+ //std::cout << " error bound: " << numerator / denominator << std::endl;
+ // if relative error is greater than delta, continue
+ notDone = ( numerator / denominator > delta );
+ }
+ // decrease epsilon for the next iteration
+ oracle->setEpsilon( oracle->getEpsilon() / epsilonCommonRatio );
+ if (iterNum > maxIterNum) {
+ std::cerr << "Maximum iteration number exceeded, exiting. Current result is:";
+ std::cerr << wassersteinDistance << std::endl;
+ std::exit(1);
+ }
+ } while ( notDone );
+ //printMatching();
+}
+
+void AuctionRunnerJak::runAuctionPhase(void)
+{
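+ // Jacobi style phase: in every round all unassigned bidders submit their bids
+ // first, then every item that received a bid goes to its highest bidder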
+ //std::cout << "Entered runAuctionPhase" << std::endl;
+ //int numUnassignedBidders { 0 };
+
+ // at the beginning of a phase all bidders are unassigned
+ unassignedBidders.clear();
+ unassignedBiddersIterators.clear();
+ for(size_t bidderIdx = 0; bidderIdx < numBidders; ++bidderIdx) {
+ unassignedBidders.push_back(bidderIdx);
+ unassignedBiddersIterators.push_back( std::prev( unassignedBidders.end() ));
+ }
+ do {
+ // bidding phase
+ clearBidTable();
+ for(const auto bidderIdx : unassignedBidders) {
+ submitBid(bidderIdx, oracle->getOptimalBid(bidderIdx));
+ }
+ //std::cout << "Number of unassignedBidders: " << unassignedBidders.size() << std::endl;
+
+ // assignment phase
+ // only items that received a bid in this round need to be (re)assigned
+ for(auto itemIdx : itemsWithBids ) {
+ assignToBestBidder(itemIdx);
+ }
+ //std::cout << "Assignment phase done" << std::endl;
+ //sanityCheck();
+ //printDebug();
+ } while (unassignedBidders.size() > 0);
+ //std::cout << "runAuctionPhase finished" << std::endl;
+
+
+#ifdef DEBUG_AUCTION
+ for(size_t bidderIdx = 0; bidderIdx < numBidders; ++bidderIdx) {
+ if ( biddersToItems[bidderIdx] < 0) {
+ std::cerr << "After auction terminated bidder " << bidderIdx;
+ std::cerr << " has no items assigned" << std::endl;
+ throw "Auction did not give a perfect matching";
+ }
+ }
+#endif
+
+}
+
+// assertion: the matching must be perfect
+double AuctionRunnerJak::getDistanceToQthPowerInternal(void)
+{
+ sanityCheck();
+ double result = 0.0;
+ for(size_t bIdx = 0; bIdx < numBidders; ++bIdx) {
+ auto pA = bidders[bIdx];
+ assert( 0 <= biddersToItems[bIdx] and biddersToItems[bIdx] < static_cast<int>(items.size()) );
+ auto pB = items[biddersToItems[bIdx]];
+ result += pow(distLp(pA, pB, internal_p), wassersteinPower);
+ }
+ wassersteinDistance = pow(result, 1.0 / wassersteinPower);
+ return result;
+}
+
+double AuctionRunnerJak::getWassersteinDistance(void)
+{
+ runAuction();
+ return wassersteinDistance;
+}
+
+void AuctionRunnerJak::sanityCheck(void)
+{
+#ifdef DEBUG_AUCTION
+ if (biddersToItems.size() != numBidders) {
+ std::cerr << "Wrong size of biddersToItems, must be " << numBidders << ", is " << biddersToItems.size() << std::endl;
+ throw "Wrong size of biddersToItems";
+ }
+
+ if (itemsToBidders.size() != numBidders) {
+ std::cerr << "Wrong size of itemsToBidders, must be " << numBidders << ", is " << itemsToBidders.size() << std::endl;
+ throw "Wrong size of itemsToBidders";
+ }
+
+ for(size_t bidderIdx = 0; bidderIdx < numBidders; ++bidderIdx) {
+ if ( biddersToItems[bidderIdx] >= 0) {
+
+ if ( std::count(biddersToItems.begin(),
+ biddersToItems.end(),
+ biddersToItems[bidderIdx]) > 1 ) {
+ std::cerr << "Good " << biddersToItems[bidderIdx];
+ std::cerr << " appears in biddersToItems more than once" << std::endl;
+ throw "Duplicate in biddersToItems";
+ }
+
+ if (itemsToBidders.at(biddersToItems[bidderIdx]) != static_cast<int>(bidderIdx)) {
+ std::cerr << "Inconsitency: bidderIdx = " << bidderIdx;
+ std::cerr << ", itemIdx in biddersToItems = ";
+ std::cerr << biddersToItems[bidderIdx];
+ std::cerr << ", bidderIdx in itemsToBidders = ";
+ std::cerr << itemsToBidders[biddersToItems[bidderIdx]] << std::endl;
+ throw "inconsistent mapping";
+ }
+ }
+ }
+
+ for(IdxType itemIdx = 0; itemIdx < static_cast<IdxType>(numBidders); ++itemIdx) {
+ if ( itemsToBidders[itemIdx] >= 0) {
+
+ // check for uniqueness
+ if ( std::count(itemsToBidders.begin(),
+ itemsToBidders.end(),
+ itemsToBidders[itemIdx]) > 1 ) {
+ std::cerr << "Bidder " << itemsToBidders[itemIdx];
+ std::cerr << " appears in itemsToBidders more than once" << std::endl;
+ throw "Duplicate in itemsToBidders";
+ }
+ // check for consistency
+ if (biddersToItems.at(itemsToBidders[itemIdx]) != static_cast<int>(itemIdx)) {
+ std::cerr << "Inconsitency: itemIdx = " << itemIdx;
+ std::cerr << ", bidderIdx in itemsToBidders = ";
+ std::cerr << itemsToBidders[itemIdx];
+ std::cerr << ", itemIdx in biddersToItems= ";
+ std::cerr << biddersToItems[itemsToBidders[itemIdx]] << std::endl;
+ throw "inconsistent mapping";
+ }
+ }
+ }
+#endif
+}
+
+void AuctionRunnerJak::printMatching(void)
+{
+//#ifdef DEBUG_AUCTION
+ sanityCheck();
+ for(size_t bIdx = 0; bIdx < biddersToItems.size(); ++bIdx) {
+ if (biddersToItems[bIdx] >= 0) {
+ auto pA = bidders[bIdx];
+ auto pB = items[biddersToItems[bIdx]];
+ std::cout << pA << " <-> " << pB << "+" << pow(distLp(pA, pB, internal_p), wassersteinPower) << std::endl;
+ } else {
+ assert(false);
+ }
+ }
+//#endif
+}
+
+} // end of namespace geom_ws
diff --git a/geom_matching/wasserstein/src/basic_defs.cpp b/geom_matching/wasserstein/src/basic_defs.cpp
new file mode 100644
index 0000000..d228123
--- /dev/null
+++ b/geom_matching/wasserstein/src/basic_defs.cpp
@@ -0,0 +1,138 @@
+/*
+
+Copyright (c) 2015, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+*/
+
+#include <algorithm>
+#include <cfloat>
+#include <set>
+#include "basic_defs_ws.h"
+
+namespace geom_ws {
+// Point
+
+bool Point::operator==(const Point& other) const
+{
+ return ((this->x == other.x) and (this->y == other.y));
+}
+
+bool Point::operator!=(const Point& other) const
+{
+ return !(*this == other);
+}
+
+std::ostream& operator<<(std::ostream& output, const Point p)
+{
+ output << "(" << p.x << ", " << p.y << ")";
+ return output;
+}
+
+double sqrDist(const Point& a, const Point& b)
+{
+ return (a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y);
+}
+
+double dist(const Point& a, const Point& b)
+{
+ return sqrt(sqrDist(a, b));
+}
+
+// DiagramPoint
+
+// compute l-inf distance between two diagram points
+double distLInf(const DiagramPoint& a, const DiagramPoint& b)
+{
+ if (a.isDiagonal() and b.isDiagonal()) {
+ return 0.0;
+ } else {
+ return std::max(fabs(a.getRealX() - b.getRealX()), fabs(a.getRealY() - b.getRealY()));
+ }
+}
+
+double distLp(const DiagramPoint& a, const DiagramPoint& b, const double p)
+{
+ // infinity: special case
+ if ( std::isinf(p) )
+ return distLInf(a, b);
+
+ // check p
+ assert( p >= 1.0 );
+
+ // avoid calling pow function
+ if ( p == 1.0 ) {
+ if ( a.isNormal() or b.isNormal() ) {
+ // at least one point is off-diagonal: usual l_1 distance
+ return fabs(a.getRealX() - b.getRealX()) + fabs(a.getRealY() - b.getRealY());
+ } else
+ return 0.0;
+ }
+
+ if ( a.isNormal() or b.isNormal() ) {
+ // at least one point is off-diagonal: usual l_p distance
+ return std::pow(std::pow(fabs(a.getRealX() - b.getRealX()), p) + std::pow(fabs(a.getRealY() - b.getRealY()), p), 1.0/p );
+ } else
+ return 0.0;
+}
+
+
+std::ostream& operator<<(std::ostream& output, const DiagramPoint p)
+{
+ if ( p.type == DiagramPoint::DIAG ) {
+ output << "(" << p.x << ", " << p.y << ", " << 0.5 * (p.x + p.y) << " DIAG )";
+ } else {
+ output << "(" << p.x << ", " << p.y << ", " << " NORMAL)";
+ }
+ return output;
+}
+
+
+DiagramPoint::DiagramPoint(double xx, double yy, Type ttype) :
+ x(xx),
+ y(yy),
+ type(ttype)
+{
+ //if ( yy < xx )
+ //throw "Point is below the diagonal";
+ //if ( yy == xx and ttype != DiagramPoint::DIAG)
+ //throw "Point on the main diagonal must have DIAG type";
+}
+
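+// for a diagonal point the "real" coordinates are its orthogonal projection
+// onto the diagonal y = x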
+double DiagramPoint::getRealX() const
+{
+ if (isNormal())
+ return x;
+ else
+ return 0.5 * ( x + y);
+}
+
+double DiagramPoint::getRealY() const
+{
+ if (isNormal())
+ return y;
+ else
+ return 0.5 * ( x + y);
+}
+
+} // end of namespace geom_ws
diff --git a/geom_matching/wasserstein/src/wasserstein.cpp b/geom_matching/wasserstein/src/wasserstein.cpp
new file mode 100644
index 0000000..5761f11
--- /dev/null
+++ b/geom_matching/wasserstein/src/wasserstein.cpp
@@ -0,0 +1,121 @@
+/*
+
+Copyright (c) 2015, M. Kerber, D. Morozov, A. Nigmetov
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You are under no obligation whatsoever to provide any bug fixes, patches, or
+upgrades to the features, functionality or performance of the source code
+(Enhancements) to anyone; however, if you choose to make your Enhancements
+available either publicly, or directly to copyright holder,
+without imposing a separate written license agreement for such Enhancements,
+then you hereby grant the following license: a non-exclusive, royalty-free
+perpetual license to install, use, modify, prepare derivative works, incorporate
+into other computer software, distribute, and sublicense such enhancements or
+derivative works thereof, in binary and source code form.
+
+ */
+
+#include <assert.h>
+#include <algorithm>
+#include <functional>
+#include <iterator>
+
+#include "def_debug.h"
+#include "wasserstein.h"
+
+#ifdef GAUSS_SEIDEL_AUCTION
+#include "auction_runner_gs.h"
+#else
+#include "auction_runner_jak.h"
+#endif
+
+namespace geom_ws {
+
+double wassersteinDistVec(const std::vector<DiagramPoint>& A,
+ const std::vector<DiagramPoint>& B,
+ const double q,
+ const double delta,
+ const double _internal_p,
+ const double _initialEpsilon,
+ const double _epsFactor)
+{
+ if (q < 1) {
+ std::cerr << "Wasserstein distance not defined for q = " << q << ", must be >= 1" << std::endl;
+ throw "Bad q in Wasserstein";
+ }
+ if (delta < 0.0) {
+ std::cerr << "Relative error " << delta << ", must be > 0" << std::endl;
+ throw "Bad delta in Wasserstein";
+ }
+ if (_initialEpsilon < 0.0) {
+ std::cerr << "Initial epsilon = " << _initialEpsilon << ", must be non-negative" << std::endl;
+ throw "Bad delta in Wasserstein";
+ }
+ if (_epsFactor < 0.0) {
+ std::cerr << "Epsilon factor = " << _epsFactor << ", must be non-negative" << std::endl;
+ throw "Bad delta in Wasserstein";
+ }
+#ifdef GAUSS_SEIDEL_AUCTION
+ AuctionRunnerGS auction(A, B, q, delta, _internal_p, _initialEpsilon, _epsFactor);
+#else
+ AuctionRunnerJak auction(A, B, q, delta, _internal_p);
+#endif
+ return auction.getWassersteinDistance();
+}
+
+bool readDiagramPointSet(const std::string& fname, std::vector<std::pair<double, double>>& result)
+{
+ return readDiagramPointSet(fname.c_str(), result);
+}
+
+bool readDiagramPointSet(const char* fname, std::vector<std::pair<double, double>>& result)
+{
+ size_t lineNumber { 0 };
+ result.clear();
+ std::ifstream f(fname);
+ if (!f.good()) {
+ std::cerr << "Cannot open file " << fname << std::endl;
+ return false;
+ }
+ std::string line;
+ while(std::getline(f, line)) {
+ lineNumber++;
+ // process comments: remove everything after hash
+ auto hashPos = line.find_first_of("#", 0);
+ if( std::string::npos != hashPos) {
+ line = std::string(line.begin(), line.begin() + hashPos);
+ }
+ if (line.empty()) {
+ continue;
+ }
+ // trim whitespaces
+ auto whiteSpaceFront = std::find_if_not(line.begin(),line.end(),isspace);
+ auto whiteSpaceBack = std::find_if_not(line.rbegin(),line.rend(),isspace).base();
+ if (whiteSpaceBack <= whiteSpaceFront) {
+ // line consists of spaces only - move to the next line
+ continue;
+ }
+ line = std::string(whiteSpaceFront,whiteSpaceBack);
+ double x, y;
+ std::istringstream iss(line);
+ if (not(iss >> x >> y)) {
+ std::cerr << "Error in file " << fname << ", line number " << lineNumber << ": cannot parse \"" << line << "\"" << std::endl;
+ return false;
+ }
+ result.push_back(std::make_pair(x,y));
+ }
+ f.close();
+ return true;
+}
+
+} // end of namespace geom_ws