0.3.0: vendor latest libuv

underscorediscovery 2020-02-11 20:19:15 -08:00
parent 1ffddc1457
commit 36efc63428
197 changed files with 13982 additions and 41096 deletions

78
deps/libuv/.gitignore vendored

@ -1,78 +0,0 @@
*.swp
*.[oa]
*.l[oa]
*.opensdf
*.orig
*.pyc
*.sdf
*.suo
.vs/
*.VC.db
*.VC.opendb
core
vgcore.*
.buildstamp
.dirstamp
.deps/
/.libs/
/aclocal.m4
/ar-lib
/autom4te.cache/
/compile
/config.guess
/config.log
/config.status
/config.sub
/configure
/depcomp
/install-sh
/libtool
/libuv.a
/libuv.dylib
/libuv.pc
/libuv.so
/ltmain.sh
/missing
/test-driver
Makefile
Makefile.in
# Generated by gyp for android
*.target.mk
/out/
# /build/gyp (We do want to commit GYP in Wren's repo)
/test/.libs/
/test/run-tests
/test/run-tests.exe
/test/run-tests.dSYM
/test/run-benchmarks
/test/run-benchmarks.exe
/test/run-benchmarks.dSYM
*.sln
*.sln.cache
*.ncb
*.vcproj
*.vcproj*.user
*.vcxproj
*.vcxproj.filters
*.vcxproj.user
_UpgradeReport_Files/
UpgradeLog*.XML
Debug
Release
ipch
# sphinx generated files
/docs/build/
# Clion / IntelliJ project files
/.idea/
*.xcodeproj
*.xcworkspace
# make dist output
libuv-*.tar.*

40
deps/libuv/.mailmap vendored

@ -1,40 +0,0 @@
Aaron Bieber <qbit@deftly.net> <deftly@gmail.com>
Alan Gutierrez <alan@prettyrobots.com> <alan@blogometer.com>
Andrius Bentkus <andrius.bentkus@gmail.com> <toxedvirus@gmail.com>
Bert Belder <bertbelder@gmail.com> <info@2bs.nl>
Bert Belder <bertbelder@gmail.com> <user@ChrUbuntu.(none)>
Brandon Philips <brandon.philips@rackspace.com> <brandon@ifup.org>
Brian White <mscdex@mscdex.net>
Brian White <mscdex@mscdex.net> <mscdex@gmail.com>
Caleb James DeLisle <cjd@hyperboria.ca> <cjd@cjdns.fr>
Christoph Iserlohn <christoph.iserlohn@innoq.com>
Devchandra Meetei Leishangthem <dlmeetei@gmail.com>
Fedor Indutny <fedor.indutny@gmail.com> <fedor@indutny.com>
Frank Denis <github@pureftpd.org>
Imran Iqbal <imrani@ca.ibm.com> <imran@imraniqbal.org>
Isaac Z. Schlueter <i@izs.me>
Jason Williams <necmon@yahoo.com>
Justin Venus <justin.venus@gmail.com> <justin.venus@orbitz.com>
Keno Fischer <kenof@stanford.edu> <kfischer+github@college.harvard.edu>
Keno Fischer <kenof@stanford.edu> <kfischer@college.harvard.edu>
Leith Bade <leith@leithalweapon.geek.nz> <leith@mapbox.com>
Leonard Hecker <leonard.hecker91@gmail.com> <leonard@hecker.io>
Maciej Małecki <maciej.malecki@notimplemented.org> <me@mmalecki.com>
Marc Schlaich <marc.schlaich@googlemail.com> <marc.schlaich@gmail.com>
Michael <michael_dawson@ca.ibm.com>
Michael Neumann <mneumann@think.localnet> <mneumann@ntecs.de>
Nicholas Vavilov <vvnicholas@gmail.com>
Rasmus Christian Pedersen <zerhacken@yahoo.com>
Rasmus Christian Pedersen <zerhacken@yahoo.com> <ruysch@outlook.com>
Robert Mustacchi <rm@joyent.com> <rm@fingolfin.org>
Ryan Dahl <ryan@joyent.com> <ry@tinyclouds.org>
Ryan Emery <seebees@gmail.com>
Sam Roberts <vieuxtech@gmail.com> <sam@strongloop.com>
San-Tai Hsu <vanilla@fatpipi.com>
Santiago Gimeno <santiago.gimeno@quantion.es> <santiago.gimeno@gmail.com>
Saúl Ibarra Corretgé <saghul@gmail.com>
Shigeki Ohtsu <ohtsu@iij.ad.jp> <ohtsu@ohtsu.org>
Timothy J. Fontaine <tjfontaine@gmail.com>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Yazhong Liu <yorkiefixer@gmail.com>
Yuki Okumura <mjt@cltn.org>

279
deps/libuv/AUTHORS vendored

@ -1,279 +0,0 @@
# Authors ordered by first contribution.
Ryan Dahl <ryan@joyent.com>
Bert Belder <bertbelder@gmail.com>
Josh Roesslein <jroesslein@gmail.com>
Alan Gutierrez <alan@prettyrobots.com>
Joshua Peek <josh@joshpeek.com>
Igor Zinkovsky <igorzi@microsoft.com>
San-Tai Hsu <vanilla@fatpipi.com>
Ben Noordhuis <info@bnoordhuis.nl>
Henry Rawas <henryr@schakra.com>
Robert Mustacchi <rm@joyent.com>
Matt Stevens <matt@alloysoft.com>
Paul Querna <pquerna@apache.org>
Shigeki Ohtsu <ohtsu@iij.ad.jp>
Tom Hughes <tom.hughes@palm.com>
Peter Bright <drpizza@quiscalusmexicanus.org>
Jeroen Janssen <jeroen.janssen@gmail.com>
Andrea Lattuada <ndr.lattuada@gmail.com>
Augusto Henrique Hentz <ahhentz@gmail.com>
Clifford Heath <clifford.heath@gmail.com>
Jorge Chamorro Bieling <jorge@jorgechamorro.com>
Luis Lavena <luislavena@gmail.com>
Matthew Sporleder <msporleder@gmail.com>
Erick Tryzelaar <erick.tryzelaar@gmail.com>
Isaac Z. Schlueter <i@izs.me>
Pieter Noordhuis <pcnoordhuis@gmail.com>
Marek Jelen <marek@jelen.biz>
Fedor Indutny <fedor.indutny@gmail.com>
Saúl Ibarra Corretgé <saghul@gmail.com>
Felix Geisendörfer <felix@debuggable.com>
Yuki Okumura <mjt@cltn.org>
Roman Shtylman <shtylman@gmail.com>
Frank Denis <github@pureftpd.org>
Carter Allen <CarterA@opt-6.com>
Tj Holowaychuk <tj@vision-media.ca>
Shimon Doodkin <helpmepro1@gmail.com>
Ryan Emery <seebees@gmail.com>
Bruce Mitchener <bruce.mitchener@gmail.com>
Maciej Małecki <maciej.malecki@notimplemented.org>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Daisuke Murase <typester@cpan.org>
Paddy Byers <paddy.byers@gmail.com>
Dan VerWeire <dverweire@gmail.com>
Brandon Benvie <brandon@bbenvie.com>
Brandon Philips <brandon.philips@rackspace.com>
Nathan Rajlich <nathan@tootallnate.net>
Charlie McConnell <charlie@charlieistheman.com>
Vladimir Dronnikov <dronnikov@gmail.com>
Aaron Bieber <qbit@deftly.net>
Bulat Shakirzyanov <mallluhuct@gmail.com>
Brian White <mscdex@mscdex.net>
Erik Dubbelboer <erik@dubbelboer.com>
Keno Fischer <kenof@stanford.edu>
Ira Cooper <Ira.Cooper@mathworks.com>
Andrius Bentkus <andrius.bentkus@gmail.com>
Iñaki Baz Castillo <ibc@aliax.net>
Mark Cavage <mark.cavage@joyent.com>
George Yohng <georgegh@oss3d.com>
Xidorn Quan <quanxunzhen@gmail.com>
Roman Neuhauser <rneuhauser@suse.cz>
Shuhei Tanuma <shuhei.tanuma@gmail.com>
Bryan Cantrill <bcantrill@acm.org>
Trond Norbye <trond.norbye@gmail.com>
Tim Holy <holy@wustl.edu>
Prancesco Pertugio <meh@schizofreni.co>
Leonard Hecker <leonard.hecker91@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Luigi Grilli <luigi.grilli@gmail.com>
Shannen Saez <shannenlaptop@gmail.com>
Artur Adib <arturadib@gmail.com>
Hiroaki Nakamura <hnakamur@gmail.com>
Ting-Yu Lin <ph.minamo@cytisan.com>
Stephen Gallagher <sgallagh@redhat.com>
Shane Holloway <shane.holloway@ieee.org>
Andrew Shaffer <darawk@gmail.com>
Vlad Tudose <vlad.tudose@intel.com>
Ben Leslie <benno@benno.id.au>
Tim Bradshaw <tfb@cley.com>
Timothy J. Fontaine <tjfontaine@gmail.com>
Marc Schlaich <marc.schlaich@googlemail.com>
Brian Mazza <louseman@gmail.com>
Elliot Saba <staticfloat@gmail.com>
Ben Kelly <ben@wanderview.com>
Nils Maier <maierman@web.de>
Nicholas Vavilov <vvnicholas@gmail.com>
Miroslav Bajtoš <miro.bajtos@gmail.com>
Sean Silva <chisophugis@gmail.com>
Wynn Wilkes <wynnw@movenetworks.com>
Andrei Sedoi <bsnote@gmail.com>
Alex Crichton <alex@alexcrichton.com>
Brent Cook <brent@boundary.com>
Brian Kaisner <bkize1@gmail.com>
Luca Bruno <lucab@debian.org>
Reini Urban <rurban@cpanel.net>
Maks Naumov <maksqwe1@ukr.net>
Sean Farrell <sean.farrell@rioki.org>
Chris Bank <cbank@adobe.com>
Geert Jansen <geertj@gmail.com>
Christoph Iserlohn <christoph.iserlohn@innoq.com>
Steven Kabbes <stevenkabbes@gmail.com>
Alex Gaynor <alex.gaynor@gmail.com>
huxingyi <huxingyi@msn.com>
Tenor Biel <tenorbiel@gmail.com>
Andrej Manduch <AManduch@gmail.com>
Joshua Neuheisel <joshua@neuheisel.us>
Alexis Campailla <alexis@janeasystems.com>
Yazhong Liu <yorkiefixer@gmail.com>
Sam Roberts <vieuxtech@gmail.com>
River Tarnell <river@loreley.flyingparchment.org.uk>
Nathan Sweet <nathanjsweet@gmail.com>
Trevor Norris <trev.norris@gmail.com>
Oguz Bastemur <obastemur@gmail.com>
Dylan Cali <calid1984@gmail.com>
Austin Foxley <austinf@cetoncorp.com>
Benjamin Saunders <ben.e.saunders@gmail.com>
Geoffry Song <goffrie@gmail.com>
Rasmus Christian Pedersen <ruysch@outlook.com>
William Light <wrl@illest.net>
Oleg Efimov <o.efimov@corp.badoo.com>
Lars Gierth <larsg@systemli.org>
Rasmus Christian Pedersen <zerhacken@yahoo.com>
Justin Venus <justin.venus@gmail.com>
Kristian Evensen <kristian.evensen@gmail.com>
Linus Mårtensson <linus.martensson@sonymobile.com>
Navaneeth Kedaram Nambiathan <navaneethkn@gmail.com>
Yorkie <yorkiefixer@gmail.com>
StarWing <weasley.wx@gmail.com>
thierry-FreeBSD <thierry@FreeBSD.org>
Isaiah Norton <isaiah.norton@gmail.com>
Raul Martins <raulms.martins@gmail.com>
David Capello <davidcapello@gmail.com>
Paul Tan <pyokagan@gmail.com>
Javier Hernández <jhernandez@emergya.com>
Tonis Tiigi <tonistiigi@gmail.com>
Norio Kobota <nori.0428@gmail.com>
李港平 <chopdown@gmail.com>
Chernyshev Viacheslav <astellar@ro.ru>
Stephen von Takach <steve@advancedcontrol.com.au>
JD Ballard <jd@pixelandline.com>
Luka Perkov <luka.perkov@sartura.hr>
Ryan Cole <ryan@rycole.com>
HungMingWu <u9089000@gmail.com>
Jay Satiro <raysatiro@yahoo.com>
Leith Bade <leith@leithalweapon.geek.nz>
Peter Atashian <retep998@gmail.com>
Tim Cooper <tim.cooper@layeh.com>
Caleb James DeLisle <cjd@hyperboria.ca>
Jameson Nash <vtjnash@gmail.com>
Graham Lee <ghmlee@ghmlee.com>
Andrew Low <Andrew_Low@ca.ibm.com>
Pavel Platto <hinidu@gmail.com>
Tony Kelman <tony@kelman.net>
John Firebaugh <john.firebaugh@gmail.com>
lilohuang <lilohuang@hotmail.com>
Paul Goldsmith <paul.goldsmith@aplink.net>
Julien Gilli <julien.gilli@joyent.com>
Michael Hudson-Doyle <michael.hudson@linaro.org>
Recep ASLANTAS <m@recp.me>
Rob Adams <readams@readams.net>
Zachary Newman <znewman01@gmail.com>
Robin Hahling <robin.hahling@gw-computing.net>
Jeff Widman <jeff@jeffwidman.com>
cjihrig <cjihrig@gmail.com>
Tomasz Kołodziejski <tkolodziejski@mozilla.com>
Unknown W. Brackets <checkins@unknownbrackets.org>
Emmanuel Odeke <odeke@ualberta.ca>
Mikhail Mukovnikov <yndi@me.com>
Thorsten Lorenz <thlorenz@gmx.de>
Yuri D'Elia <yuri.delia@eurac.edu>
Manos Nikolaidis <manos@shadowrobot.com>
Elijah Andrews <elijah@busbud.com>
Michael Ira Krufky <m.krufky@samsung.com>
Helge Deller <deller@gmx.de>
Joey Geralnik <jgeralnik@gmail.com>
Tim Caswell <tim@creationix.com>
Logan Rosen <loganrosen@gmail.com>
Kenneth Perry <thothonegan@gmail.com>
John Marino <marino@FreeBSD.org>
Alexey Melnichuk <mimir@newmail.ru>
Johan Bergström <bugs@bergstroem.nu>
Alex Mo <almosnow@gmail.com>
Luis Martinez de Bartolome <lasote@gmail.com>
Michael Penick <michael.penick@datastax.com>
Michael <michael_dawson@ca.ibm.com>
Massimiliano Torromeo <massimiliano.torromeo@gmail.com>
TomCrypto <thomas.beneteau@yahoo.fr>
Brett Vickers <brett@beevik.com>
Ole André Vadla Ravnås <oleavr@gmail.com>
Kazuho Oku <kazuhooku@gmail.com>
Ryan Phillips <ryan.phillips@rackspace.com>
Brian Green <briangreenery@gmail.com>
Devchandra Meetei Leishangthem <dlmeetei@gmail.com>
Corey Farrell <git@cfware.com>
Per Nilsson <pni@qlik.com>
Alan Rogers <alanjrogers@me.com>
Daryl Haresign <github@daryl.haresign.com>
Rui Abreu Ferreira <raf-ep@gmx.com>
João Reis <reis@janeasystems.com>
farblue68 <farblue68@gmail.com>
Jason Williams <necmon@yahoo.com>
Igor Soarez <igorsoarez@gmail.com>
Miodrag Milanovic <mmicko@gmail.com>
Cheng Zhao <zcbenz@gmail.com>
Michael Neumann <mneumann@think.localnet>
Stefano Cristiano <stefanocristiano82@gmail.com>
heshamsafi <hesham.safi.eldeen@gmail.com>
A. Hauptmann <andreashauptmann@t-online.de>
John McNamee <jpm@microwiz.com>
Yosuke Furukawa <yosuke.furukawa@gmail.com>
Santiago Gimeno <santiago.gimeno@quantion.es>
guworks <ground.up.works@gmail.com>
RossBencina <rossb@audiomulch.com>
Roger A. Light <roger@atchoo.org>
chenttuuvv <chenttuuvv@yahoo.com>
Richard Lau <riclau@uk.ibm.com>
ronkorving <rkorving@wizcorp.jp>
Corbin Simpson <MostAwesomeDude@gmail.com>
Zachary Hamm <zsh@imipolexg.org>
Karl Skomski <karl@skomski.com>
Jeremy Whitlock <jwhitlock@apache.org>
Willem Thiart <himself@willemthiart.com>
Ben Trask <bentrask@comcast.net>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Colin Snover <github.com@zetafleet.com>
Sakthipriyan Vairamani <thechargingvolcano@gmail.com>
Eli Skeggs <skeggse@gmail.com>
nmushell <nmushell@bloomberg.net>
Gireesh Punathil <gpunathi@in.ibm.com>
Ryan Johnston <ryan@mediapixel.co.nz>
Adam Stylinski <stylinae@mail.uc.edu>
Nathan Corvino <nathan@corvino.com>
Wink Saville <wink@saville.com>
Angel Leon <gubatron@gmail.com>
Louis DeJardin <lodejard@microsoft.com>
Imran Iqbal <imrani@ca.ibm.com>
Petka Antonov <petka_antonov@hotmail.com>
Ian Kronquist <iankronquist@teleport.com>
kkdaemon <kkdaemon@gmail.com>
Yuval Brik <yuval@brik.org.il>
Joran Dirk Greef <joran@ronomon.com>
Andrey Mazo <andrey.mazo@fidelissecurity.com>
sztomi <hello.sztomi@gmail.com>
Martin Bark <martin@barkynet.com>
Dave <dave@jut.io>
Alexis Murzeau <amubtdx@gmail.com>
Didiet <lynxluna@gmail.com>
Nan Xiang <514580344@qq.com>
Samuel Lorétan <sloretan@riotgames.com>
Nándor István Krácser <bonifaido@gmail.com>
Katsutoshi Horie <mps299792458@gmail.com>
Lukasz Jagiello <lukasz@wikia-inc.com>
Robert Chiras <robert.chiras@intel.com>
Kári Tristan Helgason <kthelgason@gmail.com>
Krishnaraj Bhat <krrishnarraj@gmail.com>
Enno Boland <g@s01.de>
Michael Fero <michael.fero@datastax.com>
Robert Jefe Lindstaedt <robert.lindstaedt@gmail.com>
Myles Borins <myles.borins@gmail.com>
Tony Theodore <tonyt@logyst.com>
Jason Ginchereau <jasongin@microsoft.com>
Nicolas Cavallari <nicolas.cavallari@green-communications.fr>
Pierre-Marie de Rodat <pmderodat@kawie.fr>
Brian Maher <brian@brimworks.com>
neevek <i@neevek.net>
John Barboza <jbarboza@ca.ibm.com>
liuxiaobo <icexile@qq.com>
Michele Caini <michele.caini@gmail.com>
Bartosz Sosnowski <bartosz@janeasystems.com>
Matej Knopp <matej.knopp@gmail.com>
sunjin.lee <kod21236@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Jeffrey Clark <dude@zaplabs.com>
Bart Robinson <bartarr@gmail.com>
Vit Gottwald <vit.gottwald@gmail.com>
Vladimír Čunát <vladimir.cunat@nic.cz>
Alex Hultman <alexhultman@gmail.com>
Brad King <brad.king@kitware.com>
Philippe Laferriere <laferriere.phil@gmail.com>
Will Speak <lithiumflame@gmail.com>

169
deps/libuv/CONTRIBUTING.md vendored

@ -1,169 +0,0 @@
# CONTRIBUTING
The libuv project welcomes new contributors. This document will guide you
through the process.
### FORK
Fork the project [on GitHub](https://github.com/libuv/libuv) and check out
your copy.
```
$ git clone https://github.com/username/libuv.git
$ cd libuv
$ git remote add upstream https://github.com/libuv/libuv.git
```
Now decide if you want your feature or bug fix to go into the master branch
or the stable branch. As a rule of thumb, bug fixes go into the stable branch
while new features go into the master branch.
The stable branch is effectively frozen; patches that change the libuv
API/ABI or affect the run-time behavior of applications get rejected.
In case of doubt, open an issue in the [issue tracker][], post your question
to the [libuv mailing list], or contact one of [project maintainers][] on [IRC][].
Especially do so if you plan to work on something big. Nothing is more
frustrating than seeing your hard work go to waste because your vision
does not align with that of the project maintainers.
### BRANCH
Okay, so you have decided on the proper branch. Create a feature branch
and start hacking:
```
$ git checkout -b my-feature-branch -t origin/v1.x
```
(Where v1.x is the latest stable branch as of this writing.)
### CODE
Please adhere to libuv's code style. In general it follows the conventions from
the [Google C/C++ style guide]. Some of the key points, as well as some
additional guidelines, are enumerated below.
* Code that is specific to unix-y platforms should be placed in `src/unix`, and
declarations go into `include/uv-unix.h`.
* Source code that is Windows-specific goes into `src/win`, and related
publicly exported types, functions and macro declarations should generally
be declared in `include/uv-win.h`.
* Names should be descriptive and concise.
* All the symbols and types that libuv makes available publicly should be
prefixed with `uv_` (or `UV_` in case of macros).
* Internal, non-static functions should be prefixed with `uv__`.
* Use two spaces and no tabs.
* Lines should be wrapped at 80 characters.
* Ensure that lines have no trailing whitespace, and use unix-style (LF) line
endings.
* Use C89-compliant syntax. In other words, variables can only be declared at
the top of a scope (function, if/for/while-block).
* When writing comments, use properly constructed sentences, including
punctuation.
* When documenting APIs and/or source code, don't make assumptions or make
implications about race, gender, religion, political orientation or anything
else that isn't relevant to the project.
* Remember that source code usually gets written once and read often: ensure
the reader doesn't have to make guesses. Make sure that the purpose and inner
logic are either obvious to a reasonably skilled professional, or add a
comment that explains it.
### COMMIT
Make sure git knows your name and email address:
```
$ git config --global user.name "J. Random User"
$ git config --global user.email "j.random.user@example.com"
```
Writing good commit logs is important. A commit log should describe what
changed and why. Follow these guidelines when writing one:
1. The first line should be 50 characters or less and contain a short
description of the change prefixed with the name of the changed
subsystem (e.g. "net: add localAddress and localPort to Socket").
2. Keep the second line blank.
3. Wrap all other lines at 72 columns.
A good commit log looks like this:
```
subsystem: explaining the commit in one line
Body of commit message is a few lines of text, explaining things
in more detail, possibly giving some background about the issue
being fixed, etc etc.
The body of the commit message can be several paragraphs, and
please do proper word-wrap and keep columns shorter than about
72 characters or so. That way `git log` will show things
nicely even when it is indented.
```
The header line should be meaningful; it is what other people see when they
run `git shortlog` or `git log --oneline`.
Check the output of `git log --oneline files_that_you_changed` to find out
what subsystem (or subsystems) your changes touch.
### REBASE
Use `git rebase` (not `git merge`) to sync your work from time to time.
```
$ git fetch upstream
$ git rebase upstream/v1.x # or upstream/master
```
### TEST
Bug fixes and features should come with tests. Add your tests in the
`test/` directory. Each new test needs to be registered in `test/test-list.h`. If you add a new test file, it needs to be registered in two places:
- `Makefile.am`: add the file's name to the `test_run_tests_SOURCES` list.
- `uv.gyp`: add the file's name to the `sources` list in the `run-tests` target.
Look at other tests to see how they should be structured (license boilerplate,
the way entry points are declared, etc.).
Check README.md file to find out how to run the test suite and make sure that
there are no test regressions.
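For illustration only, here is a minimal sketch of what a new test and its registration could look like (the test name `ping_self` and its file name are hypothetical; the macro names are those used by libuv's test harness headers). The new file would then also be listed in `Makefile.am` and `uv.gyp` as described above:
```
/* test/test-ping-self.c -- hypothetical new test file */
#include "uv.h"
#include "task.h"

TEST_IMPL(ping_self) {
  uv_loop_t* loop = uv_default_loop();
  ASSERT(loop != NULL);

  /* ... create handles and exercise the feature under test here ... */

  /* uv_run() returns 0 once the loop has drained all pending work. */
  ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));

  MAKE_VALGRIND_HAPPY();
  return 0;
}

/* test/test-list.h -- add the matching declaration and list entry: */
/*   TEST_DECLARE (ping_self)   */
/*   TEST_ENTRY   (ping_self)   */
```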
### PUSH
```
$ git push origin my-feature-branch
```
Go to https://github.com/username/libuv and select your feature branch. Click
the 'Pull Request' button and fill out the form.
Pull requests are usually reviewed within a few days. If there are comments
to address, apply your changes in a separate commit and push that to your
feature branch. Post a comment in the pull request afterwards; GitHub does
not send out notifications when you add commits.
[issue tracker]: https://github.com/libuv/libuv/issues
[libuv mailing list]: http://groups.google.com/group/libuv
[IRC]: http://webchat.freenode.net/?channels=libuv
[Google C/C++ style guide]: https://google.github.io/styleguide/cppguide.html
[project maintainers]: https://github.com/libuv/libuv/blob/master/MAINTAINERS.md

2818
deps/libuv/ChangeLog vendored

File diff suppressed because it is too large

70
deps/libuv/LICENSE vendored

@ -1,70 +0,0 @@
libuv is licensed for use as follows:
====
Copyright (c) 2015-present libuv project contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
====
This license applies to parts of libuv originating from the
https://github.com/joyent/libuv repository:
====
Copyright Joyent, Inc. and other Node contributors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
====
This license applies to all parts of libuv that are not externally
maintained libraries.
The externally maintained libraries used by libuv are:
- tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
- inet_pton and inet_ntop implementations, contained in src/inet.c, are
copyright the Internet Systems Consortium, Inc., and licensed under the ISC
license.
- stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three
clause BSD license.
- pthread-fixes.h, pthread-fixes.c, copyright Google Inc. and Sony Mobile
Communications AB. Three clause BSD license.
- android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design
Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement
n° 289016). Three clause BSD license.

41
deps/libuv/MAINTAINERS.md vendored

@ -1,41 +0,0 @@
# Project Maintainers
libuv is currently managed by the following individuals:
* **Ben Noordhuis** ([@bnoordhuis](https://github.com/bnoordhuis))
- GPG key: D77B 1E34 243F BAF0 5F8E 9CC3 4F55 C8C8 46AB 89B9 (pubkey-bnoordhuis)
* **Bert Belder** ([@piscisaureus](https://github.com/piscisaureus))
* **Colin Ihrig** ([@cjihrig](https://github.com/cjihrig))
- GPG key: 94AE 3667 5C46 4D64 BAFA 68DD 7434 390B DBE9 B9C5 (pubkey-cjihrig)
* **Fedor Indutny** ([@indutny](https://github.com/indutny))
- GPG key: AF2E EA41 EC34 47BF DD86 FED9 D706 3CCE 19B7 E890 (pubkey-indutny)
* **Imran Iqbal** ([@iWuzHere](https://github.com/iWuzHere))
- GPG key: 9DFE AA5F 481B BF77 2D90 03CE D592 4925 2F8E C41A (pubkey-iwuzhere)
* **Santiago Gimeno** ([@santigimeno](https://github.com/santigimeno))
- GPG key: 612F 0EAD 9401 6223 79DF 4402 F28C 3C8D A33C 03BE (pubkey-santigimeno)
* **Saúl Ibarra Corretgé** ([@saghul](https://github.com/saghul))
- GPG key: FDF5 1936 4458 319F A823 3DC9 410E 5553 AE9B C059 (pubkey-saghul)
## Storing a maintainer key in Git
It's quite handy to store a maintainer's signature as a git blob, and have
that object tagged and signed with such key.
Export your public key:
$ gpg --armor --export saghul@gmail.com > saghul.asc
Store it as a blob on the repo:
$ git hash-object -w saghul.asc
The previous command returns a hash, copy it. For the sake of this explanation,
we'll assume it's 'abcd1234'. Storing the blob in git is not enough, it could
be garbage collected since nothing references it, so we'll create a tag for it:
$ git tag -s pubkey-saghul abcd1234
Commit the changes and push:
$ git push origin pubkey-saghul

412
deps/libuv/Makefile.am vendored

@ -1,412 +0,0 @@
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ACLOCAL_AMFLAGS = -I m4
AM_CPPFLAGS = -I$(top_srcdir)/include \
-I$(top_srcdir)/src
include_HEADERS=include/uv.h include/uv-errno.h include/uv-threadpool.h include/uv-version.h
CLEANFILES =
lib_LTLIBRARIES = libuv.la
libuv_la_CFLAGS = @CFLAGS@
libuv_la_LDFLAGS = -no-undefined -version-info 1:0:0
libuv_la_SOURCES = src/fs-poll.c \
src/heap-inl.h \
src/inet.c \
src/queue.h \
src/threadpool.c \
src/uv-common.c \
src/uv-common.h \
src/version.c
if SUNOS
# Can't be turned into a CC_CHECK_CFLAGS in configure.ac, it makes compilers
# on other platforms complain that the argument is unused during compilation.
libuv_la_CFLAGS += -pthreads
endif
if WINNT
include_HEADERS += include/uv-win.h include/tree.h
AM_CPPFLAGS += -I$(top_srcdir)/src/win \
-DWIN32_LEAN_AND_MEAN \
-D_WIN32_WINNT=0x0600
libuv_la_SOURCES += src/win/async.c \
src/win/atomicops-inl.h \
src/win/core.c \
src/win/detect-wakeup.c \
src/win/dl.c \
src/win/error.c \
src/win/fs-event.c \
src/win/fs.c \
src/win/getaddrinfo.c \
src/win/getnameinfo.c \
src/win/handle.c \
src/win/handle-inl.h \
src/win/internal.h \
src/win/loop-watcher.c \
src/win/pipe.c \
src/win/poll.c \
src/win/process-stdio.c \
src/win/process.c \
src/win/req.c \
src/win/req-inl.h \
src/win/signal.c \
src/win/stream.c \
src/win/stream-inl.h \
src/win/tcp.c \
src/win/thread.c \
src/win/timer.c \
src/win/tty.c \
src/win/udp.c \
src/win/util.c \
src/win/winapi.c \
src/win/winapi.h \
src/win/winsock.c \
src/win/winsock.h
else # WINNT
include_HEADERS += include/uv-unix.h
AM_CPPFLAGS += -I$(top_srcdir)/src/unix
libuv_la_SOURCES += src/unix/async.c \
src/unix/atomic-ops.h \
src/unix/core.c \
src/unix/dl.c \
src/unix/fs.c \
src/unix/getaddrinfo.c \
src/unix/getnameinfo.c \
src/unix/internal.h \
src/unix/loop-watcher.c \
src/unix/loop.c \
src/unix/pipe.c \
src/unix/poll.c \
src/unix/process.c \
src/unix/signal.c \
src/unix/spinlock.h \
src/unix/stream.c \
src/unix/tcp.c \
src/unix/thread.c \
src/unix/timer.c \
src/unix/tty.c \
src/unix/udp.c
endif # WINNT
EXTRA_DIST = test/fixtures/empty_file \
test/fixtures/load_error.node \
include \
test \
docs \
img \
samples \
android-configure \
CONTRIBUTING.md \
LICENSE \
README.md \
checksparse.sh \
vcbuild.bat \
Makefile.mingw \
common.gypi \
gyp_uv.py \
uv.gyp
TESTS = test/run-tests
check_PROGRAMS = test/run-tests
if OS390
test_run_tests_CFLAGS =
else
test_run_tests_CFLAGS = -Wno-long-long
endif
if SUNOS
# Can't be turned into a CC_CHECK_CFLAGS in configure.ac, it makes compilers
# on other platforms complain that the argument is unused during compilation.
test_run_tests_CFLAGS += -pthreads
endif
test_run_tests_LDFLAGS =
test_run_tests_SOURCES = test/blackhole-server.c \
test/dns-server.c \
test/echo-server.c \
test/run-tests.c \
test/runner.c \
test/runner.h \
test/task.h \
test/test-active.c \
test/test-async.c \
test/test-async-null-cb.c \
test/test-barrier.c \
test/test-callback-order.c \
test/test-callback-stack.c \
test/test-close-fd.c \
test/test-close-order.c \
test/test-condvar.c \
test/test-connection-fail.c \
test/test-cwd-and-chdir.c \
test/test-default-loop-close.c \
test/test-delayed-accept.c \
test/test-dlerror.c \
test/test-eintr-handling.c \
test/test-embed.c \
test/test-emfile.c \
test/test-error.c \
test/test-fail-always.c \
test/test-fs-event.c \
test/test-fs-poll.c \
test/test-fs.c \
test/test-get-currentexe.c \
test/test-get-loadavg.c \
test/test-get-memory.c \
test/test-get-passwd.c \
test/test-getaddrinfo.c \
test/test-getnameinfo.c \
test/test-getsockname.c \
test/test-handle-fileno.c \
test/test-homedir.c \
test/test-hrtime.c \
test/test-idle.c \
test/test-ip4-addr.c \
test/test-ip6-addr.c \
test/test-ipc-send-recv.c \
test/test-ipc.c \
test/test-list.h \
test/test-loop-handles.c \
test/test-loop-alive.c \
test/test-loop-close.c \
test/test-loop-stop.c \
test/test-loop-time.c \
test/test-loop-configure.c \
test/test-multiple-listen.c \
test/test-mutexes.c \
test/test-osx-select.c \
test/test-pass-always.c \
test/test-ping-pong.c \
test/test-pipe-bind-error.c \
test/test-pipe-connect-error.c \
test/test-pipe-connect-multiple.c \
test/test-pipe-connect-prepare.c \
test/test-pipe-getsockname.c \
test/test-pipe-pending-instances.c \
test/test-pipe-sendmsg.c \
test/test-pipe-server-close.c \
test/test-pipe-close-stdout-read-stdin.c \
test/test-pipe-set-non-blocking.c \
test/test-platform-output.c \
test/test-poll-close.c \
test/test-poll-close-doesnt-corrupt-stack.c \
test/test-poll-closesocket.c \
test/test-poll.c \
test/test-process-title.c \
test/test-queue-foreach-delete.c \
test/test-ref.c \
test/test-run-nowait.c \
test/test-run-once.c \
test/test-semaphore.c \
test/test-shutdown-close.c \
test/test-shutdown-eof.c \
test/test-shutdown-twice.c \
test/test-signal-multiple-loops.c \
test/test-signal.c \
test/test-socket-buffer-size.c \
test/test-spawn.c \
test/test-stdio-over-pipes.c \
test/test-tcp-alloc-cb-fail.c \
test/test-tcp-bind-error.c \
test/test-tcp-bind6-error.c \
test/test-tcp-close-accept.c \
test/test-tcp-close-while-connecting.c \
test/test-tcp-close.c \
test/test-tcp-create-socket-early.c \
test/test-tcp-connect-error-after-write.c \
test/test-tcp-connect-error.c \
test/test-tcp-connect-timeout.c \
test/test-tcp-connect6-error.c \
test/test-tcp-flags.c \
test/test-tcp-open.c \
test/test-tcp-read-stop.c \
test/test-tcp-shutdown-after-write.c \
test/test-tcp-unexpected-read.c \
test/test-tcp-oob.c \
test/test-tcp-write-to-half-open-connection.c \
test/test-tcp-write-after-connect.c \
test/test-tcp-writealot.c \
test/test-tcp-write-fail.c \
test/test-tcp-try-write.c \
test/test-tcp-write-queue-order.c \
test/test-thread-equal.c \
test/test-thread.c \
test/test-threadpool-cancel.c \
test/test-threadpool.c \
test/test-timer-again.c \
test/test-timer-from-check.c \
test/test-timer.c \
test/test-tmpdir.c \
test/test-tty.c \
test/test-udp-alloc-cb-fail.c \
test/test-udp-bind.c \
test/test-udp-create-socket-early.c \
test/test-udp-dgram-too-big.c \
test/test-udp-ipv6.c \
test/test-udp-multicast-interface.c \
test/test-udp-multicast-interface6.c \
test/test-udp-multicast-join.c \
test/test-udp-multicast-join6.c \
test/test-udp-multicast-ttl.c \
test/test-udp-open.c \
test/test-udp-options.c \
test/test-udp-send-and-recv.c \
test/test-udp-send-immediate.c \
test/test-udp-send-unreachable.c \
test/test-udp-try-send.c \
test/test-walk-handles.c \
test/test-watcher-cross-stop.c
test_run_tests_LDADD = libuv.la
if WINNT
test_run_tests_SOURCES += test/runner-win.c \
test/runner-win.h
else
test_run_tests_SOURCES += test/runner-unix.c \
test/runner-unix.h
endif
if AIX
test_run_tests_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 -D_LINUX_SOURCE_COMPAT
endif
if LINUX
test_run_tests_CFLAGS += -D_GNU_SOURCE
endif
if SUNOS
test_run_tests_CFLAGS += -D__EXTENSIONS__ -D_XOPEN_SOURCE=500
endif
if OS390
test_run_tests_CFLAGS += -D_UNIX03_THREADS \
-D_UNIX03_SOURCE \
-D_OPEN_SYS_IF_EXT=1 \
-D_OPEN_SYS_SOCK_IPV6 \
-D_OPEN_MSGQ_EXT \
-D_XOPEN_SOURCE_EXTENDED \
-D_ALL_SOURCE \
-D_LARGE_TIME_API \
-D_OPEN_SYS_FILE_EXT \
-DPATH_MAX=255 \
-qCHARS=signed \
-qXPLINK \
-qFLOAT=IEEE
endif
if AIX
libuv_la_CFLAGS += -D_ALL_SOURCE -D_XOPEN_SOURCE=500 -D_LINUX_SOURCE_COMPAT -D_THREAD_SAFE
include_HEADERS += include/uv-aix.h
libuv_la_SOURCES += src/unix/aix.c
endif
if ANDROID
include_HEADERS += include/android-ifaddrs.h \
include/pthread-barrier.h
libuv_la_SOURCES += src/unix/android-ifaddrs.c \
src/unix/pthread-fixes.c \
src/unix/pthread-barrier.c
endif
if DARWIN
include_HEADERS += include/uv-darwin.h \
include/pthread-barrier.h
libuv_la_CFLAGS += -D_DARWIN_USE_64_BIT_INODE=1
libuv_la_CFLAGS += -D_DARWIN_UNLIMITED_SELECT=1
libuv_la_SOURCES += src/unix/darwin.c \
src/unix/darwin-proctitle.c \
src/unix/fsevents.c \
src/unix/kqueue.c \
src/unix/proctitle.c \
src/unix/pthread-barrier.c
test_run_tests_LDFLAGS += -lutil
endif
if DRAGONFLY
include_HEADERS += include/uv-bsd.h
libuv_la_SOURCES += src/unix/freebsd.c src/unix/kqueue.c
test_run_tests_LDFLAGS += -lutil
endif
if FREEBSD
include_HEADERS += include/uv-bsd.h
libuv_la_SOURCES += src/unix/freebsd.c src/unix/kqueue.c
test_run_tests_LDFLAGS += -lutil
endif
if LINUX
include_HEADERS += include/uv-linux.h
libuv_la_CFLAGS += -D_GNU_SOURCE
libuv_la_SOURCES += src/unix/linux-core.c \
src/unix/linux-inotify.c \
src/unix/linux-syscalls.c \
src/unix/linux-syscalls.h \
src/unix/proctitle.c
test_run_tests_LDFLAGS += -lutil
endif
if NETBSD
include_HEADERS += include/uv-bsd.h
libuv_la_SOURCES += src/unix/kqueue.c src/unix/netbsd.c
test_run_tests_LDFLAGS += -lutil
endif
if OPENBSD
include_HEADERS += include/uv-bsd.h
libuv_la_SOURCES += src/unix/kqueue.c src/unix/openbsd.c
test_run_tests_LDFLAGS += -lutil
endif
if SUNOS
include_HEADERS += include/uv-sunos.h
libuv_la_CFLAGS += -D__EXTENSIONS__ -D_XOPEN_SOURCE=500
libuv_la_SOURCES += src/unix/sunos.c
endif
if OS390
include_HEADERS += include/pthread-fixes.h include/pthread-barrier.h
libuv_la_CFLAGS += -D_UNIX03_THREADS \
-D_UNIX03_SOURCE \
-D_OPEN_SYS_IF_EXT=1 \
-D_OPEN_MSGQ_EXT \
-D_XOPEN_SOURCE_EXTENDED \
-D_ALL_SOURCE \
-D_LARGE_TIME_API \
-D_OPEN_SYS_SOCK_IPV6 \
-D_OPEN_SYS_FILE_EXT \
-DUV_PLATFORM_SEM_T=int \
-DPATH_MAX=255 \
-qCHARS=signed \
-qXPLINK \
-qFLOAT=IEEE
libuv_la_LDFLAGS += -qXPLINK
libuv_la_SOURCES += src/unix/pthread-fixes.c \
src/unix/pthread-barrier.c
libuv_la_SOURCES += src/unix/os390.c
endif
if HAVE_PKG_CONFIG
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = @PACKAGE_NAME@.pc
endif

85
deps/libuv/Makefile.mingw vendored

@ -1,85 +0,0 @@
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
CC ?= gcc
CFLAGS += -Wall \
-Wextra \
-Wno-unused-parameter \
-Iinclude \
-Isrc \
-Isrc/win \
-DWIN32_LEAN_AND_MEAN \
-D_WIN32_WINNT=0x0600
INCLUDES = include/stdint-msvc2008.h \
include/tree.h \
include/uv-errno.h \
include/uv-threadpool.h \
include/uv-version.h \
include/uv-win.h \
include/uv.h \
src/heap-inl.h \
src/queue.h \
src/uv-common.h \
src/win/atomicops-inl.h \
src/win/handle-inl.h \
src/win/internal.h \
src/win/req-inl.h \
src/win/stream-inl.h \
src/win/winapi.h \
src/win/winsock.h
OBJS = src/fs-poll.o \
src/inet.o \
src/threadpool.o \
src/uv-common.o \
src/version.o \
src/win/async.o \
src/win/core.o \
src/win/detect-wakeup.o \
src/win/dl.o \
src/win/error.o \
src/win/fs-event.o \
src/win/fs.o \
src/win/getaddrinfo.o \
src/win/getnameinfo.o \
src/win/handle.o \
src/win/loop-watcher.o \
src/win/pipe.o \
src/win/poll.o \
src/win/process-stdio.o \
src/win/process.o \
src/win/req.o \
src/win/signal.o \
src/win/stream.o \
src/win/tcp.o \
src/win/thread.o \
src/win/timer.o \
src/win/tty.o \
src/win/udp.o \
src/win/util.o \
src/win/winapi.o \
src/win/winsock.o
all: libuv.a
clean:
-$(RM) $(OBJS) libuv.a
libuv.a: $(OBJS)
$(AR) crs $@ $^
$(OBJS): %.o : %.c $(INCLUDES)
$(CC) $(CFLAGS) -c -o $@ $<

250
deps/libuv/README.md vendored

@ -1,250 +0,0 @@
![libuv][libuv_banner]
## Overview
libuv is a multi-platform support library with a focus on asynchronous I/O. It
was primarily developed for use by [Node.js](http://nodejs.org), but it's also
used by [Luvit](http://luvit.io/), [Julia](http://julialang.org/),
[pyuv](https://github.com/saghul/pyuv), and [others](https://github.com/libuv/libuv/wiki/Projects-that-use-libuv).
## Feature highlights
* Full-featured event loop backed by epoll, kqueue, IOCP, event ports.
* Asynchronous TCP and UDP sockets
* Asynchronous DNS resolution
* Asynchronous file and file system operations
* File system events
* ANSI escape code controlled TTY
* IPC with socket sharing, using Unix domain sockets or named pipes (Windows)
* Child processes
* Thread pool
* Signal handling
* High resolution clock
* Threading and synchronization primitives
## Versioning
Starting with version 1.0.0 libuv follows the [semantic versioning](http://semver.org/)
scheme. The API change and backwards compatibility rules are those indicated by
SemVer. libuv will keep a stable ABI across major releases.
The ABI/API changes can be tracked [here](http://abi-laboratory.pro/tracker/timeline/libuv/).
## Licensing
libuv is licensed under the MIT license. Check the [LICENSE file](LICENSE).
## Community
* [Mailing list](http://groups.google.com/group/libuv)
* [IRC chatroom (#libuv@irc.freenode.org)](http://webchat.freenode.net?channels=libuv&uio=d4)
## Documentation
### Official API documentation
Located in the docs/ subdirectory. It uses the [Sphinx](http://sphinx-doc.org/)
framework, which makes it possible to build the documentation in multiple
formats.
Show different supported building options:
$ make help
Build documentation as HTML:
$ make html
Build documentation as HTML and live reload it when it changes (this requires
sphinx-autobuild to be installed and is only supported on Unix):
$ make livehtml
Build documentation as man pages:
$ make man
Build documentation as ePub:
$ make epub
NOTE: Windows users need to use make.bat instead of plain 'make'.
Documentation can be browsed online [here](http://docs.libuv.org).
The [tests and benchmarks](https://github.com/libuv/libuv/tree/master/test)
also serve as API specification and usage examples.
### Other resources
* [An Introduction to libuv](http://nikhilm.github.com/uvbook/)
&mdash; An overview of libuv with tutorials.
* [LXJS 2012 talk](http://www.youtube.com/watch?v=nGn60vDSxQ4)
&mdash; High-level introductory talk about libuv.
* [libuv-dox](https://github.com/thlorenz/libuv-dox)
&mdash; Documenting types and methods of libuv, mostly by reading uv.h.
* [learnuv](https://github.com/thlorenz/learnuv)
&mdash; Learn uv for fun and profit, a self guided workshop to libuv.
These resources are not handled by libuv maintainers and might be out of
date. Please verify it before opening new issues.
## Downloading
libuv can be downloaded either from the
[GitHub repository](https://github.com/libuv/libuv)
or from the [downloads site](http://dist.libuv.org/dist/).
Starting with libuv 1.7.0, binaries for Windows are also provided. This is to
be considered EXPERIMENTAL.
Before verifying the git tags or signature files, importing the relevant keys
is necessary. Key IDs are listed in the
[MAINTAINERS](https://github.com/libuv/libuv/blob/master/MAINTAINERS.md)
file, but are also available as git blob objects for easier use.
Importing a key the usual way:
$ gpg --keyserver pool.sks-keyservers.net \
--recv-keys AE9BC059
Importing a key from a git blob object:
$ git show pubkey-saghul | gpg --import
### Verifying releases
Git tags are signed with the developer's key, they can be verified as follows:
$ git verify-tag v1.6.1
Starting with libuv 1.7.0, the tarballs stored in the
[downloads site](http://dist.libuv.org/dist/) are signed and an accompanying
signature file sits alongside each. Once both the release tarball and the
signature file are downloaded, the file can be verified as follows:
$ gpg --verify libuv-1.7.0.tar.gz.sign
## Build Instructions
For GCC there are two build methods: via autotools or via [GYP][].
GYP is a meta-build system which can generate MSVS, Makefile, and XCode
backends. It is best used for integration into other projects.
To build with autotools:
$ sh autogen.sh
$ ./configure
$ make
$ make check
$ make install
### Windows
First, [Python][] 2.6 or 2.7 must be installed as it is required by [GYP][].
If python is not in your path, set the environment variable `PYTHON` to its
location. For example: `set PYTHON=C:\Python27\python.exe`
To build with Visual Studio, launch a git shell (e.g. Cmd or PowerShell)
and run vcbuild.bat which will checkout the GYP code into build/gyp and
generate uv.sln as well as related project files.
To have GYP generate build script for another system, checkout GYP into the
project tree manually:
$ git clone https://chromium.googlesource.com/external/gyp.git build/gyp
### Unix
For Debug builds (recommended) run:
$ ./gyp_uv.py -f make
$ make -C out
For Release builds run:
$ ./gyp_uv.py -f make
$ BUILDTYPE=Release make -C out
Run `./gyp_uv.py -f make -Dtarget_arch=x32` to build [x32][] binaries.
### OS X
Run:
$ ./gyp_uv.py -f xcode
$ xcodebuild -ARCHS="x86_64" -project uv.xcodeproj \
-configuration Release -target All
Using Homebrew:
$ brew install --HEAD libuv
Note to OS X users:
Make sure that you specify the architecture you wish to build for in the
"ARCHS" flag. You can specify more than one by delimiting with a space
(e.g. "x86_64 i386").
### Android
Run:
$ source ./android-configure NDK_PATH gyp
$ make -C out
Note for UNIX users: compile your project with `-D_LARGEFILE_SOURCE` and
`-D_FILE_OFFSET_BITS=64`. GYP builds take care of that automatically.
### Using Ninja
To use ninja for build on ninja supported platforms, run:
$ ./gyp_uv.py -f ninja
$ ninja -C out/Debug #for debug build OR
$ ninja -C out/Release
### Running tests
Run:
$ ./gyp_uv.py -f make
$ make -C out
$ ./out/Debug/run-tests
## Supported Platforms
Check the [SUPPORTED_PLATFORMS file](SUPPORTED_PLATFORMS.md).
### AIX Notes
AIX support for filesystem events requires the non-default IBM `bos.ahafs`
package to be installed. This package provides the AIX Event Infrastructure
that is detected by `autoconf`.
[IBM documentation](http://www.ibm.com/developerworks/aix/library/au-aix_event_infrastructure/)
describes the package in more detail.
AIX support for filesystem events is not compiled when building with `gyp`.
## Patches
See the [guidelines for contributing][].
[node.js]: http://nodejs.org/
[GYP]: http://code.google.com/p/gyp/
[Python]: https://www.python.org/downloads/
[guidelines for contributing]: https://github.com/libuv/libuv/blob/master/CONTRIBUTING.md
[libuv_banner]: https://raw.githubusercontent.com/libuv/libuv/master/img/banner.png
[x32]: https://en.wikipedia.org/wiki/X32_ABI

70
deps/libuv/SUPPORTED_PLATFORMS.md vendored

@ -1,70 +0,0 @@
# Supported platforms
| System | Support type | Supported versions | Notes |
|---|---|---|---|
| GNU/Linux | Tier 1 | Linux >= 2.6.18 with glibc >= 2.5 | |
| macOS | Tier 1 | macOS >= 10.7 | |
| Windows | Tier 1 | Windows >= XP SP1 | MSVC 2008 and later are supported |
| FreeBSD | Tier 1 | >= 9 (see note) | |
| AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix |
| z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos |
| Linux with musl | Tier 2 | musl >= 1.0 | |
| SunOS | Tier 2 | Solaris 121 and later | Maintainers: @libuv/sunos |
| MinGW | Tier 3 | MinGW32 and MinGW-w64 | |
| Other | Tier 3 | N/A | |
#### Note on FreeBSD 9
While FreeBSD is supported as Tier 1, FreeBSD 9 will get Tier 2 support until
it reaches end of life, in December 2016.
## Support types
* **Tier 1**: Officially supported and tested with CI. Any contributed patch
MUST NOT break such systems. These are supported by @libuv/collaborators.
* **Tier 2**: Officially supported, but not necessarily tested with CI. These
systems are maintained to the best of @libuv/collaborators' ability,
without being a top priority.
* **Tier 3**: Community maintained. These systems may inadvertently break and the
community and interested parties are expected to help with the maintenance.
## Adding support for a new platform
**IMPORTANT**: Before attempting to add support for a new platform please open
an issue about it for discussion.
### Unix
I/O handling is abstracted by an internal `uv__io_t` handle. The new platform
will need to implement some of the functions, the prototypes are in
``src/unix/internal.h``.
If the new platform requires extra fields for any handle structure, create a
new include file in ``include/`` with the name ``uv-theplatform.h`` and add
the appropriate defines there.
All functionality related to the new platform must be implemented in its own
file inside ``src/unix/`` unless it's already done in a common file, in which
case adding an `ifdef` is fine.
Two build systems are supported: autotools and GYP. Ideally both need to be
supported, but if GYP does not support the new platform it can be left out.
### Windows
Windows is treated as a single platform, so adding support for a new platform
would mean adding support for a new version.
Compilation and runtime must succeed for the minimum supported version. If a
new API is to be used, it must be done optionally, only in supported versions.
### Common
Some common notes when adding support for new platforms:
* Generally libuv tries to avoid compile time checks. Do not add any to the
autotools based build system or use version checking macros.
Dynamically load functions and symbols if they are not supported by the
minimum supported version.

20
deps/libuv/android-configure vendored

@ -1,20 +0,0 @@
#!/bin/bash
export TOOLCHAIN=$PWD/android-toolchain
mkdir -p $TOOLCHAIN
$1/build/tools/make-standalone-toolchain.sh \
--toolchain=arm-linux-androideabi-4.9 \
--arch=arm \
--install-dir=$TOOLCHAIN \
--platform=android-21
export PATH=$TOOLCHAIN/bin:$PATH
export AR=arm-linux-androideabi-ar
export CC=arm-linux-androideabi-gcc
export CXX=arm-linux-androideabi-g++
export LINK=arm-linux-androideabi-g++
export PLATFORM=android
if [[ $2 == 'gyp' ]]
then
./gyp_uv.py -Dtarget_arch=arm -DOS=android -f make-android
fi

36
deps/libuv/appveyor.yml vendored

@ -1,36 +0,0 @@
version: v1.10.0.build{build}
install:
- cinst -y nsis
matrix:
fast_finish: true
allow_failures:
- platform: x86
configuration: Release
- platform: x64
configuration: Release
platform:
- x86
- x64
configuration:
- Release
build_script:
# Fixed tag version number if using a tag.
- cmd: if "%APPVEYOR_REPO_TAG%" == "true" set APPVEYOR_BUILD_VERSION=%APPVEYOR_REPO_TAG_NAME%
# vcbuild overwrites the platform variable.
- cmd: set ARCH=%platform%
- cmd: vcbuild.bat release %ARCH% shared
after_build:
- '"%PROGRAMFILES(x86)%\NSIS\makensis" /DVERSION=%APPVEYOR_BUILD_VERSION% /DARCH=%ARCH% libuv.nsi'
artifacts:
- name: Installer
path: 'libuv-*.exe'
cache:
- C:\projects\libuv\build\gyp

46
deps/libuv/autogen.sh vendored

@ -1,46 +0,0 @@
#!/bin/sh
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
cd `dirname "$0"`
if [ "$LIBTOOLIZE" = "" ] && [ "`uname`" = "Darwin" ]; then
LIBTOOLIZE=glibtoolize
fi
ACLOCAL=${ACLOCAL:-aclocal}
AUTOCONF=${AUTOCONF:-autoconf}
AUTOMAKE=${AUTOMAKE:-automake}
LIBTOOLIZE=${LIBTOOLIZE:-libtoolize}
automake_version=`"$AUTOMAKE" --version | head -n 1 | sed 's/[^.0-9]//g'`
automake_version_major=`echo "$automake_version" | cut -d. -f1`
automake_version_minor=`echo "$automake_version" | cut -d. -f2`
UV_EXTRA_AUTOMAKE_FLAGS=
if test "$automake_version_major" -gt 1 || \
test "$automake_version_major" -eq 1 && \
test "$automake_version_minor" -gt 11; then
# serial-tests is available in v1.12 and newer.
UV_EXTRA_AUTOMAKE_FLAGS="$UV_EXTRA_AUTOMAKE_FLAGS serial-tests"
fi
echo "m4_define([UV_EXTRA_AUTOMAKE_FLAGS], [$UV_EXTRA_AUTOMAKE_FLAGS])" \
> m4/libuv-extra-automake-flags.m4
set -ex
"$LIBTOOLIZE" --copy
"$ACLOCAL" -I m4
"$AUTOCONF"
"$AUTOMAKE" --add-missing --copy


@ -1 +0,0 @@
*.pyc


@ -1,11 +0,0 @@
language: cpp
compiler:
- clang
before_install: ./buildbot/travis-checkout.sh
script: ./buildbot/travis-test.sh
os:
- linux
- osx
branches:
only:
- master


@ -1,16 +0,0 @@
# Names should be added to this file like so:
# Name or Organization <email address>
Google Inc. <*@google.com>
Bloomberg Finance L.P. <*@bloomberg.net>
IBM Inc. <*@*.ibm.com>
Yandex LLC <*@yandex-team.ru>
Steven Knight <knight@baldmt.com>
Ryan Norton <rnorton10@gmail.com>
David J. Sankel <david@sankelsoftware.com>
Eric N. Vander Weele <ericvw@gmail.com>
Tom Freudenberg <th.freudenberg@gmail.com>
Julien Brianceau <jbriance@cisco.com>
Refael Ackermann <refack@gmail.com>
Jiajie Hu <jiajie.hu@intel.com>


@ -1,23 +0,0 @@
# DEPS file for gclient use in buildbot execution of gyp tests.
#
# (You don't need to use gclient for normal GYP development work.)
vars = {
"chromium_git": "https://chromium.googlesource.com/",
}
deps = {
}
deps_os = {
"win": {
"third_party/cygwin":
Var("chromium_git") + "chromium/deps/cygwin@4fbd5b9",
"third_party/python_26":
Var("chromium_git") + "chromium/deps/python_26@5bb4080",
"src/third_party/pefile":
Var("chromium_git") + "external/pefile@72c6ae4",
},
}


@ -1,27 +0,0 @@
Copyright (c) 2009 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1 +0,0 @@
*


@ -1,126 +0,0 @@
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Possible unbalanced tuple unpacking with sequence.
'W0632',
# Attempting to unpack a non-sequence.
'W0633',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# map/filter on lambda could be replaced by comprehension.
'W0110',
# Use of eval.
'W0123',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Cyclic import.
'R0401',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def _LicenseHeader(input_api):
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
return (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=_LicenseHeader(input_api)))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=_LicenseHeader(input_api)))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report


@ -1,4 +0,0 @@
GYP can Generate Your Projects.
===================================
Documents are available at [gyp.gsrc.io](https://gyp.gsrc.io), or you can check out ```md-pages``` branch to read those documents offline.


@ -1,6 +0,0 @@
# This file is used by git cl to get repository specific information.
CC_LIST: gyp-developer@googlegroups.com
CODE_REVIEW_SERVER: codereview.chromium.org
GERRIT_HOST: True
PROJECT: gyp
VIEW_VC: https://chromium.googlesource.com/external/gyp/+/


@ -1,12 +0,0 @@
// Copyright (c) 2013 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is used to generate an empty .pdb -- with a 4KB pagesize -- that is
// then used during the final link for modules that have large PDBs. Otherwise,
// the linker will generate a pdb with a page size of 1KB, which imposes a limit
// of 1GB on the .pdb. By generating an initial empty .pdb with the compiler
// (rather than the linker), this limit is avoided. With this in place PDBs may
// grow to 2GB.
//
// This file is referenced by the msvs_large_pdb mechanism in MSVSUtil.py.


@ -1,8 +0,0 @@
#!/bin/sh
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e
base=$(dirname "$0")
exec python "${base}/gyp_main.py" "$@"


@ -1,5 +0,0 @@
@rem Copyright (c) 2009 Google Inc. All rights reserved.
@rem Use of this source code is governed by a BSD-style license that can be
@rem found in the LICENSE file.
@python "%~dp0gyp_main.py" %*


@ -1,16 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Make sure we're using the version of pylib in this repo, not one installed
# elsewhere on the system.
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
import gyp
if __name__ == '__main__':
sys.exit(gyp.script_main())

View File

@ -1,243 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gyptest.py -- test runner for GYP tests."""
from __future__ import print_function
import argparse
import math
import os
import platform
import subprocess
import sys
import time
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
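# Editor's sketch, not part of the original gyptest.py: only files named
# gyptest*.py are picked up by the walker above (filenames are made up).
assert is_test_name('gyptest-defines.py')
assert not is_test_name('TestGyp.py')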
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser()
parser.add_argument("-a", "--all", action="store_true",
help="run all tests")
parser.add_argument("-C", "--chdir", action="store",
help="change to directory")
parser.add_argument("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_argument("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_argument("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_argument("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_argument("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_argument("-q", "--quiet", action="store_true",
help="quiet, don't print anything unless there are failures")
parser.add_argument("-v", "--verbose", action="store_true",
help="print configuration info and test results.")
parser.add_argument('tests', nargs='*')
args = parser.parse_args(argv[1:])
if args.chdir:
os.chdir(args.chdir)
if args.path:
extra_path = [os.path.abspath(p) for p in args.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args.tests:
if not args.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args.tests = ['test']
tests = []
for arg in args.tests:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print(arg, 'is not a valid gyp test name.', file=sys.stderr)
sys.exit(1)
tests.append(arg)
if args.list:
for test in tests:
print(test)
sys.exit(0)
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if args.verbose:
print_configuration_info()
if args.gyp_option and not args.quiet:
print('Extra Gyp options: %s\n' % args.gyp_option)
if args.format:
format_list = args.format.split(',')
else:
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux': ['make', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
# TODO: Re-enable xcode-ninja.
# https://bugs.chromium.org/p/gyp/issues/detail?id=530
# 'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
'darwin': ['make', 'ninja', 'xcode'],
}[sys.platform]
gyp_options = []
for option in args.gyp_option:
gyp_options += ['-G', option]
runner = Runner(format_list, tests, gyp_options, args.verbose)
runner.run()
if not args.quiet:
runner.print_results()
if runner.failures:
return 1
else:
return 0
def print_configuration_info():
print('Test configuration:')
if sys.platform == 'darwin':
sys.path.append(os.path.abspath('test/lib'))
import TestMac
print(' Mac %s %s' % (platform.mac_ver()[0], platform.mac_ver()[2]))
print(' Xcode %s' % TestMac.Xcode.Version())
elif sys.platform == 'win32':
sys.path.append(os.path.abspath('pylib'))
import gyp.MSVSVersion
print(' Win %s %s\n' % platform.win32_ver()[0:2])
print(' MSVS %s' %
gyp.MSVSVersion.SelectVisualStudioVersion().Description())
elif sys.platform in ('linux', 'linux2'):
print(' Linux %s' % ' '.join(platform.linux_distribution()))
print(' Python %s' % platform.python_version())
print(' PYTHONPATH=%s' % os.environ['PYTHONPATH'])
print()
class Runner(object):
def __init__(self, formats, tests, gyp_options, verbose):
self.formats = formats
self.tests = tests
self.verbose = verbose
self.gyp_options = gyp_options
self.failures = []
self.num_tests = len(formats) * len(tests)
num_digits = len(str(self.num_tests))
self.fmt_str = '[%%%dd/%%%dd] (%%s) %%s' % (num_digits, num_digits)
self.isatty = sys.stdout.isatty() and not self.verbose
self.env = os.environ.copy()
self.hpos = 0
def run(self):
run_start = time.time()
i = 1
for fmt in self.formats:
for test in self.tests:
self.run_test(test, fmt, i)
i += 1
if self.isatty:
self.erase_current_line()
self.took = time.time() - run_start
def run_test(self, test, fmt, i):
if self.isatty:
self.erase_current_line()
msg = self.fmt_str % (i, self.num_tests, fmt, test)
self.print_(msg)
start = time.time()
cmd = [sys.executable, test] + self.gyp_options
self.env['TESTGYP_FORMAT'] = fmt
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=self.env)
proc.wait()
took = time.time() - start
stdout = proc.stdout.read().decode('utf8')
if proc.returncode == 2:
res = 'skipped'
elif proc.returncode:
res = 'failed'
self.failures.append('(%s) %s' % (test, fmt))
else:
res = 'passed'
res_msg = ' %s %.3fs' % (res, took)
self.print_(res_msg)
if (stdout and
not stdout.endswith('PASSED\n') and
not (stdout.endswith('NO RESULT\n'))):
print()
for l in stdout.splitlines():
print(' %s' % l)
elif not self.isatty:
print()
def print_(self, msg):
print(msg, end='')
index = msg.rfind('\n')
if index == -1:
self.hpos += len(msg)
else:
self.hpos = len(msg) - index
sys.stdout.flush()
def erase_current_line(self):
print('\b' * self.hpos + ' ' * self.hpos + '\b' * self.hpos, end='')
sys.stdout.flush()
self.hpos = 0
def print_results(self):
num_failures = len(self.failures)
if num_failures:
print()
if num_failures == 1:
print("Failed the following test:")
else:
print("Failed the following %d tests:" % num_failures)
print("\t" + "\n\t".join(sorted(self.failures)))
print()
print('Ran %d tests in %.3fs, %d failed.' % (self.num_tests, self.took,
num_failures))
print()
if __name__ == "__main__":
sys.exit(main())

View File

@ -1,340 +0,0 @@
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can determine
the GUID to refer to explicitly. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
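# Editor's sketch (not part of the vendored file): the same name/seed pair
# always produces the same GUID-shaped string, so rebuilding a project does
# not churn its identifier. The target name below is made up.
g1 = MakeGuid('base/base.gyp:base')
g2 = MakeGuid('base/base.gyp:base')
assert g1 == g2
assert g1.startswith('{') and g1.endswith('}') and len(g1) == 38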
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()

View File

@ -1,208 +0,0 @@
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self._attrs = attrs or {}
self._attrs['Name'] = name
def _GetSpecification(self):
"""Creates an element for the tool.
Returns:
A new xml.dom.Element for the tool.
"""
return ['Tool', self._attrs]
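# Editor's illustration (not part of the original file): the nested-list
# specification form consumed by easy_xml, shown for a made-up tool name.
assert Tool('VCCLCompilerTool', {'Optimization': '2'})._GetSpecification() == \
    ['Tool', {'Name': 'VCCLCompilerTool', 'Optimization': '2'}]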
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version, name, guid=None, platforms=None):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
name: Name of the project.
guid: GUID to use for project, if not None.
platforms: List of strings, the supported platforms. If None, defaults to ['Win32'].
"""
self.project_path = project_path
self.version = version
self.name = name
self.guid = guid
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Initialize the specifications of the various sections.
self.platform_section = ['Platforms']
for platform in platforms:
self.platform_section.append(['Platform', {'Name': platform}])
self.tool_files_section = ['ToolFiles']
self.configurations_section = ['Configurations']
self.files_section = ['Files']
# Keep a dict keyed on filename to speed up access.
self.files_dict = dict()
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
"""Returns the specification for a configuration.
Args:
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Returns:
The specification as a nested list of the form used by easy_xml: the node
name, its attributes, and any tool specifications.
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
node_attrs = attrs.copy()
node_attrs['Name'] = config_name
specification = [config_type, node_attrs]
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
specification.append(t._GetSpecification())
else:
specification.append(Tool(t)._GetSpecification())
return specification
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
self.configurations_section.append(spec)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = ['Filter', {'Name': f.name}]
self._AddFilesToNode(node, f.contents)
else:
node = ['File', {'RelativePath': f}]
self.files_dict[f] = node
parent.append(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.files_section, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
tools)
parent.append(spec)
def WriteIfChanged(self):
"""Writes the project file."""
# First create XML content definition
content = [
'VisualStudioProject',
{'ProjectType': 'Visual C++',
'Version': self.version.ProjectVersion(),
'Name': self.name,
'ProjectGUID': self.guid,
'RootNamespace': self.name,
'Keyword': 'Win32Proj'
},
self.platform_section,
self.tool_files_section,
self.configurations_section,
['References'], # empty section
self.files_section,
['Globals'] # empty section
]
easy_xml.WriteXmlIfChanged(content, self.project_path,
encoding="Windows-1252")

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,58 +0,0 @@
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")

View File

@ -1,147 +0,0 @@
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
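# Editor's sketch of the quoting rules above (argument values are made up):
# plain tokens pass through, embedded spaces get surrounding quotes, and
# embedded double-quotes are doubled before the whole argument is quoted.
quoted = _QuoteWin32CommandLineArgs(
    ['run', 'C:\\Program Files\\tool.exe', 'say "hi"'])
assert quoted == ['run', '"C:\\Program Files\\tool.exe"', '"say ""hi"""']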
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
environment: dict of environment variables to set for the debuggee. (optional)
working_directory: directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")

View File

@ -1,271 +0,0 @@
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
'windows_driver': 'sys',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
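# Editor's sketch (made-up dict): keys absent from the input are silently
# skipped, and copied values are deep copies rather than shared references.
original = {'configurations': {'Debug': {}}, 'sources': []}
copied = _DeepCopySomeKeys(original, ['configurations', 'toolset'])
assert copied == {'configurations': {'Debug': {}}}
assert copied['configurations'] is not original['configurations']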
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
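# Editor's illustration of the naming helpers above (target names made up):
assert _SuffixName('base/base.gyp:base#target', 'large_pdb_shim') == \
    'base/base.gyp:base_large_pdb_shim#target'
assert _ShardName('foo#target', 2) == 'foo_2#target'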
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in sorted(new_target_dicts):
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
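# Editor's sketch of the default case above, with a hypothetical target and
# generator variables: no explicit linker path or msvs_large_pdb_path is set,
# so the name falls back to <(PRODUCT_DIR)/<target>.<ext>.pdb.
target = {'target_name': 'foo', 'type': 'shared_library',
          'configurations': {'Debug': {}}}
assert _GetPdbPath(target, 'Debug', {'PRODUCT_DIR': '<(PRODUCT_DIR)'}) == \
    '<(PRODUCT_DIR)/foo.dll.pdb'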
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
file, polluting GYP's tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
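# Editor's note: a hypothetical .gyp excerpt (not from this repository)
# showing how a target opts in to the large-PDB shim machinery above; all
# names are made up for illustration.
{
  'targets': [
    {
      'target_name': 'big_module',
      'type': 'shared_library',
      'msvs_large_pdb': 1,   # picked up by InsertLargePdbShims
      'sources': [ 'big_module.cc' ],
    },
  ],
}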

View File

@ -1,491 +0,0 @@
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
def JoinPath(*args):
return os.path.normpath(os.path.join(*args))
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None, compatible_sdks=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
compatible_sdks = compatible_sdks or []
compatible_sdks.sort(key=lambda v: float(v.replace('v', '')), reverse=True)
self.compatible_sdks = compatible_sdks
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def _SetupScriptInternal(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
assert target_arch in ('x86', 'x64'), "target_arch not supported"
# If WindowsSDKDir is set and SetEnv.Cmd exists then we are using the
# depot_tools build tools and should run SetEnv.Cmd to set up the
# environment. The check for WindowsSDKDir alone is not sufficient because
# this is set by running vcvarsall.bat.
sdk_dir = os.environ.get('WindowsSDKDir', '')
setup_path = JoinPath(sdk_dir, 'Bin', 'SetEnv.Cmd')
if self.sdk_based and sdk_dir and os.path.exists(setup_path):
return [setup_path, '/' + target_arch]
is_host_arch_x64 = (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'
)
# For VS2017 (and newer) it's fairly easy
if self.short_name >= '2017':
script_path = JoinPath(self.path,
'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')
# Always use a native executable, cross-compiling if necessary.
host_arch = 'amd64' if is_host_arch_x64 else 'x86'
msvc_target_arch = 'amd64' if target_arch == 'x64' else 'x86'
arg = host_arch
if host_arch != msvc_target_arch:
arg += '_' + msvc_target_arch
return [script_path, arg]
# We try to find the best version of the env setup batch.
vcvarsall = JoinPath(self.path, 'VC', 'vcvarsall.bat')
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and \
is_host_arch_x64:
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [vcvarsall, 'amd64_x86']
else:
# Otherwise, the standard x86 compiler. We don't use VC/vcvarsall.bat
# for x86 because vcvarsall calls vcvars32, which it can only find if
# VS??COMNTOOLS is set, which isn't guaranteed.
return [JoinPath(self.path, 'Common7', 'Tools', 'vsvars32.bat')]
elif target_arch == 'x64':
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express edition and
# we're running on a 64bit OS.
if self.short_name[-1] != 'e' and is_host_arch_x64:
arg = 'amd64'
return [vcvarsall, arg]
def SetupScript(self, target_arch):
script_data = self._SetupScriptInternal(target_arch)
script_path = script_data[0]
if not os.path.exists(script_path):
raise Exception('%s is missing - make sure VC++ tools are installed.' %
script_path)
return script_data
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to work with
any Python build; Cygwin Python, for instance, lacks that module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
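# Editor's usage sketch (only meaningful on Windows; key and value are
# illustrative): this is how _DetectVisualStudioVersions below probes for an
# installation directory.
install_dir = _RegistryGetValue(
    r'HKLM\Software\Microsoft\VisualStudio\14.0', 'InstallDir')
# install_dir holds the devenv.exe directory if VS 2015 is installed, else None.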
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2017': VisualStudioVersion('2017',
'Visual Studio 2017',
solution_version='12.00',
project_version='15.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v141',
compatible_sdks=['v8.1', 'v10.0']),
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
2017 - Visual Studio 2017 (15)
Where (e) is the 'e' suffix used for Express editions of MSVS; it is absent otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
'15.0': '2017'
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Microsoft\VisualStudio\SxS\VS7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VS7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version == '15.0':
if os.path.exists(path):
versions.append(_CreateVersion('2017', path))
elif version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('15.0', '14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
'2017': ('15.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]

View File

@ -1,548 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
if 'all' in gyp.debug or mode in gyp.debug:
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
if args:
message %= args
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message)
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file.endswith(extension):
build_files.append(file)
return build_files
def Load(build_files, format, default_variables={},
includes=[], depth='.', params=None, check=False,
circular_check=True, duplicate_basename_check=True):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
if params is None:
params = {}
if '-' in format:
format, params['flavor'] = format.split('-', 1)
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')
# Format can be a custom python file, or by default the name of a module
# within gyp.generator.
if format.endswith('.py'):
generator_name = os.path.splitext(format)[0]
path, generator_name = os.path.split(generator_name)
# Make sure the path to the custom generator is in sys.path
# Don't worry about removing it once we are done. Keeping the path
# to each generator that is used in sys.path is likely harmless and
# arguably a good idea.
path = os.path.abspath(path)
if path not in sys.path:
sys.path.insert(0, path)
else:
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
for (key, val) in generator.generator_default_variables.items():
default_variables.setdefault(key, val)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Give the generator the opportunity to set generator_input_info based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateGeneratorInputInfo', None):
generator.CalculateGeneratorInputInfo(params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
'generator_wants_static_library_dependencies_adjusted':
getattr(generator,
'generator_wants_static_library_dependencies_adjusted', True),
'generator_wants_sorted_dependencies':
getattr(generator, 'generator_wants_sorted_dependencies', False),
'generator_filelist_paths':
getattr(generator, 'generator_filelist_paths', None),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check, circular_check,
duplicate_basename_check,
params['parallel'], params['root_targets'])
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
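# Editor's sketch of the parsing above (define names are made up): plain names
# become True, and numeric values are converted to integers.
assert NameValueListToDict(['OS=win', 'component=1', 'clang']) == \
    {'OS': 'win', 'component': 1, 'clang': True}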
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
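# Editor's illustration (flag names assumed): long options get '=', short
# options are concatenated directly.
assert FormatOpt('--depth', '.') == '--depth=.'
assert FormatOpt('-I', 'common.gypi') == '-Icommon.gypi'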
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
value = FormatOpt(flag, predicate(flag_value))
if value in flags:
flags.remove(value)
flags.append(value)
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
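# Illustrative values for FormatOpt (the option spellings are the ones gyp
# registers below in gyp_main):
#   FormatOpt('--depth', 'src')  => '--depth=src'
#   FormatOpt('-D', 'OS=linux')  => '-DOS=linux'
# RegenerateAppendFlag then builds, e.g. for the -D/GYP_DEFINES pair, a list
# such as ['-DOS=linux', '-Dcomponent=shared_library'], with values from the
# command line appended after those taken from the environment.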
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating; for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('--build', dest='configs', action='append',
help='configuration for build after project generation')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--config-dir', dest='config_dir', action='store',
env_name='GYP_CONFIG_DIR', default=None,
help='The location for configuration files like '
'include.gypi.')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# --no-duplicate-basename-check disables the check for duplicate basenames
# in a static_library/shared_library project. Visual C++ 2008 generator
# doesn't support this configuration. Libtool on Mac also generates warnings
# when duplicate basenames are passed into Make generator on Mac.
# TODO(yukawa): Remove this option when these legacy generators are
# deprecated.
parser.add_option('--no-duplicate-basename-check',
dest='duplicate_basename_check', action='store_false',
default=True, regenerate=False,
help="don't check for duplicate basenames")
parser.add_option('--no-parallel', action='store_true', default=False,
help='Disable multiprocessing')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
parser.add_option('-R', '--root-target', dest='root_targets',
action='append', metavar='TARGET',
help='include only TARGET and its deep dependencies')
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
# Set up the configuration directory (defaults to ~/.gyp)
if not options.config_dir:
home = None
home_dot_gyp = None
if options.use_environment:
home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
if home_dot_gyp:
home_dot_gyp = os.path.expanduser(home_dot_gyp)
if not home_dot_gyp:
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
else:
home_dot_gyp = os.path.expanduser(options.config_dir)
if home_dot_gyp and not os.path.exists(home_dot_gyp):
home_dot_gyp = None
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split(r'[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
options.parallel = not options.no_parallel
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
else:
DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise GypError('Could not automatically locate src directory. This is '
'a temporary Chromium feature that will be removed. Use '
'--depth as a workaround.')
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s", cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp,
'parallel': options.parallel,
'root_targets': options.root_targets,
'target_arch': cmdline_default_variables.get('target_arch', '')}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(
build_files, format, cmdline_default_variables, includes, options.depth,
params, options.check, options.circular_check,
options.duplicate_basename_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
if options.configs:
valid_configs = targets[flat_list[0]]['configurations'].keys()
for conf in options.configs:
if conf not in valid_configs:
raise GypError('Invalid config specified via --build: %s' % conf)
generator.PerformBuild(data, options.configs, params)
# Done
return 0
def main(args):
try:
return gyp_main(args)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
return main(sys.argv[1:])
if __name__ == '__main__':
sys.exit(script_main())

View File

@ -1,619 +0,0 @@
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
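# A minimal usage sketch of the memoize decorator (hypothetical function):
#   @memoize
#   def Fib(n):
#     return n if n < 2 else Fib(n - 1) + Fib(n - 2)
#   Fib(30)  # each distinct argument tuple is computed once and then cached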
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd.
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
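# A small sketch of GetEnvironFallback (hypothetical variable names):
#   GetEnvironFallback(('CC_target', 'CC'), 'cc')
# returns $CC_target if set, else $CC if set, else the literal default 'cc'.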
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
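# Round-trip sketch for qualified target names (hypothetical paths/targets):
#   QualifiedTarget('foo/bar.gyp', 'baz', 'host')  => 'foo/bar.gyp:baz#host'
#   ParseQualifiedTarget('foo/bar.gyp:baz#host')   => ['foo/bar.gyp', 'baz', 'host']
#   ParseQualifiedTarget('baz')                    => [None, 'baz', None]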
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# If |follow_path_symlink| is true (default) and |path| is a symlink, then
# this method returns a path to the real file represented by |path|. If it is
# false, this method returns a path to the symlink. If |path| is not a
# symlink, this option has no effect.
# Convert to normalized (and therefore absolute paths).
if follow_path_symlink:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
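# A minimal sketch of RelativePath, assuming both arguments are plain paths
# under the current directory and contain no symlinks:
#   RelativePath('foo/bar/baz.c', 'foo')  => 'bar/baz.c'
#   RelativePath('foo', 'foo/bar')        => '..'
#   RelativePath('foo', 'foo')            => ''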
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
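# Illustrative encodings (values chosen to exercise the rules above):
#   EncodePOSIXShellArgument('hello')        => 'hello'
#   EncodePOSIXShellArgument('hello world')  => '"hello world"'
#   EncodePOSIXShellArgument('say "hi"')     => '"say \"hi\""'
#   EncodePOSIXShellArgument('$HOME/dir')    => '"$HOME/dir"'  ($ left expandable)
#   EncodePOSIXShellList(['cp', '-r', 'a dir', '$DEST'])
#     => 'cp -r "a dir" "$DEST"'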
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file-like object which will write to a temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
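# A minimal usage sketch of WriteOnDiff (hypothetical output path):
#   out = WriteOnDiff('out/Release/obj.target/foo.mk')
#   out.write('# generated makefile fragment\n')
#   out.close()  # the existing file is replaced only if the bytes differ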
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('netbsd'):
return 'netbsd'
if sys.platform.startswith('aix'):
return 'aix'
if sys.platform.startswith('zos'):
return 'zos'
if sys.platform.startswith('os390'):
return 'zos'
return 'linux'
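# Illustrative results of GetFlavor:
#   GetFlavor({'flavor': 'win'})  => 'win'   (an explicit flavor always wins)
#   GetFlavor({})                 => 'mac' on darwin, 'win' on win32/cygwin,
#                                    'linux' on any unrecognized platform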
def CopyTool(flavor, out_path, generator_flags={}):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Set custom header flags.
header = '# Generated by gyp. Do not edit.\n'
mac_toolchain_dir = generator_flags.get('mac_toolchain_dir', None)
if flavor == 'mac' and mac_toolchain_dir:
header += "import os;\nos.environ['DEVELOPER_DIR']='%s'\n" \
% mac_toolchain_dir
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], header] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
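# A small sketch of uniquer (order-preserving de-duplication):
#   uniquer([1, 2, 1, 3, 2])                  => [1, 2, 3]
#   uniquer(['a', 'A', 'b'], idfun=str.lower) => ['a', 'b']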
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
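# A minimal usage sketch of OrderedSet (insertion order is preserved):
#   s = OrderedSet(['b', 'a', 'b', 'c'])
#   list(s)            => ['b', 'a', 'c']
#   s.add('a')         # re-adding an element does not move it
#   s.pop()            => 'c'  (last element)
#   s.pop(last=False)  => 'b'  (first element)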
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the nodes in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))

View File

@ -1,72 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
self.assertFlavor('openbsd', 'openbsd5' , {})
self.assertFlavor('solaris', 'sunos5' , {})
self.assertFlavor('solaris', 'sunos' , {})
self.assertFlavor('linux' , 'linux2' , {})
self.assertFlavor('linux' , 'linux3' , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()

View File

@ -1,162 +0,0 @@
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
import locale
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
default_encoding = locale.getdefaultlocale()[1]
if default_encoding and default_encoding.upper() != encoding.upper():
xml_string = xml_string.decode(default_encoding).encode(encoding)
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
'"': '&quot;',
"'": '&apos;',
'<': '&lt;',
'>': '&gt;',
'&': '&amp;',
'\n': '&#xA;',
'\r': '&#xD;',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
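# Illustrative escapes (attribute vs. text context):
#   _XmlEscape('a<b & "c"')        => 'a&lt;b &amp; &quot;c&quot;'
#   _XmlEscape("it's", attr=True)  => "it's"        (single quotes kept in attrs)
#   _XmlEscape("it's")             => 'it&apos;s'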

View File

@ -1,103 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '&lt;test&gt;\'&quot;&#xD;&amp;&#xA;foo'
converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()

View File

@ -1,54 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
class FlockTool(object):
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
if sys.platform.startswith('aix'):
# Python on AIX is compiled with LARGEFILE support, which changes the
# struct size.
op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
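# A hedged sketch of how the copied tool is typically invoked from generated
# makefiles (hypothetical lock file and command):
#   gyp-flock-tool flock /path/to/linker.lock ar crs libfoo.a foo.o
# which takes the lock on /path/to/linker.lock, runs the command, and returns
# the command's exit status.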
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))

View File

@ -1,741 +0,0 @@
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
test_targets: unqualified target names to search for. Any target in this list
that depends upon a file in |files| is output regardless of the type of target
or chain of dependencies.
additional_compile_targets: Unqualified targets to search for in addition to
test_targets. Targets in the combined list that depend upon a file in |files|
are not necessarily output. For example, if the target is of type none then the
target is not output (but one of the descendants of the target will be).
The following is output:
error: only supplied if there is an error.
compile_targets: minimal set of targets that directly or indirectly (for
targets of type none) depend on the files in |files| and is one of the
supplied targets or a target that one of the supplied targets depends on.
The expectation is this set of targets is passed into a build step. This list
always contains the output of test_targets as well.
test_targets: set of targets from the supplied |test_targets| that either
directly or indirectly depend upon a file in |files|. This list is useful
if additional processing needs to be done for certain targets after the
build, such as running tests.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case test_targets and compile_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
Example:
Consider a graph like the following:
  A   D
 / \
B   C
A depends upon both B and C; A is of type none, and B and C are executables.
D is an executable, has no dependencies and nothing depends on it.
If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
the following is output:
|compile_targets| = ["B"] B must built as it depends upon the changed file b.cc
and the supplied target A depends upon it. A is not output as a build_target
as it is of type none with no rules and actions.
|test_targets| = ["B"] B directly depends upon the change file b.cc.
Even though the file d.cc, which D depends upon, has changed D is not output
as it was not supplied by way of |additional_compile_targets| or |test_targets|.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
In Gyp the "all" target is shorthand for the root targets in the files passed
to gyp. For example, if file "a.gyp" contains targets "a1" and
"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency
on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2".
Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not
directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp
then the "all" target includes "b1" and "b2".
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
debug = False
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used to indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status yet.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
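# Illustrative resolutions (hypothetical path components):
#   _ResolveParent('../../foo.cc', ['a', 'b', 'c'])   => 'a/foo.cc'
#   _ResolveParent('../../../../foo.cc', ['a', 'b'])  => ''   (escapes the tree)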
def _AddSources(sources, base_path, base_path_components, result):
"""Extracts valid sources from |sources| and adds them to |result|. Each
source file is relative to |base_path|, but may contain '..'. To make
resolving '..' easier |base_path_components| contains each of the
directories in |base_path|. Additionally each source may contain variables.
Such sources are ignored as it is assumed dependencies on them are expressed
and tracked in some other means."""
# NOTE: gyp paths are always posix style.
for source in sources:
if not len(source) or source.startswith('!!!') or source.startswith('$'):
continue
# variable expansion may lead to //.
org_source = source
source = source[0] + source[1:].replace('//', '/')
if source.startswith('../'):
source = _ResolveParent(source, base_path_components)
if len(source):
result.append(source)
continue
result.append(base_path + source)
if debug:
print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
results):
if 'inputs' in action:
_AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
# |target| is either absolute or relative and in the format of the OS. Gyp
# source paths are always posix. Convert |target| to a posix path relative to
# |toplevel_dir_|. This is done to make it easy to build source paths.
base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
base_path_components = base_path.split('/')
# Add a trailing '/' so that _AddSources() can easily build paths.
if len(base_path):
base_path += '/'
if debug:
print 'ExtractSources', target, base_path
results = []
if 'sources' in target_dict:
_AddSources(target_dict['sources'], base_path, base_path_components,
results)
# Include the inputs from any actions. Any changes to these affect the
# resulting output.
if 'actions' in target_dict:
for action in target_dict['actions']:
_ExtractSourcesFromAction(action, base_path, base_path_components,
results)
if 'rules' in target_dict:
for rule in target_dict['rules']:
_ExtractSourcesFromAction(rule, base_path, base_path_components, results)
return results
class Target(object):
"""Holds information about a particular target:
deps: set of Targets this Target depends upon. This is not recursive, only the
direct dependent Targets.
match_status: one of the MatchStatus values.
back_deps: set of Targets that have a dependency on this Target.
visited: used during iteration to indicate whether we've visited this target.
This is used for two iterations, once in building the set of Targets and
again in _GetBuildTargets().
name: fully qualified name of the target.
requires_build: True if the target type is such that it needs to be built.
See _DoesTargetTypeRequireBuild for details.
added_to_compile_targets: used when determining if the target was added to the
set of targets that needs to be built.
in_roots: true if this target is a descendant of one of the root nodes.
is_executable: true if the type of target is executable.
is_static_library: true if the type of target is static_library.
is_or_has_linked_ancestor: true if the target does a link (eg executable), or
if there is a target in back_deps that does a link."""
def __init__(self, name):
self.deps = set()
self.match_status = MATCH_STATUS_TBD
self.back_deps = set()
self.name = name
# TODO(sky): I don't like hanging this off Target. This state is specific
# to certain functions and should be isolated there.
self.visited = False
self.requires_build = False
self.added_to_compile_targets = False
self.in_roots = False
self.is_executable = False
self.is_static_library = False
self.is_or_has_linked_ancestor = False
class Config(object):
"""Details what we're looking for
files: set of files to search for
targets: see file description for details."""
def __init__(self):
self.files = []
self.targets = set()
self.additional_compile_target_names = set()
self.test_target_names = set()
def Init(self, params):
"""Initializes Config. This is a separate method as it raises an exception
if there is a parse error."""
generator_flags = params.get('generator_flags', {})
config_path = generator_flags.get('config_path', None)
if not config_path:
return
try:
f = open(config_path, 'r')
config = json.load(f)
f.close()
except IOError:
raise Exception('Unable to open file ' + config_path)
except ValueError as e:
raise Exception('Unable to parse config file ' + config_path + str(e))
if not isinstance(config, dict):
raise Exception('config_path must be a JSON file containing a dictionary')
self.files = config.get('files', [])
self.additional_compile_target_names = set(
config.get('additional_compile_targets', []))
self.test_target_names = set(config.get('test_targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
"""Returns true if the build file |build_file| is either in |files| or
one of the files included by |build_file| is in |files|. |toplevel_dir| is
the root of the source tree."""
if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
if debug:
print 'gyp file modified', build_file
return True
# First element of included_files is the file itself.
if len(data[build_file]['included_files']) <= 1:
return False
for include_file in data[build_file]['included_files'][1:]:
# |included_files| are relative to the directory of the |build_file|.
rel_include_file = \
_ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
if _ToLocalPath(toplevel_dir, rel_include_file) in files:
if debug:
print 'included gyp file modified, gyp_file=', build_file, \
'included file=', rel_include_file
return True
return False
def _GetOrCreateTargetByName(targets, target_name):
"""Creates or returns the Target at targets[target_name]. If there is no
Target for |target_name| one is created. Returns a tuple of whether a new
Target was created and the Target."""
if target_name in targets:
return False, targets[target_name]
target = Target(target_name)
targets[target_name] = target
return True, target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
build_files):
"""Returns a tuple of the following:
. A dictionary mapping from fully qualified name to Target.
. A list of the targets that have a source file in |files|.
. Targets that constitute the 'all' target. See description at top of file
for details on the 'all' target.
This sets the |match_status| of the targets that contain any of the source
files in |files| to MATCH_STATUS_MATCHES.
|toplevel_dir| is the root of the source tree."""
# Maps from target name to Target.
name_to_target = {}
# Targets that matched.
matching_targets = []
# Queue of targets to visit.
targets_to_visit = target_list[:]
# Maps from build file to a boolean indicating whether the build file is in
# |files|.
build_file_in_files = {}
# Root targets across all files.
roots = set()
# Set of Targets in |build_files|.
build_file_targets = set()
while len(targets_to_visit) > 0:
target_name = targets_to_visit.pop()
created_target, target = _GetOrCreateTargetByName(name_to_target,
target_name)
if created_target:
roots.add(target)
elif target.visited:
continue
target.visited = True
target.requires_build = _DoesTargetTypeRequireBuild(
target_dicts[target_name])
target_type = target_dicts[target_name]['type']
target.is_executable = target_type == 'executable'
target.is_static_library = target_type == 'static_library'
target.is_or_has_linked_ancestor = (target_type == 'executable' or
target_type == 'shared_library')
build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
if not build_file in build_file_in_files:
build_file_in_files[build_file] = \
_WasBuildFileModified(build_file, data, files, toplevel_dir)
if build_file in build_files:
build_file_targets.add(target)
# If a build file (or any of its included files) is modified we assume all
# targets in the file are modified.
if build_file_in_files[build_file]:
print 'matching target from modified build file', target_name
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
else:
sources = _ExtractSources(target_name, target_dicts[target_name],
toplevel_dir)
for source in sources:
if _ToGypPath(os.path.normpath(source)) in files:
print 'target', target_name, 'matches', source
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
break
# Add dependencies to visit as well as updating back pointers for deps.
for dep in target_dicts[target_name].get('dependencies', []):
targets_to_visit.append(dep)
created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target,
dep)
if not created_dep_target:
roots.discard(dep_target)
target.deps.add(dep_target)
dep_target.back_deps.add(target)
return name_to_target, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
"""Returns a tuple of the following:
. mapping (dictionary) from unqualified name to Target for all the
Targets in |to_find|.
. any target names not found. If this is empty all targets were found."""
result = {}
if not to_find:
return {}, []
to_find = set(to_find)
for target_name in all_targets.keys():
extracted = gyp.common.ParseQualifiedTarget(target_name)
if len(extracted) > 1 and extracted[1] in to_find:
to_find.remove(extracted[1])
result[extracted[1]] = all_targets[target_name]
if not to_find:
return result, []
return result, [x for x in to_find]
def _DoesTargetDependOnMatchingTargets(target):
"""Returns true if |target| or any of its dependencies is one of the
targets containing the files supplied as input to analyzer. This updates
the |match_status| of the Targets as it recurses.
target: the Target to look for."""
if target.match_status == MATCH_STATUS_DOESNT_MATCH:
return False
if target.match_status == MATCH_STATUS_MATCHES or \
target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
return True
for dep in target.deps:
if _DoesTargetDependOnMatchingTargets(dep):
target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
print '\t', target.name, 'matches by dep', dep.name
return True
target.match_status = MATCH_STATUS_DOESNT_MATCH
return False
def _GetTargetsDependingOnMatchingTargets(possible_targets):
"""Returns the list of Targets in |possible_targets| that depend (either
directly or indirectly) on at least one of the targets containing the files
supplied as input to analyzer.
possible_targets: targets to search from."""
found = []
print 'Targets that matched by dependency:'
for target in possible_targets:
if _DoesTargetDependOnMatchingTargets(target):
found.append(target)
return found
def _AddCompileTargets(target, roots, add_if_no_ancestor, result):
"""Recurses through all targets that depend on |target|, adding all targets
that need to be built (and are in |roots|) to |result|.
roots: set of root targets.
add_if_no_ancestor: If true and there are no ancestors of |target| then add
|target| to |result|. |target| must still be in |roots|.
result: targets that need to be built are added here."""
if target.visited:
return
target.visited = True
target.in_roots = target in roots
for back_dep_target in target.back_deps:
_AddCompileTargets(back_dep_target, roots, False, result)
target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
target.in_roots |= back_dep_target.in_roots
target.is_or_has_linked_ancestor |= (
back_dep_target.is_or_has_linked_ancestor)
# Always add 'executable' targets. Even though they may be built by other
# targets that depend upon them, it makes detection of what is going to be
# built easier.
# And always add static_libraries that have no dependencies on them from
# linkables. This is necessary as the other dependencies on them may be
# static libraries themselves, which are not compile time dependencies.
if target.in_roots and \
(target.is_executable or
(not target.added_to_compile_targets and
(add_if_no_ancestor or target.requires_build)) or
(target.is_static_library and add_if_no_ancestor and
not target.is_or_has_linked_ancestor)):
print '\t\tadding to compile targets', target.name, 'executable', \
target.is_executable, 'added_to_compile_targets', \
target.added_to_compile_targets, 'add_if_no_ancestor', \
add_if_no_ancestor, 'requires_build', target.requires_build, \
'is_static_library', target.is_static_library, \
'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
result.add(target)
target.added_to_compile_targets = True
def _GetCompileTargets(matching_targets, supplied_targets):
"""Returns the set of Targets that require a build.
matching_targets: targets that changed and need to be built.
supplied_targets: set of targets supplied to analyzer to search from."""
result = set()
for target in matching_targets:
print 'finding compile targets for match', target.name
_AddCompileTargets(target, supplied_targets, True, result)
return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print 'The following targets were not found:'
for target in values['invalid_targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
if 'compile_targets' in values:
values['compile_targets'].sort()
print 'Targets that need to be built:'
for target in values['compile_targets']:
print '\t', target
if 'test_targets' in values:
values['test_targets'].sort()
print 'Test targets:'
for target in values['test_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
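A minimal sketch of the kind of |values| dictionary this serializes, assuming invented target names; the 'status' value is assumed to be one of the module-level status strings referenced elsewhere in this file.
import json

example_values = {
    'status': 'Found dependency',                    # assumed status string
    'test_targets': ['foo_unittests'],               # hypothetical names
    'compile_targets': ['foo_unittests', 'foo_lib'],
}
print(json.dumps(example_values))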
def _WasGypIncludeFileModified(params, files):
"""Returns true if one of the files in |files| is in the set of included
files."""
if params['options'].includes:
for include in params['options'].includes:
if _ToGypPath(os.path.normpath(include)) in files:
print 'Include file modified, assuming all changed', include
return True
return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
class TargetCalculator(object):
"""Calculates the matching test_targets and matching compile_targets."""
def __init__(self, files, additional_compile_target_names, test_target_names,
data, target_list, target_dicts, toplevel_dir, build_files):
self._additional_compile_target_names = set(additional_compile_target_names)
self._test_target_names = set(test_target_names)
self._name_to_target, self._changed_targets, self._root_targets = (
_GenerateTargets(data, target_list, target_dicts, toplevel_dir,
frozenset(files), build_files))
self._unqualified_mapping, self.invalid_targets = (
_GetUnqualifiedToTargetMapping(self._name_to_target,
self._supplied_target_names_no_all()))
def _supplied_target_names(self):
return self._additional_compile_target_names | self._test_target_names
def _supplied_target_names_no_all(self):
"""Returns the supplied test targets without 'all'."""
result = self._supplied_target_names()
result.discard('all')
return result
def is_build_impacted(self):
"""Returns true if the supplied files impact the build at all."""
return self._changed_targets
def find_matching_test_target_names(self):
"""Returns the set of output test targets."""
assert self.is_build_impacted()
# Find the test targets first. 'all' is special cased to mean all the
# root targets. To deal with 'all', the supplied |test_targets| are expanded
# to include the root targets during lookup. If any of the root targets
# match, we remove it and replace it with 'all'.
test_target_names_no_all = set(self._test_target_names)
test_target_names_no_all.discard('all')
test_targets_no_all = _LookupTargets(test_target_names_no_all,
self._unqualified_mapping)
test_target_names_contains_all = 'all' in self._test_target_names
if test_target_names_contains_all:
test_targets = [x for x in (set(test_targets_no_all) |
set(self._root_targets))]
else:
test_targets = [x for x in test_targets_no_all]
print 'supplied test_targets'
for target_name in self._test_target_names:
print '\t', target_name
print 'found test_targets'
for target in test_targets:
print '\t', target.name
print 'searching for matching test targets'
matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets)
matching_test_targets_contains_all = (test_target_names_contains_all and
set(matching_test_targets) &
set(self._root_targets))
if matching_test_targets_contains_all:
# Remove any of the targets for 'all' that were not explicitly supplied;
# 'all' is subsequently added to the matching names below.
matching_test_targets = [x for x in (set(matching_test_targets) &
set(test_targets_no_all))]
print 'matched test_targets'
for target in matching_test_targets:
print '\t', target.name
matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in matching_test_targets]
if matching_test_targets_contains_all:
matching_target_names.append('all')
print '\tall'
return matching_target_names
def find_matching_compile_target_names(self):
"""Returns the set of output compile targets."""
assert self.is_build_impacted()
# Compile targets are found by searching up from changed targets.
# Reset the visited status for _GetBuildTargets.
for target in self._name_to_target.itervalues():
target.visited = False
supplied_targets = _LookupTargets(self._supplied_target_names_no_all(),
self._unqualified_mapping)
if 'all' in self._supplied_target_names():
supplied_targets = [x for x in (set(supplied_targets) |
set(self._root_targets))]
print 'Supplied test_targets & compile_targets'
for target in supplied_targets:
print '\t', target.name
print 'Finding compile targets'
compile_targets = _GetCompileTargets(self._changed_targets,
supplied_targets)
return [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in compile_targets]
def GenerateOutput(target_list, target_dicts, data, params):
"""Called by gyp as the final stage. Outputs results."""
config = Config()
try:
config.Init(params)
if not config.files:
raise Exception('Must specify files to analyze via config_path generator '
'flag')
toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
if debug:
print 'toplevel_dir', toplevel_dir
if _WasGypIncludeFileModified(params, config.files):
result_dict = { 'status': all_changed_string,
'test_targets': list(config.test_target_names),
'compile_targets': list(
config.additional_compile_target_names |
config.test_target_names) }
_WriteOutput(params, **result_dict)
return
calculator = TargetCalculator(config.files,
config.additional_compile_target_names,
config.test_target_names, data,
target_list, target_dicts, toplevel_dir,
params['build_files'])
if not calculator.is_build_impacted():
result_dict = { 'status': no_dependency_string,
'test_targets': [],
'compile_targets': [] }
if calculator.invalid_targets:
result_dict['invalid_targets'] = calculator.invalid_targets
_WriteOutput(params, **result_dict)
return
test_target_names = calculator.find_matching_test_target_names()
compile_target_names = calculator.find_matching_compile_target_names()
found_at_least_one_target = compile_target_names or test_target_names
result_dict = { 'test_targets': test_target_names,
'status': found_dependency_string if
found_at_least_one_target else no_dependency_string,
'compile_targets': list(
set(compile_target_names) |
set(test_target_names)) }
if calculator.invalid_targets:
result_dict['invalid_targets'] = calculator.invalid_targets
_WriteOutput(params, **result_dict)
except Exception as e:
_WriteOutput(params, error=str(e))

File diff suppressed because it is too large

View File

@ -1,99 +0,0 @@
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!).
generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
toplevel = params['options'].toplevel_dir
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, generator_dir, output_dir, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def GenerateOutput(target_list, target_dicts, data, params):
# Map of target -> list of targets it depends on.
edges = {}
# Queue of targets to visit.
targets_to_visit = target_list[:]
while len(targets_to_visit) > 0:
target = targets_to_visit.pop()
if target in edges:
continue
edges[target] = []
for dep in target_dicts[target].get('dependencies', []):
edges[target].append(dep)
targets_to_visit.append(dep)
try:
filepath = params['generator_flags']['output_dir']
except KeyError:
filepath = '.'
filename = os.path.join(filepath, 'dump.json')
f = open(filename, 'w')
json.dump(edges, f)
f.close()
print 'Wrote json to %s.' % filename
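A minimal sketch of the dump.json structure produced above, with fabricated fully qualified target names: each key maps to the list of targets it directly depends on.
import json

edges = {
    'src/foo.gyp:foo_lib#target': [],
    'src/foo.gyp:foo_unittests#target': ['src/foo.gyp:foo_lib#target'],
}
with open('dump.json', 'w') as f:
    json.dump(edges, f)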

View File

@ -1,425 +0,0 @@
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
# Some gyp steps fail if these are empty(!), so we convert them to variables
generator_default_variables[dirname] = '$' + dirname
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
'$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
for key, val in generator_flags.items():
default_variables.setdefault(key, val)
flavor = gyp.common.GetFlavor(params)
default_variables.setdefault('OS', flavor)
if flavor == 'win':
# Copy additional generator configuration data from VS, which is shared
# by the Eclipse generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
if generator_flags.get('adjust_static_libraries', False):
global generator_wants_static_library_dependencies_adjusted
generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs, config_name, params,
compiler_path):
"""Calculate the set of include directories to be used.
Returns:
A list including all the include_dirs specified for every target followed
by any include directories that were added as cflag compiler options.
"""
gyp_includes_set = set()
compiler_includes_list = []
# Find compiler's default include dirs.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-xc++', '-v', '-'])
proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.communicate()[1]
# Extract the list of include dirs from the output, which has this format:
# ...
# #include "..." search starts here:
# #include <...> search starts here:
# /usr/include/c++/4.6
# /usr/local/include
# End of search list.
# ...
in_include_list = False
for line in output.splitlines():
if line.startswith('#include'):
in_include_list = True
continue
if line.startswith('End of search list.'):
break
if in_include_list:
include_dir = line.strip()
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if config_name in target['configurations']:
config = target['configurations'][config_name]
# Look for any include dirs that were explicitly added via cflags. This
# may be done in gyp files to force certain includes to come at the end.
# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
# remove this.
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
cflags = msvs_settings.GetCflags(config_name)
else:
cflags = config['cflags']
for cflag in cflags:
if cflag.startswith('-I'):
include_dir = cflag[2:]
if include_dir not in compiler_includes_list:
compiler_includes_list.append(include_dir)
# Find standard gyp include dirs.
if config.has_key('include_dirs'):
include_dirs = config['include_dirs']
for shared_intermediate_dir in shared_intermediate_dirs:
for include_dir in include_dirs:
include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
shared_intermediate_dir)
if not os.path.isabs(include_dir):
base_dir = os.path.dirname(target_name)
include_dir = base_dir + '/' + include_dir
include_dir = os.path.abspath(include_dir)
gyp_includes_set.add(include_dir)
# Generate a list that has all the include dirs.
all_includes_list = list(gyp_includes_set)
all_includes_list.sort()
for compiler_include in compiler_includes_list:
if not compiler_include in gyp_includes_set:
all_includes_list.append(compiler_include)
# All done.
return all_includes_list
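The compiler-include discovery above depends on the textual layout of the driver's verbose preprocessor output; a standalone sketch of the same parsing idea, assuming a GCC/Clang-style compiler named 'c++' on PATH (the function name is hypothetical):
import subprocess

def default_include_dirs(compiler='c++'):
    # Preprocess empty input verbosely; the search directories appear on
    # stderr between the '#include <...>' marker and 'End of search list.'.
    proc = subprocess.Popen([compiler, '-E', '-xc++', '-v', '-'],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stderr = proc.communicate(input=b'')[1].decode('utf-8', 'replace')
    dirs, collecting = [], False
    for line in stderr.splitlines():
        if line.startswith('#include'):
            collecting = True
            continue
        if line.startswith('End of search list.'):
            break
        if collecting:
            dirs.append(line.strip())
    return dirs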
def GetCompilerPath(target_list, data, options):
"""Determine a command that can be used to invoke the compiler.
Returns:
If this is a gyp project that has explicit make settings, try to determine
the compiler from that. Otherwise, see if a compiler was specified via the
CC_target environment variable.
"""
# First, see if the compiler is configured in make's settings.
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_dict = data[build_file].get('make_global_settings', {})
for key, value in make_global_settings_dict:
if key in ['CC', 'CXX']:
return os.path.join(options.toplevel_dir, value)
# Check to see if the compiler was specified as an environment variable.
for key in ['CC_target', 'CC', 'CXX']:
compiler = os.environ.get(key)
if compiler:
return compiler
return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
compiler_path):
"""Calculate the defines for a project.
Returns:
A dict that includes explicit defines declared in gyp files along with all of
the default defines that the compiler uses.
"""
# Get defines declared in the gyp files.
all_defines = {}
flavor = gyp.common.GetFlavor(params)
if flavor == 'win':
generator_flags = params.get('generator_flags', {})
for target_name in target_list:
target = target_dicts[target_name]
if flavor == 'win':
msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
extra_defines = msvs_settings.GetComputedDefines(config_name)
else:
extra_defines = []
if config_name in target['configurations']:
config = target['configurations'][config_name]
target_defines = config['defines']
else:
target_defines = []
for define in target_defines + extra_defines:
split_define = define.split('=', 1)
if len(split_define) == 1:
split_define.append('1')
if split_define[0].strip() in all_defines:
# Already defined
continue
all_defines[split_define[0].strip()] = split_define[1].strip()
# Get default compiler defines (if possible).
if flavor == 'win':
return all_defines # Default defines already processed in the loop above.
if compiler_path:
command = shlex.split(compiler_path)
command.extend(['-E', '-dM', '-'])
cpp_proc = subprocess.Popen(args=command, cwd='.',
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
cpp_output = cpp_proc.communicate()[0]
cpp_lines = cpp_output.split('\n')
for cpp_line in cpp_lines:
if not cpp_line.strip():
continue
cpp_line_parts = cpp_line.split(' ', 2)
key = cpp_line_parts[1]
if len(cpp_line_parts) >= 3:
val = cpp_line_parts[2]
else:
val = '1'
all_defines[key] = val
return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
"""Write the includes section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.IncludePaths">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for include_dir in include_dirs:
out.write(' <includepath workspace_path="false">%s</includepath>\n' %
include_dir)
out.write(' </language>\n')
out.write(' </section>\n')
def WriteMacros(out, eclipse_langs, defines):
"""Write the macros section of a CDT settings export file."""
out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
'settingswizards.Macros">\n')
out.write(' <language name="holder for library settings"></language>\n')
for lang in eclipse_langs:
out.write(' <language name="%s">\n' % lang)
for key in sorted(defines.iterkeys()):
out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
(escape(key), escape(defines[key])))
out.write(' </language>\n')
out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
config_name)
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
# Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
# SHARED_INTERMEDIATE_DIR. Include both possible locations.
shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
os.path.join(toplevel_build, 'gen')]
GenerateCdtSettingsFile(target_list,
target_dicts,
data,
params,
config_name,
os.path.join(toplevel_build,
'eclipse-cdt-settings.xml'),
options,
shared_intermediate_dirs)
GenerateClasspathFile(target_list,
target_dicts,
options.toplevel_dir,
toplevel_build,
os.path.join(toplevel_build,
'eclipse-classpath.xml'))
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
config_name, out_name, options,
shared_intermediate_dirs):
gyp.common.EnsureDirExists(out_name)
with open(out_name, 'w') as out:
out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
out.write('<cdtprojectproperties>\n')
eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
'GNU C++', 'GNU C', 'Assembly']
compiler_path = GetCompilerPath(target_list, data, options)
include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
shared_intermediate_dirs,
config_name, params, compiler_path)
WriteIncludePaths(out, eclipse_langs, include_dirs)
defines = GetAllDefines(target_list, target_dicts, data, config_name,
params, compiler_path)
WriteMacros(out, eclipse_langs, defines)
out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
toplevel_build, out_name):
'''Generates a classpath file suitable for symbol navigation and code
completion of Java code (such as in Android projects) by finding all
.java and .jar files used as action inputs.'''
gyp.common.EnsureDirExists(out_name)
result = ET.Element('classpath')
def AddElements(kind, paths):
# First, we need to normalize the paths so they are all relative to the
# toplevel dir.
rel_paths = set()
for path in paths:
if os.path.isabs(path):
rel_paths.add(os.path.relpath(path, toplevel_dir))
else:
rel_paths.add(path)
for path in sorted(rel_paths):
entry_element = ET.SubElement(result, 'classpathentry')
entry_element.set('kind', kind)
entry_element.set('path', path)
AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
# Include the standard JRE container and a dummy out folder
AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
# Include a dummy out folder so that Eclipse doesn't use the default /bin
# folder in the root of the project.
AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])
ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all .jars used as inputs.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'):
if os.path.isabs(input_):
yield input_
else:
yield os.path.join(os.path.dirname(target_name), input_)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
'''Generates a sequence of all likely java package root directories.'''
for target_name in target_list:
target = target_dicts[target_name]
for action in target.get('actions', []):
for input_ in action['inputs']:
if (os.path.splitext(input_)[1] == '.java' and
not input_.startswith('$')):
dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name),
input_))
# If there is a parent 'src' or 'java' folder, navigate up to it -
# these are canonical package root names in Chromium. This will
# break if 'src' or 'java' exists in the package structure. This
# could be further improved by inspecting the java file for the
# package name if this proves to be too fragile in practice.
parent_search = dir_
while os.path.basename(parent_search) not in ['src', 'java']:
parent_search, _ = os.path.split(parent_search)
if not parent_search or parent_search == toplevel_dir:
# Didn't find a known root, just return the original path
yield dir_
break
else:
yield parent_search
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate an XML settings file that can be imported into a CDT project."""
if params['options'].generator_output:
raise NotImplementedError("--generator_output not implemented for eclipse")
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)

View File

@ -1,94 +0,0 @@
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files, such as "included_files" and "*_excluded", will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()

View File

@ -1,56 +0,0 @@
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
locals = {
'target_list': target_list,
'target_dicts': target_dicts,
'data': data,
}
# Use a banner that looks like the stock Python one and like what
# code.interact uses by default, but tack on something to indicate what
# locals are available, and identify gypsh.
banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
(sys.version, sys.platform, repr(sorted(locals.keys())))
code.interact(banner, local=locals)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,37 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the msvs.py file. """
import gyp.generator.msvs as msvs
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_GetLibraries(self):
self.assertEqual(
msvs._GetLibraries({}),
[])
self.assertEqual(
msvs._GetLibraries({'libraries': []}),
[])
self.assertEqual(
msvs._GetLibraries({'other':'foo', 'libraries': ['a.lib']}),
['a.lib'])
self.assertEqual(
msvs._GetLibraries({'libraries': ['-la']}),
['a.lib'])
self.assertEqual(
msvs._GetLibraries({'libraries': ['a.lib', 'b.lib', 'c.lib', '-lb.lib',
'-lb.lib', 'd.lib', 'a.lib']}),
['c.lib', 'b.lib', 'd.lib', 'a.lib'])
if __name__ == '__main__':
unittest.main()
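These assertions pin down the normalization rules for the 'libraries' list: '-l' entries are rewritten to '.lib' names and duplicates collapse so that the last occurrence wins. A standalone sketch of rules that satisfy the expectations above (not the actual gyp.generator.msvs implementation):
import os
import re

def get_libraries_sketch(spec):
    # Walk the list backwards so the last occurrence of a duplicate is the
    # one that is kept, then restore the original relative order.
    found, result = set(), []
    for entry in reversed(spec.get('libraries', [])):
        library = re.sub(r'^-l', '', entry)
        if not os.path.splitext(library)[1]:
            library += '.lib'
        if library not in found:
            found.add(library)
            result.append(library)
    result.reverse()
    return result

# get_libraries_sketch({'libraries': ['-la']}) == ['a.lib']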

File diff suppressed because it is too large

View File

@ -1,47 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith('win'):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()

File diff suppressed because it is too large

View File

@ -1,23 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the xcode.py file. """
import gyp.generator.xcode as xcode
import unittest
import sys
class TestEscapeXcodeDefine(unittest.TestCase):
if sys.platform == 'darwin':
def test_InheritedRemainsUnescaped(self):
self.assertEqual(xcode.EscapeXcodeDefine('$(inherited)'), '$(inherited)')
def test_Escaping(self):
self.assertEqual(xcode.EscapeXcodeDefine('a b"c\\'), 'a\\ b\\"c\\\\')
if __name__ == '__main__':
unittest.main()

File diff suppressed because it is too large

View File

@ -1,90 +0,0 @@
#!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['a']]],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
self.nodes['a'].FindCycles())
self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
[self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
self.assertTrue(
[self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([[self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a']]],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
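A simplified, self-contained sketch of the dependents-walking DFS these tests describe; it reproduces the behaviour the assertions expect but is not the actual gyp.input.DependencyGraphNode.
class GraphNode(object):
    """Simplified stand-in for the dependency-graph nodes used above."""

    def __init__(self, ref):
        self.ref = ref
        self.dependencies = []   # nodes this node depends on
        self.dependents = []     # nodes that depend on this node

    def FindCycles(self):
        # Depth-first walk over |dependents|; whenever a child is already on
        # the current path, the slice of the path up to it is one cycle.
        results = []
        visited = set([self])

        def visit(node, path):
            for child in node.dependents:
                if child in path:
                    results.append([child] + path[:path.index(child) + 1])
                elif child not in visited:
                    visited.add(child)
                    visit(child, [child] + path)

        visit(self, [self])
        return results


def add_dependency(dependent, dependency):
    dependent.dependencies.append(dependency)
    dependency.dependents.append(dependent)

# a -> b -> a yields the single cycle [a, b, a]:
a, b = GraphNode('a'), GraphNode('b')
add_dependency(a, b)
add_dependency(b, a)
assert [n.ref for n in a.FindCycles()[0]] == ['a', 'b', 'a']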

View File

@ -1,716 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import struct
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
convert_to_binary = convert_to_binary == 'True'
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings' and not convert_to_binary:
self._CopyStringsFile(source, dest)
else:
if os.path.exists(dest):
os.unlink(dest)
shutil.copy(source, dest)
if convert_to_binary and extension in ('.plist', '.strings'):
self._ConvertToBinary(dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices']
if os.environ['XCODE_VERSION_ACTUAL'] > '0700':
args.extend(['--auto-activate-custom-fonts'])
if 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ:
args.extend([
'--target-device', 'iphone', '--target-device', 'ipad',
'--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
])
else:
args.extend([
'--target-device', 'mac',
'--minimum-deployment-target',
os.environ['MACOSX_DEPLOYMENT_TARGET'],
])
args.extend(['--output-format', 'human-readable-text', '--compile', dest,
source])
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
try:
stdout = subprocess.check_output(args)
except subprocess.CalledProcessError as e:
print(e.output)
raise
current_section_header = None
for line in stdout.splitlines():
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
print(current_section_header)
current_section_header = None
print(line)
return 0
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[_/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffixes on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out the PkgInfo file, since the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
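A tiny standalone illustration of the PkgInfo rule described above (the helper name and signature value are hypothetical): four characters of bundle type followed by four of signature, with '????' as the fallback.
def pkginfo_payload(package_type, signature_code='????'):
    # Mirror of the rule above: wrong-length signatures fall back to '????'.
    if len(signature_code) != 4:
        signature_code = '?' * 4
    return '%s%s' % (package_type, signature_code)

# pkginfo_payload('APPL', 'chrm') == 'APPLchrm'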
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: (?:for architecture: \S* )?'
r'file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageIosFramework(self, framework):
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
module_path = os.path.join(framework, 'Modules')
if not os.path.exists(module_path):
os.mkdir(module_path)
module_template = 'framework module %s {\n' \
' umbrella header "%s.h"\n' \
'\n' \
' export *\n' \
' module * { export * }\n' \
'}\n' % (binary, binary)
module_file = open(os.path.join(module_path, 'module.modulemap'), "w")
module_file.write(module_template)
module_file.close()
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileIosFrameworkHeaderMap(self, out, framework, *all_headers):
framework_name = os.path.basename(framework).split('.')[0]
all_headers = map(os.path.abspath, all_headers)
filelist = {}
for header in all_headers:
filename = os.path.basename(header)
filelist[filename] = header
filelist[os.path.join(framework_name, filename)] = header
WriteHmap(out, filelist)
def ExecCopyIosFrameworkHeaders(self, framework, *copy_headers):
header_path = os.path.join(framework, 'Headers')
if not os.path.exists(header_path):
os.makedirs(header_path)
for header in copy_headers:
shutil.copy(header, os.path.join(header_path, os.path.basename(header)))
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the input .xcassets files. The
|keys| argument is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contain an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs do not contain any imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if input paths are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
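A minimal sketch of the |keys| expansion performed above, using invented option names (not a claim about which flags actool accepts): booleans become bare flags, lists repeat the flag per value, and everything else becomes a flag/value pair.
import json

keys = '{"app-icon": "AppIcon", "launch-image": ["LaunchA", "LaunchB"]}'
extra_args = []
for key, value in json.loads(keys).items():
    arg_name = '--' + key
    if isinstance(value, bool):
        if value:
            extra_args.append(arg_name)
    elif isinstance(value, list):
        for v in value:
            extra_args.extend([arg_name, str(v)])
    else:
        extra_args.extend([arg_name, str(value)])
# extra_args now holds something like:
#   ['--app-icon', 'AppIcon', '--launch-image', 'LaunchA', '--launch-image', 'LaunchB']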
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, entitlements, provisioning, path, preserve):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. pick the provisioning profile that best matches the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
2. copy Entitlements.plist from user or SDK next to the bundle,
3. code sign the bundle.
"""
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
args = ['codesign', '--force', '--sign', key]
if preserve == 'True':
args.extend(['--deep', '--preserve-metadata=identifier,entitlements'])
else:
args.extend(['--entitlements', entitlements_path])
args.extend(['--timestamp=none', path])
subprocess.check_call(args)
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use; if empty or the file is missing, the best installed file
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionaries: variable substitutions and values
to override when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or, if the user specified
the PROVISIONING_PROFILE variable, only consults it) and selects the most
specific one that corresponds to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use; if empty or the file is missing, the best installed file
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (i.e. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each reference to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
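# A minimal sketch, not from the upstream gyp sources, of the "$(variable)"
# expansion performed by _ExpandVariables above; the substitution names and
# values below are hypothetical.
def _expand_variables_demo():
  substitutions = {'CFBundleIdentifier': 'com.example.app',
                   'AppIdentifierPrefix': 'ABCDE12345.'}
  value = '$(AppIdentifierPrefix)$(CFBundleIdentifier)'
  # Mirrors the string-replacement loop used for str values above.
  for key, sub in substitutions.items():
    value = value.replace('$(%s)' % key, sub)
  assert value == 'ABCDE12345.com.example.app'
  return value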
def NextGreaterPowerOf2(x):
return 2**(x).bit_length()
def WriteHmap(output_name, filelist):
"""Generates a header map based on |filelist|.
Per Mark Mentovai:
A header map is structured essentially as a hash table, keyed by names used
in #includes, and providing pathnames to the actual files.
The implementation below and the comment above come from inspecting:
http://www.opensource.apple.com/source/distcc/distcc-2503/distcc_dist/include_server/headermap.py?txt
while also looking at the implementation in clang in:
https://llvm.org/svn/llvm-project/cfe/trunk/lib/Lex/HeaderMap.cpp
"""
magic = 1751998832
version = 1
_reserved = 0
count = len(filelist)
capacity = NextGreaterPowerOf2(count)
strings_offset = 24 + (12 * capacity)
max_value_length = len(max(filelist.items(), key=lambda (k,v):len(v))[1])
out = open(output_name, "wb")
out.write(struct.pack('<LHHLLLL', magic, version, _reserved, strings_offset,
count, capacity, max_value_length))
# Create empty hashmap buckets.
buckets = [None] * capacity
for file, path in filelist.items():
key = 0
for c in file:
key += ord(c.lower()) * 13
# Fill next empty bucket.
while buckets[key & capacity - 1] is not None:
key = key + 1
buckets[key & capacity - 1] = (file, path)
next_offset = 1
for bucket in buckets:
if bucket is None:
out.write(struct.pack('<LLL', 0, 0, 0))
else:
(file, path) = bucket
key_offset = next_offset
prefix_offset = key_offset + len(file) + 1
suffix_offset = prefix_offset + len(os.path.dirname(path) + os.sep) + 1
next_offset = suffix_offset + len(os.path.basename(path)) + 1
out.write(struct.pack('<LLL', key_offset, prefix_offset, suffix_offset))
# Pad byte since next offset starts at 1.
out.write(struct.pack('<x'))
for bucket in buckets:
if bucket is not None:
(file, path) = bucket
out.write(struct.pack('<%ds' % len(file), file))
out.write(struct.pack('<s', '\0'))
base = os.path.dirname(path) + os.sep
out.write(struct.pack('<%ds' % len(base), base))
out.write(struct.pack('<s', '\0'))
path = os.path.basename(path)
out.write(struct.pack('<%ds' % len(path), path))
out.write(struct.pack('<s', '\0'))
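# A minimal sketch, not from the upstream gyp sources, that reads back the
# fixed-size header written by WriteHmap above. It assumes the same layout:
# a 24-byte '<LHHLLLL' header, |capacity| 12-byte '<LLL' buckets, and a
# NUL-separated string pool whose offsets start at 1.
def _read_hmap_header(path):
  import struct  # local import keeps the sketch self-contained
  with open(path, 'rb') as f:
    magic, version, _reserved, strings_offset, count, capacity, max_len = \
        struct.unpack('<LHHLLLL', f.read(24))
  return {'magic': magic, 'version': version, 'strings_offset': strings_offset,
          'count': count, 'capacity': capacity, 'max_value_length': max_len}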
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))

File diff suppressed because it is too large

View File

@ -1,160 +0,0 @@
# This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:')
class Writer(object):
def __init__(self, output, width=78):
self.output = output
self.width = width
def newline(self):
self.output.write('\n')
def comment(self, text):
for line in textwrap.wrap(text, self.width - 2):
self.output.write('# ' + line + '\n')
def variable(self, key, value, indent=0):
if value is None:
return
if isinstance(value, list):
value = ' '.join(filter(None, value)) # Filter out empty strings.
self._line('%s = %s' % (key, value), indent)
def pool(self, name, depth):
self._line('pool %s' % name)
self.variable('depth', depth, indent=1)
def rule(self, name, command, description=None, depfile=None,
generator=False, pool=None, restat=False, rspfile=None,
rspfile_content=None, deps=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if pool:
self.variable('pool', pool, indent=1)
if restat:
self.variable('restat', '1', indent=1)
if rspfile:
self.variable('rspfile', rspfile, indent=1)
if rspfile_content:
self.variable('rspfile_content', rspfile_content, indent=1)
if deps:
self.variable('deps', deps, indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None):
outputs = self._as_list(outputs)
all_inputs = self._as_list(inputs)[:]
out_outputs = list(map(escape_path, outputs))
all_inputs = list(map(escape_path, all_inputs))
if implicit:
implicit = map(escape_path, self._as_list(implicit))
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = map(escape_path, self._as_list(order_only))
all_inputs.append('||')
all_inputs.extend(order_only)
self._line('build %s: %s' % (' '.join(out_outputs),
' '.join([rule] + all_inputs)))
if variables:
if isinstance(variables, dict):
iterator = iter(variables.items())
else:
iterator = iter(variables)
for key, val in iterator:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(self._as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
"""Write 'text' word-wrapped at self.width characters."""
leading_space = ' ' * indent
while len(leading_space) + len(text) > self.width:
# The text is too wide; wrap if possible.
# Find the rightmost space that would obey our width constraint and
# that's not an escaped space.
available_space = self.width - len(leading_space) - len(' $')
space = available_space
while True:
space = text.rfind(' ', 0, space)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# No such space; just use the first unescaped space we can find.
space = available_space - 1
while True:
space = text.find(' ', space + 1)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# Give up on breaking.
break
self.output.write(leading_space + text[0:space] + ' $\n')
text = text[space+1:]
# Subsequent lines are continuations, so indent them.
leading_space = ' ' * (indent+2)
self.output.write(leading_space + text + '\n')
def _as_list(self, input):
if input is None:
return []
if isinstance(input, list):
return input
return [input]
def escape(string):
"""Escape a string such that it can be embedded into a Ninja file without
further interpretation."""
assert '\n' not in string, 'Ninja syntax does not allow newlines'
# We only have one special metacharacter: '$'.
return string.replace('$', '$$')
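# A minimal sketch, not part of the upstream module, showing typical use of
# Writer: emit a variable, a rule and a build edge into an in-memory buffer.
# The 'cc' command and the file names are hypothetical.
def _writer_demo():
  try:
    from cStringIO import StringIO  # Python 2
  except ImportError:
    from io import StringIO         # Python 3
  buf = StringIO()
  n = Writer(buf)
  n.comment('Generated by a ninja_syntax demo')
  n.variable('cflags', ['-O2', '-Wall'])
  n.rule('cc', command='cc $cflags -c $in -o $out', description='CC $out')
  n.build('foo.o', 'cc', inputs='foo.c')
  return buf.getvalue()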

View File

@ -1,289 +0,0 @@
# Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
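# A minimal usage sketch, not part of the original recipe, demonstrating the
# behaviour documented above: insertion order is preserved and popitem() pops
# LIFO by default, FIFO with last=False.
def _ordered_dict_demo():
  od = OrderedDict()
  od['first'] = 1
  od['second'] = 2
  od['third'] = 3
  assert od.keys() == ['first', 'second', 'third']
  assert od.popitem() == ('third', 3)            # LIFO
  assert od.popitem(last=False) == ('first', 1)  # FIFO
  return od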

View File

@ -1,46 +0,0 @@
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A clone of the default copy.deepcopy that doesn't handle cyclic
structures or complex types except for dicts and lists. This is
because gyp copies such large structures that the small copy overhead ends up
taking seconds in a project the size of Chromium."""
class Error(Exception):
pass
__all__ = ["Error", "deepcopy"]
def deepcopy(x):
"""Deep copy operation on gyp objects such as strings, ints, dicts
and lists. More than twice as fast as copy.deepcopy but much less
generic."""
try:
return _deepcopy_dispatch[type(x)](x)
except KeyError:
raise Error('Unsupported type %s for deepcopy. Use copy.deepcopy '
'or expand simple_copy support.' % type(x))
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x):
return x
for x in (type(None), int, long, float,
bool, str, unicode, type):
d[x] = _deepcopy_atomic
def _deepcopy_list(x):
return [deepcopy(a) for a in x]
d[list] = _deepcopy_list
def _deepcopy_dict(x):
y = {}
for key, value in x.iteritems():
y[deepcopy(key)] = deepcopy(value)
return y
d[dict] = _deepcopy_dict
del d
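# A minimal usage sketch, not part of the upstream module: nested dicts and
# lists of atomic values are copied deeply, while unsupported types such as
# tuples raise Error. The sample data is hypothetical.
def _simple_copy_demo():
  original = {'defines': ['DEBUG'], 'settings': {'opt': 0}}
  clone = deepcopy(original)
  clone['settings']['opt'] = 3
  assert original['settings']['opt'] == 0  # the copy really is deep
  try:
    deepcopy(('tuples', 'are', 'unsupported'))
  except Error:
    pass
  return clone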

View File

@ -1,322 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
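# A minimal sketch, not part of the upstream tool, of the environment-block
# format consumed by _GetEnv above: 'KEY=value' pairs separated by NUL bytes
# and terminated by two trailing NULs. The sample block is hypothetical.
def _parse_environment_block_demo():
  block = 'PATH=C:\\tools\0TMP=C:\\temp\0\0'
  pairs = block[:-2].split('\0')
  env = dict(item.split('=', 1) for item in pairs)
  assert env == {'PATH': 'C:\\tools', 'TMP': 'C:\\temp'}
  return env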
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
if sys.platform == 'win32':
args = list(args) # *args is a tuple by default, which is read-only.
args[0] = args[0].replace('/', '\\')
# https://docs.python.org/2/library/subprocess.html:
# "On Unix with shell=True [...] if args is a sequence, the first item
# specifies the command string, and any additional items will be treated as
# additional arguments to the shell itself. That is to say, Popen does the
# equivalent of:
# Popen(['/bin/sh', '-c', args[0], args[1], ...])"
# For that reason, since going through the shell doesn't seem necessary on
# non-Windows don't do that there.
link = subprocess.Popen(args, shell=sys.platform == 'win32', env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if (not line.startswith(' Creating library ') and
not line.startswith('Generating code') and
not line.startswith('Finished generating code')):
print line
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefixes = ('Processing ', '64 bit Processing ')
processing = set(os.path.basename(x)
for x in lines if x.startswith(prefixes))
for line in lines:
if not line.startswith(prefixes) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
def ExecClCompile(self, project_dir, selected_files):
"""Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files."""
project_dir = os.path.relpath(project_dir, BASE_DIR)
selected_files = selected_files.split(';')
ninja_targets = [os.path.join(project_dir, filename) + '^^'
for filename in selected_files]
cmd = ['ninja.exe']
cmd.extend(ninja_targets)
return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))

File diff suppressed because it is too large

View File

@ -1,289 +0,0 @@
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced using complex projects and large number
of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp, params):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
options = params['options']
if options.generator_output:
workspace_path = os.path.join(options.generator_output, workspace_path)
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
for key in ['BUNDLE_LOADER', 'TEST_HOST']:
if key in old_xcode_settings:
new_xcode_settings[key] = old_xcode_settings[key]
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['mac_xctest_bundle'] = old_spec.get('mac_xctest_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets and xc_tests.
if (int(spec.get('mac_xctest_bundle', 0)) != 0 or
(spec.get('type', '') == 'executable' and
spec.get('product_extension', '') != 'bundle')):
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
# Put excluded files into the sources target so they can be opened in Xcode.
skip_excluded_files = \
not generator_flags.get('xcode_ninja_list_excluded_files', True)
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
if not skip_excluded_files:
files.extend(target_dict.get('sources_excluded', []) +
target_dict.get('mac_bundle_resources_excluded', []))
for action in target_dict.get('actions', []):
files.extend(action.get('inputs', []))
if not skip_excluded_files:
files.extend(action.get('inputs_excluded', []))
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
# Put sources_to_index in its own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp, params)
return (new_target_list, new_target_dicts, new_data)
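# A minimal sketch, not part of the upstream generator, of the
# contents.xcworkspacedata that _WriteWorkspace above produces for a
# hypothetical main.ninja.gyp / sources_for_indexing.gyp pair:
#
#  <?xml version="1.0" encoding="UTF-8"?>
#  <Workspace version = "1.0">
#   <FileRef location = "group:main.ninja.xcodeproj"></FileRef>
#   <FileRef location = "group:sources_for_indexing.xcodeproj"></FileRef>
#  </Workspace>
def _workspace_xml_demo(main_gyp='main.ninja.gyp',
                        sources_gyp='sources_for_indexing.gyp'):
  import os
  import xml.sax.saxutils
  output = '<?xml version="1.0" encoding="UTF-8"?>\n<Workspace version = "1.0">\n'
  for gyp_name in [main_gyp, sources_gyp]:
    name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
    output += ' <FileRef location = %s></FileRef>\n' % \
        xml.sax.saxutils.quoteattr('group:' + name)
  return output + '</Workspace>\n'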

File diff suppressed because it is too large

View File

@ -1,69 +0,0 @@
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&amp;").replace("<", "&lt;")
data = data.replace("\"", "&quot;").replace(">", "&gt;")
if is_attrib:
data = data.replace(
"\r", "&#xD;").replace(
"\n", "&#xA;").replace(
"\t", "&#x9;")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
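# A minimal usage sketch, not part of the upstream module: patch minidom
# while serializing a document, then restore the original implementations.
# The sample XML below is hypothetical.
def _xml_fix_demo():
  import xml.dom.minidom
  fix = XmlFix()
  try:
    doc = xml.dom.minidom.parseString('<root note="a&#x9;b"/>')
    return doc.documentElement.toxml()
  finally:
    fix.Cleanup()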

View File

@ -1,19 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from setuptools import setup
setup(
name='gyp',
version='0.1',
description='Generate Your Projects',
author='Chromium Authors',
author_email='chromium-dev@googlegroups.com',
url='http://code.google.com/p/gyp',
package_dir = {'': 'pylib'},
packages=['gyp', 'gyp.generator'],
entry_points = {'console_scripts': ['gyp=gyp:script_main'] }
)

View File

@ -1,236 +0,0 @@
#!/bin/sh
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
SPARSE=${SPARSE:-sparse}
SPARSE_FLAGS=${SPARSE_FLAGS:-"
-D__POSIX__
-Wsparse-all
-Wno-do-while
-Wno-transparent-union
-Iinclude
-Isrc
"}
SOURCES="
include/tree.h
include/uv-unix.h
include/uv.h
src/fs-poll.c
src/inet.c
src/queue.h
src/unix/async.c
src/unix/core.c
src/unix/dl.c
src/unix/fs.c
src/unix/getaddrinfo.c
src/unix/internal.h
src/unix/loop-watcher.c
src/unix/loop.c
src/unix/pipe.c
src/unix/poll.c
src/unix/process.c
src/unix/signal.c
src/unix/stream.c
src/unix/tcp.c
src/unix/thread.c
src/unix/threadpool.c
src/unix/timer.c
src/unix/tty.c
src/unix/udp.c
src/uv-common.c
src/uv-common.h
"
TESTS="
test/benchmark-async-pummel.c
test/benchmark-async.c
test/benchmark-fs-stat.c
test/benchmark-getaddrinfo.c
test/benchmark-loop-count.c
test/benchmark-million-async.c
test/benchmark-million-timers.c
test/benchmark-multi-accept.c
test/benchmark-ping-pongs.c
test/benchmark-pound.c
test/benchmark-pump.c
test/benchmark-sizes.c
test/benchmark-spawn.c
test/benchmark-tcp-write-batch.c
test/benchmark-thread.c
test/benchmark-udp-pummel.c
test/blackhole-server.c
test/dns-server.c
test/echo-server.c
test/run-benchmarks.c
test/run-tests.c
test/runner-unix.c
test/runner-unix.h
test/runner.c
test/runner.h
test/task.h
test/test-active.c
test/test-async.c
test/test-barrier.c
test/test-callback-order.c
test/test-callback-stack.c
test/test-condvar.c
test/test-connection-fail.c
test/test-cwd-and-chdir.c
test/test-delayed-accept.c
test/test-dlerror.c
test/test-embed.c
test/test-error.c
test/test-fail-always.c
test/test-fs-event.c
test/test-fs-poll.c
test/test-fs.c
test/test-get-currentexe.c
test/test-get-loadavg.c
test/test-get-memory.c
test/test-get-passwd.c
test/test-getaddrinfo.c
test/test-getsockname.c
test/test-homedir.c
test/test-hrtime.c
test/test-idle.c
test/test-ip6-addr.c
test/test-ipc-send-recv.c
test/test-ipc.c
test/test-loop-handles.c
test/test-multiple-listen.c
test/test-mutexes.c
test/test-pass-always.c
test/test-ping-pong.c
test/test-pipe-bind-error.c
test/test-pipe-connect-error.c
test/test-pipe-sendmsg.c
test/test-pipe-server-close.c
test/test-platform-output.c
test/test-poll-close.c
test/test-poll.c
test/test-process-title.c
test/test-ref.c
test/test-run-nowait.c
test/test-run-once.c
test/test-semaphore.c
test/test-shutdown-close.c
test/test-shutdown-eof.c
test/test-signal-multiple-loops.c
test/test-signal.c
test/test-spawn.c
test/test-stdio-over-pipes.c
test/test-tcp-bind-error.c
test/test-tcp-bind6-error.c
test/test-tcp-close-while-connecting.c
test/test-tcp-close-accept.c
test/test-tcp-close.c
test/test-tcp-connect-error-after-write.c
test/test-tcp-connect-error.c
test/test-tcp-connect-timeout.c
test/test-tcp-connect6-error.c
test/test-tcp-flags.c
test/test-tcp-open.c
test/test-tcp-read-stop.c
test/test-tcp-shutdown-after-write.c
test/test-tcp-unexpected-read.c
test/test-tcp-oob.c
test/test-tcp-write-error.c
test/test-tcp-write-to-half-open-connection.c
test/test-tcp-writealot.c
test/test-thread.c
test/test-threadpool-cancel.c
test/test-threadpool.c
test/test-timer-again.c
test/test-timer.c
test/test-tmpdir.c
test/test-tty.c
test/test-udp-dgram-too-big.c
test/test-udp-ipv6.c
test/test-udp-multicast-join.c
test/test-udp-multicast-ttl.c
test/test-udp-open.c
test/test-udp-options.c
test/test-udp-send-and-recv.c
test/test-walk-handles.c
test/test-watcher-cross-stop.c
"
case `uname -s` in
AIX)
SPARSE_FLAGS="$SPARSE_FLAGS -D_AIX=1"
SOURCES="$SOURCES
src/unix/aix.c"
;;
Darwin)
SPARSE_FLAGS="$SPARSE_FLAGS -D__APPLE__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/darwin.c
src/unix/kqueue.c
src/unix/fsevents.c"
;;
DragonFly)
SPARSE_FLAGS="$SPARSE_FLAGS -D__DragonFly__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/kqueue.c
src/unix/freebsd.c"
;;
FreeBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__FreeBSD__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/kqueue.c
src/unix/freebsd.c"
;;
Linux)
SPARSE_FLAGS="$SPARSE_FLAGS -D__linux__=1"
SOURCES="$SOURCES
include/uv-linux.h
src/unix/linux-inotify.c
src/unix/linux-core.c
src/unix/linux-syscalls.c
src/unix/linux-syscalls.h"
;;
NetBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__NetBSD__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/kqueue.c
src/unix/netbsd.c"
;;
OpenBSD)
SPARSE_FLAGS="$SPARSE_FLAGS -D__OpenBSD__=1"
SOURCES="$SOURCES
include/uv-bsd.h
src/unix/kqueue.c
src/unix/openbsd.c"
;;
SunOS)
SPARSE_FLAGS="$SPARSE_FLAGS -D__sun=1"
SOURCES="$SOURCES
include/uv-sunos.h
src/unix/sunos.c"
;;
esac
for ARCH in __i386__ __x86_64__ __arm__ __mips__; do
$SPARSE $SPARSE_FLAGS -D$ARCH=1 $SOURCES
done
# Tests are architecture independent.
$SPARSE $SPARSE_FLAGS -Itest $TESTS

206
deps/libuv/common.gypi vendored
View File

@ -1,206 +0,0 @@
{
'variables': {
'target_arch%': 'ia32', # set v8's target architecture
'host_arch%': 'ia32', # set v8's host architecture
'uv_library%': 'static_library', # allow override to 'shared_library' for DLL/.so builds
'msvs_multi_core_compile': '0', # we do enable multicore compiles, but not using the V8 way
},
'target_defaults': {
'default_configuration': 'Debug',
'configurations': {
'Debug': {
'defines': [ 'DEBUG', '_DEBUG' ],
'cflags': [ '-g' ],
'msvs_settings': {
'VCCLCompilerTool': {
'target_conditions': [
['uv_library=="static_library"', {
'RuntimeLibrary': 1, # static debug
}, {
'RuntimeLibrary': 3, # DLL debug
}],
],
'Optimization': 0, # /Od, no optimization
'MinimalRebuild': 'false',
'OmitFramePointers': 'false',
'BasicRuntimeChecks': 3, # /RTC1
},
'VCLinkerTool': {
'LinkIncremental': 2, # enable incremental linking
},
},
'xcode_settings': {
'GCC_OPTIMIZATION_LEVEL': '0',
'OTHER_CFLAGS': [ '-Wno-strict-aliasing' ],
},
'conditions': [
['OS != "zos"', {
'cflags': [ '-O0', '-fwrapv' ]
}],
['OS == "android"', {
'cflags': [ '-fPIE' ],
'ldflags': [ '-fPIE', '-pie' ]
}]
]
},
'Release': {
'defines': [ 'NDEBUG' ],
'cflags': [
'-O3',
'-fstrict-aliasing',
'-fomit-frame-pointer',
'-fdata-sections',
'-ffunction-sections',
],
'msvs_settings': {
'VCCLCompilerTool': {
'target_conditions': [
['uv_library=="static_library"', {
'RuntimeLibrary': 0, # static release
}, {
'RuntimeLibrary': 2, # DLL release
}],
],
'Optimization': 3, # /Ox, full optimization
'FavorSizeOrSpeed': 1, # /Ot, favour speed over size
'InlineFunctionExpansion': 2, # /Ob2, inline anything eligible
'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG
'OmitFramePointers': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
},
'VCLibrarianTool': {
'AdditionalOptions': [
'/LTCG', # link time code generation
],
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': 1, # link-time code generation
'OptimizeReferences': 2, # /OPT:REF
'EnableCOMDATFolding': 2, # /OPT:ICF
'LinkIncremental': 1, # disable incremental linking
},
},
}
},
'msvs_settings': {
'VCCLCompilerTool': {
'StringPooling': 'true', # pool string literals
'DebugInformationFormat': 3, # Generate a PDB
'WarningLevel': 3,
'BufferSecurityCheck': 'true',
'ExceptionHandling': 1, # /EHsc
'SuppressStartupBanner': 'true',
'WarnAsError': 'false',
'AdditionalOptions': [
'/MP', # compile across multiple CPUs
],
},
'VCLibrarianTool': {
},
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
'RandomizedBaseAddress': 2, # enable ASLR
'DataExecutionPrevention': 2, # enable DEP
'AllowIsolation': 'true',
'SuppressStartupBanner': 'true',
'target_conditions': [
['_type=="executable"', {
'SubSystem': 1, # console executable
}],
],
},
},
'conditions': [
['OS == "win"', {
'msvs_cygwin_shell': 0, # prevent actions from trying to use cygwin
'defines': [
'WIN32',
# we don't really want VC++ warning us about
# how dangerous C functions are...
'_CRT_SECURE_NO_DEPRECATE',
# ... or that C implementations shouldn't use
# POSIX names
'_CRT_NONSTDC_NO_DEPRECATE',
],
'target_conditions': [
['target_arch=="x64"', {
'msvs_configuration_platform': 'x64'
}]
]
}],
['OS in "freebsd dragonflybsd linux openbsd solaris android"', {
'cflags': [ '-Wall' ],
'cflags_cc': [ '-fno-rtti', '-fno-exceptions' ],
'target_conditions': [
['_type=="static_library"', {
'standalone_static_library': 1, # disable thin archive which needs binutils >= 2.19
}],
],
'conditions': [
[ 'host_arch != target_arch and target_arch=="ia32"', {
'cflags': [ '-m32' ],
'ldflags': [ '-m32' ],
}],
[ 'target_arch=="x32"', {
'cflags': [ '-mx32' ],
'ldflags': [ '-mx32' ],
}],
[ 'OS=="linux"', {
'cflags': [ '-ansi' ],
}],
[ 'OS=="solaris"', {
'cflags': [ '-pthreads' ],
'ldflags': [ '-pthreads' ],
}],
[ 'OS not in "solaris android zos"', {
'cflags': [ '-pthread' ],
'ldflags': [ '-pthread' ],
}],
],
}],
['OS=="mac"', {
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
'GCC_CW_ASM_SYNTAX': 'NO', # No -fasm-blocks
'GCC_DYNAMIC_NO_PIC': 'NO', # No -mdynamic-no-pic
# (Equivalent to -fPIC)
'GCC_ENABLE_CPP_EXCEPTIONS': 'NO', # -fno-exceptions
'GCC_ENABLE_CPP_RTTI': 'NO', # -fno-rtti
'GCC_ENABLE_PASCAL_STRINGS': 'NO', # No -mpascal-strings
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
'PREBINDING': 'NO', # No -Wl,-prebind
'USE_HEADERMAP': 'NO',
'OTHER_CFLAGS': [
'-fstrict-aliasing',
],
'WARNING_CFLAGS': [
'-Wall',
'-Wendif-labels',
'-W',
'-Wno-unused-parameter',
],
},
'conditions': [
['target_arch=="ia32"', {
'xcode_settings': {'ARCHS': ['i386']},
}],
['target_arch=="x64"', {
'xcode_settings': {'ARCHS': ['x86_64']},
}],
],
'target_conditions': [
['_type!="static_library"', {
'xcode_settings': {'OTHER_LDFLAGS': ['-Wl,-search_paths_first']},
}],
],
}],
['OS=="solaris"', {
'cflags': [ '-fno-omit-frame-pointer' ],
# pull in V8's postmortem metadata
'ldflags': [ '-Wl,-z,allextract' ]
}],
],
},
}

View File

@ -1,74 +0,0 @@
# Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libuv], [1.10.0], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])
m4_include([m4/libuv-check-flags.m4])
AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects] UV_EXTRA_AUTOMAKE_FLAGS)
AC_CANONICAL_HOST
AC_ENABLE_SHARED
AC_ENABLE_STATIC
AC_PROG_CC
AM_PROG_CC_C_O
AS_IF([AS_CASE([$host_os],[openedition*], [false], [true])], [
CC_CHECK_CFLAGS_APPEND([-pedantic])
])
CC_FLAG_VISIBILITY #[-fvisibility=hidden]
CC_CHECK_CFLAGS_APPEND([-g])
CC_CHECK_CFLAGS_APPEND([-std=gnu89])
CC_CHECK_CFLAGS_APPEND([-Wall])
CC_CHECK_CFLAGS_APPEND([-Wextra])
CC_CHECK_CFLAGS_APPEND([-Wno-unused-parameter])
# AM_PROG_AR is not available in automake v1.11 but it's essential in v1.12.
m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
# autoconf complains if AC_PROG_LIBTOOL precedes AM_PROG_AR.
AC_PROG_LIBTOOL
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
LT_INIT
# TODO(bnoordhuis) Check for -pthread vs. -pthreads
AC_CHECK_LIB([dl], [dlopen])
AC_CHECK_LIB([kstat], [kstat_lookup])
AC_CHECK_LIB([kvm], [kvm_open])
AC_CHECK_LIB([nsl], [gethostbyname])
AC_CHECK_LIB([perfstat], [perfstat_cpu])
AC_CHECK_LIB([pthread], [pthread_mutex_init])
AC_CHECK_LIB([rt], [clock_gettime])
AC_CHECK_LIB([sendfile], [sendfile])
AC_CHECK_LIB([socket], [socket])
AC_SYS_LARGEFILE
AM_CONDITIONAL([AIX], [AS_CASE([$host_os],[aix*], [true], [false])])
AM_CONDITIONAL([ANDROID], [AS_CASE([$host_os],[linux-android*],[true], [false])])
AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])])
AM_CONDITIONAL([DRAGONFLY],[AS_CASE([$host_os],[dragonfly*], [true], [false])])
AM_CONDITIONAL([FREEBSD], [AS_CASE([$host_os],[*freebsd*], [true], [false])])
AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])])
AM_CONDITIONAL([NETBSD], [AS_CASE([$host_os],[netbsd*], [true], [false])])
AM_CONDITIONAL([OPENBSD], [AS_CASE([$host_os],[openbsd*], [true], [false])])
AM_CONDITIONAL([OS390], [AS_CASE([$host_os],[openedition*], [true], [false])])
AM_CONDITIONAL([SUNOS], [AS_CASE([$host_os],[solaris*], [true], [false])])
AM_CONDITIONAL([WINNT], [AS_CASE([$host_os],[mingw*], [true], [false])])
AS_CASE([$host_os],[mingw*], [
LIBS="$LIBS -lws2_32 -lpsapi -liphlpapi -lshell32 -luserenv -luser32"
])
AC_CHECK_HEADERS([sys/ahafs_evProds.h])
AC_CHECK_PROG(PKG_CONFIG, pkg-config, yes)
AM_CONDITIONAL([HAVE_PKG_CONFIG], [test "x$PKG_CONFIG" != "x"])
AS_IF([test "x$PKG_CONFIG" != "x"], [
AC_CONFIG_FILES([libuv.pc])
])
AC_CONFIG_FILES([Makefile])
AC_OUTPUT

94
deps/libuv/gyp_uv.py vendored
View File

@ -1,94 +0,0 @@
#!/usr/bin/env python
import os
import platform
import sys
try:
import multiprocessing.synchronize
gyp_parallel_support = True
except ImportError:
gyp_parallel_support = False
CC = os.environ.get('CC', 'cc')
script_dir = os.path.dirname(__file__)
uv_root = os.path.normpath(script_dir)
output_dir = os.path.join(os.path.abspath(uv_root), 'out')
sys.path.insert(0, os.path.join(uv_root, 'build', 'gyp', 'pylib'))
try:
import gyp
except ImportError:
print('You need to install gyp in build/gyp first. See the README.')
sys.exit(42)
def host_arch():
machine = platform.machine()
if machine == 'i386': return 'ia32'
if machine == 'AMD64': return 'x64'
if machine == 'x86_64': return 'x64'
if machine.startswith('arm'): return 'arm'
if machine.startswith('mips'): return 'mips'
return machine # Return as-is and hope for the best.
def run_gyp(args):
rc = gyp.main(args)
if rc != 0:
print('Error running GYP')
sys.exit(rc)
if __name__ == '__main__':
args = sys.argv[1:]
# GYP bug.
# On msvs it will crash if it gets an absolute path.
# On Mac/make it will crash if it doesn't get an absolute path.
if sys.platform == 'win32':
args.append(os.path.join(uv_root, 'uv.gyp'))
common_fn = os.path.join(uv_root, 'common.gypi')
options_fn = os.path.join(uv_root, 'options.gypi')
# we force vs 2010 over 2008 which would otherwise be the default for gyp
if not os.environ.get('GYP_MSVS_VERSION'):
os.environ['GYP_MSVS_VERSION'] = '2010'
else:
args.append(os.path.join(os.path.abspath(uv_root), 'uv.gyp'))
common_fn = os.path.join(os.path.abspath(uv_root), 'common.gypi')
options_fn = os.path.join(os.path.abspath(uv_root), 'options.gypi')
if os.path.exists(common_fn):
args.extend(['-I', common_fn])
if os.path.exists(options_fn):
args.extend(['-I', options_fn])
args.append('--depth=' + uv_root)
# There's a bug with windows which doesn't allow this feature.
if sys.platform != 'win32':
if '-f' not in args:
args.extend('-f make'.split())
if 'eclipse' not in args and 'ninja' not in args:
args.extend(['-Goutput_dir=' + output_dir])
args.extend(['--generator-output', output_dir])
if not any(a.startswith('-Dhost_arch=') for a in args):
args.append('-Dhost_arch=%s' % host_arch())
if not any(a.startswith('-Dtarget_arch=') for a in args):
args.append('-Dtarget_arch=%s' % host_arch())
if not any(a.startswith('-Duv_library=') for a in args):
args.append('-Duv_library=static_library')
# Some platforms (OpenBSD for example) don't have multiprocessing.synchronize
# so gyp must be run with --no-parallel
if not gyp_parallel_support:
args.append('--no-parallel')
gyp_args = list(args)
print(gyp_args)
run_gyp(gyp_args)

View File

@ -1,66 +0,0 @@
/*
Copyright (c) 2016, Kari Tristan Helgason <kthelgason@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _UV_PTHREAD_BARRIER_
#define _UV_PTHREAD_BARRIER_
#include <errno.h>
#include <pthread.h>
#include <semaphore.h> /* sem_t */
#define PTHREAD_BARRIER_SERIAL_THREAD 0x12345
/*
* To maintain ABI compatibility with
* libuv v1.x struct is padded according
* to target platform
*/
#if defined(__ANDROID__)
# define UV_BARRIER_STRUCT_PADDING \
sizeof(pthread_mutex_t) + \
sizeof(pthread_cond_t) + \
sizeof(unsigned int) - \
sizeof(void *)
#elif defined(__APPLE__)
# define UV_BARRIER_STRUCT_PADDING \
sizeof(pthread_mutex_t) + \
2 * sizeof(sem_t) + \
2 * sizeof(unsigned int) - \
sizeof(void *)
#else
# define UV_BARRIER_STRUCT_PADDING 0
#endif
typedef struct {
pthread_mutex_t mutex;
pthread_cond_t cond;
unsigned threshold;
unsigned in;
unsigned out;
} _uv_barrier;
typedef struct {
_uv_barrier* b;
char _pad[UV_BARRIER_STRUCT_PADDING];
} pthread_barrier_t;
int pthread_barrier_init(pthread_barrier_t* barrier,
const void* barrier_attr,
unsigned count);
int pthread_barrier_wait(pthread_barrier_t* barrier);
int pthread_barrier_destroy(pthread_barrier_t *barrier);
#endif /* _UV_PTHREAD_BARRIER_ */
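
For reference, a minimal usage sketch of the barrier API declared by this shim (a hypothetical example, not part of the vendored sources): N threads call pthread_barrier_wait() and exactly one caller receives PTHREAD_BARRIER_SERIAL_THREAD while the others receive 0.
/* Hypothetical sketch: rendezvous of N threads on a barrier. On platforms
 * without a native pthread barrier, the shim above supplies the same API. */
#include <pthread.h>
#include <stdio.h>
#define N 4
static pthread_barrier_t barrier;
static void* worker(void* arg) {
  int rc = pthread_barrier_wait(&barrier);
  if (rc == PTHREAD_BARRIER_SERIAL_THREAD)
    printf("thread %d is the serial thread\n", (int) (long) arg);
  return NULL;
}
int main(void) {
  pthread_t tids[N];
  int i;
  pthread_barrier_init(&barrier, NULL, N);
  for (i = 0; i < N; i++)
    pthread_create(&tids[i], NULL, worker, (void*) (long) i);
  for (i = 0; i < N; i++)
    pthread_join(tids[i], NULL);
  pthread_barrier_destroy(&barrier);
  return 0;
}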

View File

@ -27,6 +27,10 @@
extern "C" {
#endif
#if defined(BUILDING_UV_SHARED) && defined(USING_UV_SHARED)
#error "Define either BUILDING_UV_SHARED or USING_UV_SHARED, not both."
#endif
#ifdef _WIN32
/* Windows - set up dll import/export decorators. */
# if defined(BUILDING_UV_SHARED)
@ -45,21 +49,21 @@ extern "C" {
# define UV_EXTERN /* nothing */
#endif
#include "uv-errno.h"
#include "uv-version.h"
#include "uv/errno.h"
#include "uv/version.h"
#include <stddef.h>
#include <stdio.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "stdint-msvc2008.h"
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#if defined(_WIN32)
# include "uv-win.h"
# include "uv/win.h"
#else
# include "uv-unix.h"
# include "uv/unix.h"
#endif
/* Expand this list if necessary. */
@ -140,6 +144,10 @@ extern "C" {
XX(ENXIO, "no such device or address") \
XX(EMLINK, "too many links") \
XX(EHOSTDOWN, "host is down") \
XX(EREMOTEIO, "remote I/O error") \
XX(ENOTTY, "inappropriate ioctl for device") \
XX(EFTYPE, "inappropriate file type or format") \
XX(EILSEQ, "illegal byte sequence") \
#define UV_HANDLE_TYPE_MAP(XX) \
XX(ASYNC, async) \
@ -169,6 +177,7 @@ extern "C" {
XX(WORK, work) \
XX(GETADDRINFO, getaddrinfo) \
XX(GETNAMEINFO, getnameinfo) \
XX(RANDOM, random) \
typedef enum {
#define XX(code, _) UV_ ## code = UV__ ## code,
@ -199,6 +208,7 @@ typedef enum {
/* Handle types. */
typedef struct uv_loop_s uv_loop_t;
typedef struct uv_handle_s uv_handle_t;
typedef struct uv_dir_s uv_dir_t;
typedef struct uv_stream_s uv_stream_t;
typedef struct uv_tcp_s uv_tcp_t;
typedef struct uv_udp_s uv_udp_t;
@ -225,12 +235,16 @@ typedef struct uv_connect_s uv_connect_t;
typedef struct uv_udp_send_s uv_udp_send_t;
typedef struct uv_fs_s uv_fs_t;
typedef struct uv_work_s uv_work_t;
typedef struct uv_random_s uv_random_t;
/* None of the above. */
typedef struct uv_env_item_s uv_env_item_t;
typedef struct uv_cpu_info_s uv_cpu_info_t;
typedef struct uv_interface_address_s uv_interface_address_t;
typedef struct uv_dirent_s uv_dirent_t;
typedef struct uv_passwd_s uv_passwd_t;
typedef struct uv_utsname_s uv_utsname_t;
typedef struct uv_statfs_s uv_statfs_t;
typedef enum {
UV_LOOP_BLOCK_SIGNAL
@ -274,6 +288,7 @@ UV_EXTERN void uv_loop_delete(uv_loop_t*);
UV_EXTERN size_t uv_loop_size(void);
UV_EXTERN int uv_loop_alive(const uv_loop_t* loop);
UV_EXTERN int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...);
UV_EXTERN int uv_loop_fork(uv_loop_t* loop);
UV_EXTERN int uv_run(uv_loop_t*, uv_run_mode mode);
UV_EXTERN void uv_stop(uv_loop_t*);
@ -317,6 +332,10 @@ typedef void (*uv_getnameinfo_cb)(uv_getnameinfo_t* req,
int status,
const char* hostname,
const char* service);
typedef void (*uv_random_cb)(uv_random_t* req,
int status,
void* buf,
size_t buflen);
typedef struct {
long tv_sec;
@ -366,7 +385,10 @@ typedef enum {
UV_EXTERN int uv_translate_sys_error(int sys_errno);
UV_EXTERN const char* uv_strerror(int err);
UV_EXTERN char* uv_strerror_r(int err, char* buf, size_t buflen);
UV_EXTERN const char* uv_err_name(int err);
UV_EXTERN char* uv_err_name_r(int err, char* buf, size_t buflen);
#define UV_REQ_FIELDS \
@ -375,8 +397,7 @@ UV_EXTERN const char* uv_err_name(int err);
/* read-only */ \
uv_req_type type; \
/* private */ \
void* active_queue[2]; \
void* reserved[4]; \
void* reserved[6]; \
UV_REQ_PRIVATE_FIELDS \
/* Abstract base class of all requests. */
@ -422,7 +443,17 @@ struct uv_handle_s {
};
UV_EXTERN size_t uv_handle_size(uv_handle_type type);
UV_EXTERN uv_handle_type uv_handle_get_type(const uv_handle_t* handle);
UV_EXTERN const char* uv_handle_type_name(uv_handle_type type);
UV_EXTERN void* uv_handle_get_data(const uv_handle_t* handle);
UV_EXTERN uv_loop_t* uv_handle_get_loop(const uv_handle_t* handle);
UV_EXTERN void uv_handle_set_data(uv_handle_t* handle, void* data);
UV_EXTERN size_t uv_req_size(uv_req_type type);
UV_EXTERN void* uv_req_get_data(const uv_req_t* req);
UV_EXTERN void uv_req_set_data(uv_req_t* req, void* data);
UV_EXTERN uv_req_type uv_req_get_type(const uv_req_t* req);
UV_EXTERN const char* uv_req_type_name(uv_req_type type);
UV_EXTERN int uv_is_active(const uv_handle_t* handle);
@ -462,6 +493,8 @@ struct uv_stream_s {
UV_STREAM_FIELDS
};
UV_EXTERN size_t uv_stream_get_write_queue_size(const uv_stream_t* stream);
UV_EXTERN int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb);
UV_EXTERN int uv_accept(uv_stream_t* server, uv_stream_t* client);
@ -489,7 +522,7 @@ UV_EXTERN int uv_try_write(uv_stream_t* handle,
struct uv_write_s {
UV_REQ_FIELDS
uv_write_cb cb;
uv_stream_t* send_handle;
uv_stream_t* send_handle; /* TODO: make private and unix-only in v2.x. */
uv_stream_t* handle;
UV_WRITE_PRIVATE_FIELDS
};
@ -537,6 +570,7 @@ UV_EXTERN int uv_tcp_getsockname(const uv_tcp_t* handle,
UV_EXTERN int uv_tcp_getpeername(const uv_tcp_t* handle,
struct sockaddr* name,
int* namelen);
UV_EXTERN int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb);
UV_EXTERN int uv_tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
const struct sockaddr* addr,
@ -611,7 +645,11 @@ UV_EXTERN int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock);
UV_EXTERN int uv_udp_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int flags);
UV_EXTERN int uv_udp_connect(uv_udp_t* handle, const struct sockaddr* addr);
UV_EXTERN int uv_udp_getpeername(const uv_udp_t* handle,
struct sockaddr* name,
int* namelen);
UV_EXTERN int uv_udp_getsockname(const uv_udp_t* handle,
struct sockaddr* name,
int* namelen);
@ -619,6 +657,11 @@ UV_EXTERN int uv_udp_set_membership(uv_udp_t* handle,
const char* multicast_addr,
const char* interface_addr,
uv_membership membership);
UV_EXTERN int uv_udp_set_source_membership(uv_udp_t* handle,
const char* multicast_addr,
const char* interface_addr,
const char* source_addr,
uv_membership membership);
UV_EXTERN int uv_udp_set_multicast_loop(uv_udp_t* handle, int on);
UV_EXTERN int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl);
UV_EXTERN int uv_udp_set_multicast_interface(uv_udp_t* handle,
@ -639,6 +682,8 @@ UV_EXTERN int uv_udp_recv_start(uv_udp_t* handle,
uv_alloc_cb alloc_cb,
uv_udp_recv_cb recv_cb);
UV_EXTERN int uv_udp_recv_stop(uv_udp_t* handle);
UV_EXTERN size_t uv_udp_get_send_queue_size(const uv_udp_t* handle);
UV_EXTERN size_t uv_udp_get_send_queue_count(const uv_udp_t* handle);
/*
@ -661,10 +706,25 @@ typedef enum {
UV_TTY_MODE_IO
} uv_tty_mode_t;
typedef enum {
/*
* The console supports handling of virtual terminal sequences
* (Windows10 new console, ConEmu)
*/
UV_TTY_SUPPORTED,
/* The console cannot process the virtual terminal sequence. (Legacy
* console)
*/
UV_TTY_UNSUPPORTED
} uv_tty_vtermstate_t;
UV_EXTERN int uv_tty_init(uv_loop_t*, uv_tty_t*, uv_file fd, int readable);
UV_EXTERN int uv_tty_set_mode(uv_tty_t*, uv_tty_mode_t mode);
UV_EXTERN int uv_tty_reset_mode(void);
UV_EXTERN int uv_tty_get_winsize(uv_tty_t*, int* width, int* height);
UV_EXTERN void uv_tty_set_vterm_state(uv_tty_vtermstate_t state);
UV_EXTERN int uv_tty_get_vterm_state(uv_tty_vtermstate_t* state);
#ifdef __cplusplus
extern "C++" {
@ -707,6 +767,7 @@ UV_EXTERN int uv_pipe_getpeername(const uv_pipe_t* handle,
UV_EXTERN void uv_pipe_pending_instances(uv_pipe_t* handle, int count);
UV_EXTERN int uv_pipe_pending_count(uv_pipe_t* handle);
UV_EXTERN uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle);
UV_EXTERN int uv_pipe_chmod(uv_pipe_t* handle, int flags);
struct uv_poll_s {
@ -718,7 +779,8 @@ struct uv_poll_s {
enum uv_poll_event {
UV_READABLE = 1,
UV_WRITABLE = 2,
UV_DISCONNECT = 4
UV_DISCONNECT = 4,
UV_PRIORITIZED = 8
};
UV_EXTERN int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd);
@ -847,7 +909,13 @@ typedef enum {
* flags may be specified to create a duplex data stream.
*/
UV_READABLE_PIPE = 0x10,
UV_WRITABLE_PIPE = 0x20
UV_WRITABLE_PIPE = 0x20,
/*
* Open the child pipe handle in overlapped mode on Windows.
* On Unix it is silently ignored.
*/
UV_OVERLAPPED_PIPE = 0x40
} uv_stdio_flags;
typedef struct uv_stdio_container_s {
@ -934,12 +1002,23 @@ enum uv_process_flags {
* the child's process handle.
*/
UV_PROCESS_DETACHED = (1 << 3),
/*
* Hide the subprocess window that would normally be created. This option is
* only meaningful on Windows systems. On Unix it is silently ignored.
*/
UV_PROCESS_WINDOWS_HIDE = (1 << 4),
/*
* Hide the subprocess console window that would normally be created. This
* option is only meaningful on Windows systems. On Unix it is silently
* ignored.
*/
UV_PROCESS_WINDOWS_HIDE = (1 << 4)
UV_PROCESS_WINDOWS_HIDE_CONSOLE = (1 << 5),
/*
* Hide the subprocess GUI window that would normally be created. This
* option is only meaningful on Windows systems. On Unix it is silently
* ignored.
*/
UV_PROCESS_WINDOWS_HIDE_GUI = (1 << 6)
};
/*
@ -957,6 +1036,7 @@ UV_EXTERN int uv_spawn(uv_loop_t* loop,
const uv_process_options_t* options);
UV_EXTERN int uv_process_kill(uv_process_t*, int signum);
UV_EXTERN int uv_kill(int pid, int signum);
UV_EXTERN uv_pid_t uv_process_get_pid(const uv_process_t*);
/*
@ -978,16 +1058,18 @@ UV_EXTERN int uv_queue_work(uv_loop_t* loop,
UV_EXTERN int uv_cancel(uv_req_t* req);
struct uv_cpu_times_s {
uint64_t user;
uint64_t nice;
uint64_t sys;
uint64_t idle;
uint64_t irq;
};
struct uv_cpu_info_s {
char* model;
int speed;
struct uv_cpu_times_s {
uint64_t user;
uint64_t nice;
uint64_t sys;
uint64_t idle;
uint64_t irq;
} cpu_times;
struct uv_cpu_times_s cpu_times;
};
struct uv_interface_address_s {
@ -1012,6 +1094,27 @@ struct uv_passwd_s {
char* homedir;
};
struct uv_utsname_s {
char sysname[256];
char release[256];
char version[256];
char machine[256];
/* This struct does not contain the nodename and domainname fields present in
the utsname type. domainname is a GNU extension. Both fields are referred
to as meaningless in the docs. */
};
struct uv_statfs_s {
uint64_t f_type;
uint64_t f_bsize;
uint64_t f_blocks;
uint64_t f_bfree;
uint64_t f_bavail;
uint64_t f_files;
uint64_t f_ffree;
uint64_t f_spare[4];
};
typedef enum {
UV_DIRENT_UNKNOWN,
UV_DIRENT_FILE,
@ -1033,12 +1136,19 @@ UV_EXTERN int uv_get_process_title(char* buffer, size_t size);
UV_EXTERN int uv_set_process_title(const char* title);
UV_EXTERN int uv_resident_set_memory(size_t* rss);
UV_EXTERN int uv_uptime(double* uptime);
UV_EXTERN uv_os_fd_t uv_get_osfhandle(int fd);
UV_EXTERN int uv_open_osfhandle(uv_os_fd_t os_fd);
typedef struct {
long tv_sec;
long tv_usec;
} uv_timeval_t;
typedef struct {
int64_t tv_sec;
int32_t tv_usec;
} uv_timeval64_t;
typedef struct {
uv_timeval_t ru_utime; /* user CPU time used */
uv_timeval_t ru_stime; /* system CPU time used */
@ -1064,6 +1174,18 @@ UV_EXTERN int uv_os_homedir(char* buffer, size_t* size);
UV_EXTERN int uv_os_tmpdir(char* buffer, size_t* size);
UV_EXTERN int uv_os_get_passwd(uv_passwd_t* pwd);
UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd);
UV_EXTERN uv_pid_t uv_os_getpid(void);
UV_EXTERN uv_pid_t uv_os_getppid(void);
#define UV_PRIORITY_LOW 19
#define UV_PRIORITY_BELOW_NORMAL 10
#define UV_PRIORITY_NORMAL 0
#define UV_PRIORITY_ABOVE_NORMAL -7
#define UV_PRIORITY_HIGH -14
#define UV_PRIORITY_HIGHEST -20
UV_EXTERN int uv_os_getpriority(uv_pid_t pid, int* priority);
UV_EXTERN int uv_os_setpriority(uv_pid_t pid, int priority);
UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count);
UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count);
@ -1073,6 +1195,32 @@ UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses,
UV_EXTERN void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count);
struct uv_env_item_s {
char* name;
char* value;
};
UV_EXTERN int uv_os_environ(uv_env_item_t** envitems, int* count);
UV_EXTERN void uv_os_free_environ(uv_env_item_t* envitems, int count);
UV_EXTERN int uv_os_getenv(const char* name, char* buffer, size_t* size);
UV_EXTERN int uv_os_setenv(const char* name, const char* value);
UV_EXTERN int uv_os_unsetenv(const char* name);
#ifdef MAXHOSTNAMELEN
# define UV_MAXHOSTNAMESIZE (MAXHOSTNAMELEN + 1)
#else
/*
Fallback for the maximum hostname size, including the null terminator. The
Windows gethostname() documentation states that 256 bytes will always be
large enough to hold the null-terminated hostname.
*/
# define UV_MAXHOSTNAMESIZE 256
#endif
UV_EXTERN int uv_os_gethostname(char* buffer, size_t* size);
UV_EXTERN int uv_os_uname(uv_utsname_t* buffer);
typedef enum {
UV_FS_UNKNOWN = -1,
@ -1104,9 +1252,23 @@ typedef enum {
UV_FS_READLINK,
UV_FS_CHOWN,
UV_FS_FCHOWN,
UV_FS_REALPATH
UV_FS_REALPATH,
UV_FS_COPYFILE,
UV_FS_LCHOWN,
UV_FS_OPENDIR,
UV_FS_READDIR,
UV_FS_CLOSEDIR,
UV_FS_STATFS,
UV_FS_MKSTEMP
} uv_fs_type;
struct uv_dir_s {
uv_dirent_t* dirents;
size_t nentries;
void* reserved[4];
UV_DIR_PRIVATE_FIELDS
};
/* uv_fs_t is a subclass of uv_req_t. */
struct uv_fs_s {
UV_REQ_FIELDS
@ -1120,6 +1282,12 @@ struct uv_fs_s {
UV_FS_PRIVATE_FIELDS
};
UV_EXTERN uv_fs_type uv_fs_get_type(const uv_fs_t*);
UV_EXTERN ssize_t uv_fs_get_result(const uv_fs_t*);
UV_EXTERN void* uv_fs_get_ptr(const uv_fs_t*);
UV_EXTERN const char* uv_fs_get_path(const uv_fs_t*);
UV_EXTERN uv_stat_t* uv_fs_get_statbuf(uv_fs_t*);
UV_EXTERN void uv_fs_req_cleanup(uv_fs_t* req);
UV_EXTERN int uv_fs_close(uv_loop_t* loop,
uv_fs_t* req,
@ -1149,6 +1317,30 @@ UV_EXTERN int uv_fs_write(uv_loop_t* loop,
unsigned int nbufs,
int64_t offset,
uv_fs_cb cb);
/*
* This flag can be used with uv_fs_copyfile() to return an error if the
* destination already exists.
*/
#define UV_FS_COPYFILE_EXCL 0x0001
/*
* This flag can be used with uv_fs_copyfile() to attempt to create a reflink.
* If copy-on-write is not supported, a fallback copy mechanism is used.
*/
#define UV_FS_COPYFILE_FICLONE 0x0002
/*
* This flag can be used with uv_fs_copyfile() to attempt to create a reflink.
* If copy-on-write is not supported, an error is returned.
*/
#define UV_FS_COPYFILE_FICLONE_FORCE 0x0004
UV_EXTERN int uv_fs_copyfile(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
const char* new_path,
int flags,
uv_fs_cb cb);
UV_EXTERN int uv_fs_mkdir(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
@ -1158,6 +1350,10 @@ UV_EXTERN int uv_fs_mkdtemp(uv_loop_t* loop,
uv_fs_t* req,
const char* tpl,
uv_fs_cb cb);
UV_EXTERN int uv_fs_mkstemp(uv_loop_t* loop,
uv_fs_t* req,
const char* tpl,
uv_fs_cb cb);
UV_EXTERN int uv_fs_rmdir(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
@ -1169,6 +1365,18 @@ UV_EXTERN int uv_fs_scandir(uv_loop_t* loop,
uv_fs_cb cb);
UV_EXTERN int uv_fs_scandir_next(uv_fs_t* req,
uv_dirent_t* ent);
UV_EXTERN int uv_fs_opendir(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
uv_fs_cb cb);
UV_EXTERN int uv_fs_readdir(uv_loop_t* loop,
uv_fs_t* req,
uv_dir_t* dir,
uv_fs_cb cb);
UV_EXTERN int uv_fs_closedir(uv_loop_t* loop,
uv_fs_t* req,
uv_dir_t* dir,
uv_fs_cb cb);
UV_EXTERN int uv_fs_stat(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
@ -1277,6 +1485,16 @@ UV_EXTERN int uv_fs_fchown(uv_loop_t* loop,
uv_uid_t uid,
uv_gid_t gid,
uv_fs_cb cb);
UV_EXTERN int uv_fs_lchown(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
uv_uid_t uid,
uv_gid_t gid,
uv_fs_cb cb);
UV_EXTERN int uv_fs_statfs(uv_loop_t* loop,
uv_fs_t* req,
const char* path,
uv_fs_cb cb);
enum uv_fs_event {
@ -1324,6 +1542,9 @@ UV_EXTERN int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle);
UV_EXTERN int uv_signal_start(uv_signal_t* handle,
uv_signal_cb signal_cb,
int signum);
UV_EXTERN int uv_signal_start_oneshot(uv_signal_t* handle,
uv_signal_cb signal_cb,
int signum);
UV_EXTERN int uv_signal_stop(uv_signal_t* handle);
UV_EXTERN void uv_loadavg(double avg[3]);
@ -1379,6 +1600,41 @@ UV_EXTERN int uv_ip6_name(const struct sockaddr_in6* src, char* dst, size_t size
UV_EXTERN int uv_inet_ntop(int af, const void* src, char* dst, size_t size);
UV_EXTERN int uv_inet_pton(int af, const char* src, void* dst);
struct uv_random_s {
UV_REQ_FIELDS
/* read-only */
uv_loop_t* loop;
/* private */
int status;
void* buf;
size_t buflen;
uv_random_cb cb;
struct uv__work work_req;
};
UV_EXTERN int uv_random(uv_loop_t* loop,
uv_random_t* req,
void *buf,
size_t buflen,
unsigned flags, /* For future extension; must be 0. */
uv_random_cb cb);
#if defined(IF_NAMESIZE)
# define UV_IF_NAMESIZE (IF_NAMESIZE + 1)
#elif defined(IFNAMSIZ)
# define UV_IF_NAMESIZE (IFNAMSIZ + 1)
#else
# define UV_IF_NAMESIZE (16 + 1)
#endif
UV_EXTERN int uv_if_indextoname(unsigned int ifindex,
char* buffer,
size_t* size);
UV_EXTERN int uv_if_indextoiid(unsigned int ifindex,
char* buffer,
size_t* size);
UV_EXTERN int uv_exepath(char* buffer, size_t* size);
UV_EXTERN int uv_cwd(char* buffer, size_t* size);
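
A hypothetical sketch of the new uv_random() request declared above; passing a NULL callback is documented to run the request synchronously, in which case the loop and req arguments are unused:
/* Hypothetical sketch: fill a buffer with random bytes synchronously. */
#include <stdio.h>
#include <uv.h>
int main(void) {
  unsigned char buf[16];
  size_t i;
  int rc = uv_random(NULL, NULL, buf, sizeof(buf), 0, NULL);
  if (rc != 0) {
    fprintf(stderr, "uv_random: %s\n", uv_strerror(rc));
    return 1;
  }
  for (i = 0; i < sizeof(buf); i++)
    printf("%02x", buf[i]);
  printf("\n");
  return 0;
}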
@ -1387,8 +1643,10 @@ UV_EXTERN int uv_chdir(const char* dir);
UV_EXTERN uint64_t uv_get_free_memory(void);
UV_EXTERN uint64_t uv_get_total_memory(void);
UV_EXTERN uint64_t uv_get_constrained_memory(void);
UV_EXTERN uint64_t uv_hrtime(void);
UV_EXTERN void uv_sleep(unsigned int msec);
UV_EXTERN void uv_disable_stdio_inheritance(void);
@ -1398,6 +1656,7 @@ UV_EXTERN int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr);
UV_EXTERN const char* uv_dlerror(const uv_lib_t* lib);
UV_EXTERN int uv_mutex_init(uv_mutex_t* handle);
UV_EXTERN int uv_mutex_init_recursive(uv_mutex_t* handle);
UV_EXTERN void uv_mutex_destroy(uv_mutex_t* handle);
UV_EXTERN void uv_mutex_lock(uv_mutex_t* handle);
UV_EXTERN int uv_mutex_trylock(uv_mutex_t* handle);
@ -1439,9 +1698,29 @@ UV_EXTERN void uv_key_delete(uv_key_t* key);
UV_EXTERN void* uv_key_get(uv_key_t* key);
UV_EXTERN void uv_key_set(uv_key_t* key, void* value);
UV_EXTERN int uv_gettimeofday(uv_timeval64_t* tv);
typedef void (*uv_thread_cb)(void* arg);
UV_EXTERN int uv_thread_create(uv_thread_t* tid, uv_thread_cb entry, void* arg);
typedef enum {
UV_THREAD_NO_FLAGS = 0x00,
UV_THREAD_HAS_STACK_SIZE = 0x01
} uv_thread_create_flags;
struct uv_thread_options_s {
unsigned int flags;
size_t stack_size;
/* More fields may be added at any time. */
};
typedef struct uv_thread_options_s uv_thread_options_t;
UV_EXTERN int uv_thread_create_ex(uv_thread_t* tid,
const uv_thread_options_t* params,
uv_thread_cb entry,
void* arg);
UV_EXTERN uv_thread_t uv_thread_self(void);
UV_EXTERN int uv_thread_join(uv_thread_t *tid);
UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2);
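
A hypothetical sketch of the uv_thread_create_ex() API above, starting a thread with an explicit stack size (the entry function and the size are illustrative only):
/* Hypothetical sketch: spawn a thread with a 1 MiB stack request; libuv may
 * round the size up to satisfy platform minimums. */
#include <stdio.h>
#include <uv.h>
static void entry(void* arg) {
  printf("hello from thread, arg=%s\n", (const char*) arg);
}
int main(void) {
  uv_thread_t tid;
  uv_thread_options_t options;
  int rc;
  options.flags = UV_THREAD_HAS_STACK_SIZE;
  options.stack_size = 1024 * 1024;
  rc = uv_thread_create_ex(&tid, &options, entry, (void*) "demo");
  if (rc != 0) {
    fprintf(stderr, "uv_thread_create_ex: %s\n", uv_strerror(rc));
    return 1;
  }
  uv_thread_join(&tid);
  return 0;
}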
@ -1464,12 +1743,17 @@ struct uv_loop_s {
/* Loop reference counting. */
unsigned int active_handles;
void* handle_queue[2];
void* active_reqs[2];
union {
void* unused[2];
unsigned int count;
} active_reqs;
/* Internal flag to signal loop stop. */
unsigned int stop_flag;
UV_LOOP_PRIVATE_FIELDS
};
UV_EXTERN void* uv_loop_get_data(const uv_loop_t*);
UV_EXTERN void uv_loop_set_data(uv_loop_t*, void* data);
/* Don't export the private CPP symbols. */
#undef UV_HANDLE_TYPE_PRIVATE
@ -1490,6 +1774,7 @@ struct uv_loop_s {
#undef UV_SIGNAL_PRIVATE_FIELDS
#undef UV_LOOP_PRIVATE_FIELDS
#undef UV_LOOP_PRIVATE_PLATFORM_FIELDS
#undef UV__ERR
#ifdef __cplusplus
}
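
A hypothetical sketch of the uv_fs_copyfile() flags documented in this header: a synchronous copy (NULL callback) that asks for a copy-on-write clone where the filesystem supports it and otherwise falls back to a plain copy. The file names are placeholders.
/* Hypothetical sketch: synchronous copy with a best-effort reflink. */
#include <stdio.h>
#include <uv.h>
int main(void) {
  uv_fs_t req;
  int rc = uv_fs_copyfile(uv_default_loop(), &req,
                          "source.dat", "dest.dat",
                          UV_FS_COPYFILE_FICLONE, NULL);
  if (rc != 0)
    fprintf(stderr, "copyfile: %s\n", uv_strerror(rc));
  uv_fs_req_cleanup(&req);
  return rc == 0 ? 0 : 1;
}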

View File

@ -23,6 +23,11 @@
#define UV_ERRNO_H_
#include <errno.h>
#if EDOM > 0
# define UV__ERR(x) (-(x))
#else
# define UV__ERR(x) (x)
#endif
#define UV__EOF (-4095)
#define UV__UNKNOWN (-4094)
@ -46,355 +51,355 @@
* a fairly common practice for Windows programmers to redefine errno codes.
*/
#if defined(E2BIG) && !defined(_WIN32)
# define UV__E2BIG (-E2BIG)
# define UV__E2BIG UV__ERR(E2BIG)
#else
# define UV__E2BIG (-4093)
#endif
#if defined(EACCES) && !defined(_WIN32)
# define UV__EACCES (-EACCES)
# define UV__EACCES UV__ERR(EACCES)
#else
# define UV__EACCES (-4092)
#endif
#if defined(EADDRINUSE) && !defined(_WIN32)
# define UV__EADDRINUSE (-EADDRINUSE)
# define UV__EADDRINUSE UV__ERR(EADDRINUSE)
#else
# define UV__EADDRINUSE (-4091)
#endif
#if defined(EADDRNOTAVAIL) && !defined(_WIN32)
# define UV__EADDRNOTAVAIL (-EADDRNOTAVAIL)
# define UV__EADDRNOTAVAIL UV__ERR(EADDRNOTAVAIL)
#else
# define UV__EADDRNOTAVAIL (-4090)
#endif
#if defined(EAFNOSUPPORT) && !defined(_WIN32)
# define UV__EAFNOSUPPORT (-EAFNOSUPPORT)
# define UV__EAFNOSUPPORT UV__ERR(EAFNOSUPPORT)
#else
# define UV__EAFNOSUPPORT (-4089)
#endif
#if defined(EAGAIN) && !defined(_WIN32)
# define UV__EAGAIN (-EAGAIN)
# define UV__EAGAIN UV__ERR(EAGAIN)
#else
# define UV__EAGAIN (-4088)
#endif
#if defined(EALREADY) && !defined(_WIN32)
# define UV__EALREADY (-EALREADY)
# define UV__EALREADY UV__ERR(EALREADY)
#else
# define UV__EALREADY (-4084)
#endif
#if defined(EBADF) && !defined(_WIN32)
# define UV__EBADF (-EBADF)
# define UV__EBADF UV__ERR(EBADF)
#else
# define UV__EBADF (-4083)
#endif
#if defined(EBUSY) && !defined(_WIN32)
# define UV__EBUSY (-EBUSY)
# define UV__EBUSY UV__ERR(EBUSY)
#else
# define UV__EBUSY (-4082)
#endif
#if defined(ECANCELED) && !defined(_WIN32)
# define UV__ECANCELED (-ECANCELED)
# define UV__ECANCELED UV__ERR(ECANCELED)
#else
# define UV__ECANCELED (-4081)
#endif
#if defined(ECHARSET) && !defined(_WIN32)
# define UV__ECHARSET (-ECHARSET)
# define UV__ECHARSET UV__ERR(ECHARSET)
#else
# define UV__ECHARSET (-4080)
#endif
#if defined(ECONNABORTED) && !defined(_WIN32)
# define UV__ECONNABORTED (-ECONNABORTED)
# define UV__ECONNABORTED UV__ERR(ECONNABORTED)
#else
# define UV__ECONNABORTED (-4079)
#endif
#if defined(ECONNREFUSED) && !defined(_WIN32)
# define UV__ECONNREFUSED (-ECONNREFUSED)
# define UV__ECONNREFUSED UV__ERR(ECONNREFUSED)
#else
# define UV__ECONNREFUSED (-4078)
#endif
#if defined(ECONNRESET) && !defined(_WIN32)
# define UV__ECONNRESET (-ECONNRESET)
# define UV__ECONNRESET UV__ERR(ECONNRESET)
#else
# define UV__ECONNRESET (-4077)
#endif
#if defined(EDESTADDRREQ) && !defined(_WIN32)
# define UV__EDESTADDRREQ (-EDESTADDRREQ)
# define UV__EDESTADDRREQ UV__ERR(EDESTADDRREQ)
#else
# define UV__EDESTADDRREQ (-4076)
#endif
#if defined(EEXIST) && !defined(_WIN32)
# define UV__EEXIST (-EEXIST)
# define UV__EEXIST UV__ERR(EEXIST)
#else
# define UV__EEXIST (-4075)
#endif
#if defined(EFAULT) && !defined(_WIN32)
# define UV__EFAULT (-EFAULT)
# define UV__EFAULT UV__ERR(EFAULT)
#else
# define UV__EFAULT (-4074)
#endif
#if defined(EHOSTUNREACH) && !defined(_WIN32)
# define UV__EHOSTUNREACH (-EHOSTUNREACH)
# define UV__EHOSTUNREACH UV__ERR(EHOSTUNREACH)
#else
# define UV__EHOSTUNREACH (-4073)
#endif
#if defined(EINTR) && !defined(_WIN32)
# define UV__EINTR (-EINTR)
# define UV__EINTR UV__ERR(EINTR)
#else
# define UV__EINTR (-4072)
#endif
#if defined(EINVAL) && !defined(_WIN32)
# define UV__EINVAL (-EINVAL)
# define UV__EINVAL UV__ERR(EINVAL)
#else
# define UV__EINVAL (-4071)
#endif
#if defined(EIO) && !defined(_WIN32)
# define UV__EIO (-EIO)
# define UV__EIO UV__ERR(EIO)
#else
# define UV__EIO (-4070)
#endif
#if defined(EISCONN) && !defined(_WIN32)
# define UV__EISCONN (-EISCONN)
# define UV__EISCONN UV__ERR(EISCONN)
#else
# define UV__EISCONN (-4069)
#endif
#if defined(EISDIR) && !defined(_WIN32)
# define UV__EISDIR (-EISDIR)
# define UV__EISDIR UV__ERR(EISDIR)
#else
# define UV__EISDIR (-4068)
#endif
#if defined(ELOOP) && !defined(_WIN32)
# define UV__ELOOP (-ELOOP)
# define UV__ELOOP UV__ERR(ELOOP)
#else
# define UV__ELOOP (-4067)
#endif
#if defined(EMFILE) && !defined(_WIN32)
# define UV__EMFILE (-EMFILE)
# define UV__EMFILE UV__ERR(EMFILE)
#else
# define UV__EMFILE (-4066)
#endif
#if defined(EMSGSIZE) && !defined(_WIN32)
# define UV__EMSGSIZE (-EMSGSIZE)
# define UV__EMSGSIZE UV__ERR(EMSGSIZE)
#else
# define UV__EMSGSIZE (-4065)
#endif
#if defined(ENAMETOOLONG) && !defined(_WIN32)
# define UV__ENAMETOOLONG (-ENAMETOOLONG)
# define UV__ENAMETOOLONG UV__ERR(ENAMETOOLONG)
#else
# define UV__ENAMETOOLONG (-4064)
#endif
#if defined(ENETDOWN) && !defined(_WIN32)
# define UV__ENETDOWN (-ENETDOWN)
# define UV__ENETDOWN UV__ERR(ENETDOWN)
#else
# define UV__ENETDOWN (-4063)
#endif
#if defined(ENETUNREACH) && !defined(_WIN32)
# define UV__ENETUNREACH (-ENETUNREACH)
# define UV__ENETUNREACH UV__ERR(ENETUNREACH)
#else
# define UV__ENETUNREACH (-4062)
#endif
#if defined(ENFILE) && !defined(_WIN32)
# define UV__ENFILE (-ENFILE)
# define UV__ENFILE UV__ERR(ENFILE)
#else
# define UV__ENFILE (-4061)
#endif
#if defined(ENOBUFS) && !defined(_WIN32)
# define UV__ENOBUFS (-ENOBUFS)
# define UV__ENOBUFS UV__ERR(ENOBUFS)
#else
# define UV__ENOBUFS (-4060)
#endif
#if defined(ENODEV) && !defined(_WIN32)
# define UV__ENODEV (-ENODEV)
# define UV__ENODEV UV__ERR(ENODEV)
#else
# define UV__ENODEV (-4059)
#endif
#if defined(ENOENT) && !defined(_WIN32)
# define UV__ENOENT (-ENOENT)
# define UV__ENOENT UV__ERR(ENOENT)
#else
# define UV__ENOENT (-4058)
#endif
#if defined(ENOMEM) && !defined(_WIN32)
# define UV__ENOMEM (-ENOMEM)
# define UV__ENOMEM UV__ERR(ENOMEM)
#else
# define UV__ENOMEM (-4057)
#endif
#if defined(ENONET) && !defined(_WIN32)
# define UV__ENONET (-ENONET)
# define UV__ENONET UV__ERR(ENONET)
#else
# define UV__ENONET (-4056)
#endif
#if defined(ENOSPC) && !defined(_WIN32)
# define UV__ENOSPC (-ENOSPC)
# define UV__ENOSPC UV__ERR(ENOSPC)
#else
# define UV__ENOSPC (-4055)
#endif
#if defined(ENOSYS) && !defined(_WIN32)
# define UV__ENOSYS (-ENOSYS)
# define UV__ENOSYS UV__ERR(ENOSYS)
#else
# define UV__ENOSYS (-4054)
#endif
#if defined(ENOTCONN) && !defined(_WIN32)
# define UV__ENOTCONN (-ENOTCONN)
# define UV__ENOTCONN UV__ERR(ENOTCONN)
#else
# define UV__ENOTCONN (-4053)
#endif
#if defined(ENOTDIR) && !defined(_WIN32)
# define UV__ENOTDIR (-ENOTDIR)
# define UV__ENOTDIR UV__ERR(ENOTDIR)
#else
# define UV__ENOTDIR (-4052)
#endif
#if defined(ENOTEMPTY) && !defined(_WIN32)
# define UV__ENOTEMPTY (-ENOTEMPTY)
# define UV__ENOTEMPTY UV__ERR(ENOTEMPTY)
#else
# define UV__ENOTEMPTY (-4051)
#endif
#if defined(ENOTSOCK) && !defined(_WIN32)
# define UV__ENOTSOCK (-ENOTSOCK)
# define UV__ENOTSOCK UV__ERR(ENOTSOCK)
#else
# define UV__ENOTSOCK (-4050)
#endif
#if defined(ENOTSUP) && !defined(_WIN32)
# define UV__ENOTSUP (-ENOTSUP)
# define UV__ENOTSUP UV__ERR(ENOTSUP)
#else
# define UV__ENOTSUP (-4049)
#endif
#if defined(EPERM) && !defined(_WIN32)
# define UV__EPERM (-EPERM)
# define UV__EPERM UV__ERR(EPERM)
#else
# define UV__EPERM (-4048)
#endif
#if defined(EPIPE) && !defined(_WIN32)
# define UV__EPIPE (-EPIPE)
# define UV__EPIPE UV__ERR(EPIPE)
#else
# define UV__EPIPE (-4047)
#endif
#if defined(EPROTO) && !defined(_WIN32)
# define UV__EPROTO (-EPROTO)
# define UV__EPROTO UV__ERR(EPROTO)
#else
# define UV__EPROTO (-4046)
# define UV__EPROTO UV__ERR(4046)
#endif
#if defined(EPROTONOSUPPORT) && !defined(_WIN32)
# define UV__EPROTONOSUPPORT (-EPROTONOSUPPORT)
# define UV__EPROTONOSUPPORT UV__ERR(EPROTONOSUPPORT)
#else
# define UV__EPROTONOSUPPORT (-4045)
#endif
#if defined(EPROTOTYPE) && !defined(_WIN32)
# define UV__EPROTOTYPE (-EPROTOTYPE)
# define UV__EPROTOTYPE UV__ERR(EPROTOTYPE)
#else
# define UV__EPROTOTYPE (-4044)
#endif
#if defined(EROFS) && !defined(_WIN32)
# define UV__EROFS (-EROFS)
# define UV__EROFS UV__ERR(EROFS)
#else
# define UV__EROFS (-4043)
#endif
#if defined(ESHUTDOWN) && !defined(_WIN32)
# define UV__ESHUTDOWN (-ESHUTDOWN)
# define UV__ESHUTDOWN UV__ERR(ESHUTDOWN)
#else
# define UV__ESHUTDOWN (-4042)
#endif
#if defined(ESPIPE) && !defined(_WIN32)
# define UV__ESPIPE (-ESPIPE)
# define UV__ESPIPE UV__ERR(ESPIPE)
#else
# define UV__ESPIPE (-4041)
#endif
#if defined(ESRCH) && !defined(_WIN32)
# define UV__ESRCH (-ESRCH)
# define UV__ESRCH UV__ERR(ESRCH)
#else
# define UV__ESRCH (-4040)
#endif
#if defined(ETIMEDOUT) && !defined(_WIN32)
# define UV__ETIMEDOUT (-ETIMEDOUT)
# define UV__ETIMEDOUT UV__ERR(ETIMEDOUT)
#else
# define UV__ETIMEDOUT (-4039)
#endif
#if defined(ETXTBSY) && !defined(_WIN32)
# define UV__ETXTBSY (-ETXTBSY)
# define UV__ETXTBSY UV__ERR(ETXTBSY)
#else
# define UV__ETXTBSY (-4038)
#endif
#if defined(EXDEV) && !defined(_WIN32)
# define UV__EXDEV (-EXDEV)
# define UV__EXDEV UV__ERR(EXDEV)
#else
# define UV__EXDEV (-4037)
#endif
#if defined(EFBIG) && !defined(_WIN32)
# define UV__EFBIG (-EFBIG)
# define UV__EFBIG UV__ERR(EFBIG)
#else
# define UV__EFBIG (-4036)
#endif
#if defined(ENOPROTOOPT) && !defined(_WIN32)
# define UV__ENOPROTOOPT (-ENOPROTOOPT)
# define UV__ENOPROTOOPT UV__ERR(ENOPROTOOPT)
#else
# define UV__ENOPROTOOPT (-4035)
#endif
#if defined(ERANGE) && !defined(_WIN32)
# define UV__ERANGE (-ERANGE)
# define UV__ERANGE UV__ERR(ERANGE)
#else
# define UV__ERANGE (-4034)
#endif
#if defined(ENXIO) && !defined(_WIN32)
# define UV__ENXIO (-ENXIO)
# define UV__ENXIO UV__ERR(ENXIO)
#else
# define UV__ENXIO (-4033)
#endif
#if defined(EMLINK) && !defined(_WIN32)
# define UV__EMLINK (-EMLINK)
# define UV__EMLINK UV__ERR(EMLINK)
#else
# define UV__EMLINK (-4032)
#endif
@ -404,7 +409,7 @@
* icky to hard-code it.
*/
#if defined(EHOSTDOWN) && !defined(_WIN32)
# define UV__EHOSTDOWN (-EHOSTDOWN)
# define UV__EHOSTDOWN UV__ERR(EHOSTDOWN)
#elif defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
@ -416,4 +421,28 @@
# define UV__EHOSTDOWN (-4031)
#endif
#if defined(EREMOTEIO) && !defined(_WIN32)
# define UV__EREMOTEIO UV__ERR(EREMOTEIO)
#else
# define UV__EREMOTEIO (-4030)
#endif
#if defined(ENOTTY) && !defined(_WIN32)
# define UV__ENOTTY UV__ERR(ENOTTY)
#else
# define UV__ENOTTY (-4029)
#endif
#if defined(EFTYPE) && !defined(_WIN32)
# define UV__EFTYPE UV__ERR(EFTYPE)
#else
# define UV__EFTYPE (-4028)
#endif
#if defined(EILSEQ) && !defined(_WIN32)
# define UV__EILSEQ UV__ERR(EILSEQ)
#else
# define UV__EILSEQ (-4027)
#endif
#endif /* UV_ERRNO_H_ */
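
A hypothetical sketch of how the UV__ERR()-mapped codes above surface to callers: a failing synchronous uv_fs_open() returns the negated errno (here UV_ENOENT), which uv_err_name() and uv_strerror() translate back into text.
/* Hypothetical sketch: map a failed open back to an error name and message. */
#include <stdio.h>
#include <uv.h>
int main(void) {
  uv_fs_t req;
  int rc = uv_fs_open(uv_default_loop(), &req, "/no/such/file",
                      UV_FS_O_RDONLY, 0, NULL);
  if (rc < 0)
    printf("%s: %s\n", uv_err_name(rc), uv_strerror(rc)); /* ENOENT: ... */
  uv_fs_req_cleanup(&req);
  return 0;
}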

33
deps/libuv/include/uv/os390.h vendored Normal file
View File

@ -0,0 +1,33 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_MVS_H
#define UV_MVS_H
#define UV_PLATFORM_SEM_T long
#define UV_PLATFORM_LOOP_FIELDS \
void* ep; \
#define UV_PLATFORM_FS_EVENT_FIELDS \
char rfis_rftok[8]; \
#endif /* UV_MVS_H */

31
deps/libuv/include/uv/posix.h vendored Normal file
View File

@ -0,0 +1,31 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_POSIX_H
#define UV_POSIX_H
#define UV_PLATFORM_LOOP_FIELDS \
struct pollfd* poll_fds; \
size_t poll_fds_used; \
size_t poll_fds_size; \
unsigned char poll_fds_iterating; \
#endif /* UV_POSIX_H */

View File

@ -31,35 +31,44 @@
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <netdb.h> /* MAXHOSTNAMELEN on Solaris */
#include <termios.h>
#include <pwd.h>
#if !defined(__MVS__)
#include <semaphore.h>
#include <sys/param.h> /* MAXHOSTNAMELEN on Linux and the BSDs */
#endif
#include <pthread.h>
#include <signal.h>
#include "uv-threadpool.h"
#include "uv/threadpool.h"
#if defined(__linux__)
# include "uv-linux.h"
# include "uv/linux.h"
#elif defined (__MVS__)
# include "uv/os390.h"
#elif defined(__PASE__) /* __PASE__ and _AIX are both defined on IBM i */
# include "uv/posix.h" /* IBM i needs uv/posix.h, not uv/aix.h */
#elif defined(_AIX)
# include "uv-aix.h"
# include "uv/aix.h"
#elif defined(__sun)
# include "uv-sunos.h"
# include "uv/sunos.h"
#elif defined(__APPLE__)
# include "uv-darwin.h"
# include "uv/darwin.h"
#elif defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# include "uv-bsd.h"
#endif
#ifndef PTHREAD_BARRIER_SERIAL_THREAD
# include "pthread-barrier.h"
# include "uv/bsd.h"
#elif defined(__CYGWIN__) || \
defined(__MSYS__) || \
defined(__GNU__)
# include "uv/posix.h"
#elif defined(__HAIKU__)
# include "uv/posix.h"
#endif
#ifndef NI_MAXHOST
@ -75,7 +84,6 @@
#endif
struct uv__io_s;
struct uv__async;
struct uv_loop_s;
typedef void (*uv__io_cb)(struct uv_loop_s* loop,
@ -93,16 +101,6 @@ struct uv__io_s {
UV_IO_PRIVATE_PLATFORM_FIELDS
};
typedef void (*uv__async_cb)(struct uv_loop_s* loop,
struct uv__async* w,
unsigned int nevents);
struct uv__async {
uv__async_cb cb;
uv__io_t io_watcher;
int wfd;
};
#ifndef UV_PLATFORM_SEM_T
# define UV_PLATFORM_SEM_T sem_t
#endif
@ -128,6 +126,7 @@ typedef struct uv_buf_t {
typedef int uv_file;
typedef int uv_os_sock_t;
typedef int uv_os_fd_t;
typedef pid_t uv_pid_t;
#define UV_ONCE_INIT PTHREAD_ONCE_INIT
@ -138,8 +137,30 @@ typedef pthread_rwlock_t uv_rwlock_t;
typedef UV_PLATFORM_SEM_T uv_sem_t;
typedef pthread_cond_t uv_cond_t;
typedef pthread_key_t uv_key_t;
typedef pthread_barrier_t uv_barrier_t;
/* Note: guard clauses should match uv_barrier_init's in src/unix/thread.c. */
#if defined(_AIX) || \
defined(__OpenBSD__) || \
!defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* TODO(bnoordhuis) Merge into uv_barrier_t in v2. */
struct _uv_barrier {
uv_mutex_t mutex;
uv_cond_t cond;
unsigned threshold;
unsigned in;
unsigned out;
};
typedef struct {
struct _uv_barrier* b;
# if defined(PTHREAD_BARRIER_SERIAL_THREAD)
/* TODO(bnoordhuis) Remove padding in v2. */
char pad[sizeof(pthread_barrier_t) - sizeof(struct _uv_barrier*)];
# endif
} uv_barrier_t;
#else
typedef pthread_barrier_t uv_barrier_t;
#endif
/* Platform-specific definitions for uv_spawn support. */
typedef gid_t uv_gid_t;
@ -147,6 +168,9 @@ typedef uid_t uv_uid_t;
typedef struct dirent uv__dirent_t;
#define UV_DIR_PRIVATE_FIELDS \
DIR* dir;
#if defined(DT_UNKNOWN)
# define HAVE_DIRENT_TYPES
# if defined(DT_REG)
@ -212,7 +236,9 @@ typedef struct {
void* check_handles[2]; \
void* idle_handles[2]; \
void* async_handles[2]; \
struct uv__async async_watcher; \
void (*async_unused)(void); /* TODO(bnoordhuis) Remove in libuv v2. */ \
uv__io_t async_io_watcher; \
int async_wfd; \
struct { \
void* min; \
unsigned int nelts; \
@ -368,4 +394,112 @@ typedef struct {
uv_fs_event_cb cb; \
UV_PLATFORM_FS_EVENT_FIELDS \
/* fs open() flags supported on this platform: */
#if defined(O_APPEND)
# define UV_FS_O_APPEND O_APPEND
#else
# define UV_FS_O_APPEND 0
#endif
#if defined(O_CREAT)
# define UV_FS_O_CREAT O_CREAT
#else
# define UV_FS_O_CREAT 0
#endif
#if defined(__linux__) && defined(__arm__)
# define UV_FS_O_DIRECT 0x10000
#elif defined(__linux__) && defined(__m68k__)
# define UV_FS_O_DIRECT 0x10000
#elif defined(__linux__) && defined(__mips__)
# define UV_FS_O_DIRECT 0x08000
#elif defined(__linux__) && defined(__powerpc__)
# define UV_FS_O_DIRECT 0x20000
#elif defined(__linux__) && defined(__s390x__)
# define UV_FS_O_DIRECT 0x04000
#elif defined(__linux__) && defined(__x86_64__)
# define UV_FS_O_DIRECT 0x04000
#elif defined(O_DIRECT)
# define UV_FS_O_DIRECT O_DIRECT
#else
# define UV_FS_O_DIRECT 0
#endif
#if defined(O_DIRECTORY)
# define UV_FS_O_DIRECTORY O_DIRECTORY
#else
# define UV_FS_O_DIRECTORY 0
#endif
#if defined(O_DSYNC)
# define UV_FS_O_DSYNC O_DSYNC
#else
# define UV_FS_O_DSYNC 0
#endif
#if defined(O_EXCL)
# define UV_FS_O_EXCL O_EXCL
#else
# define UV_FS_O_EXCL 0
#endif
#if defined(O_EXLOCK)
# define UV_FS_O_EXLOCK O_EXLOCK
#else
# define UV_FS_O_EXLOCK 0
#endif
#if defined(O_NOATIME)
# define UV_FS_O_NOATIME O_NOATIME
#else
# define UV_FS_O_NOATIME 0
#endif
#if defined(O_NOCTTY)
# define UV_FS_O_NOCTTY O_NOCTTY
#else
# define UV_FS_O_NOCTTY 0
#endif
#if defined(O_NOFOLLOW)
# define UV_FS_O_NOFOLLOW O_NOFOLLOW
#else
# define UV_FS_O_NOFOLLOW 0
#endif
#if defined(O_NONBLOCK)
# define UV_FS_O_NONBLOCK O_NONBLOCK
#else
# define UV_FS_O_NONBLOCK 0
#endif
#if defined(O_RDONLY)
# define UV_FS_O_RDONLY O_RDONLY
#else
# define UV_FS_O_RDONLY 0
#endif
#if defined(O_RDWR)
# define UV_FS_O_RDWR O_RDWR
#else
# define UV_FS_O_RDWR 0
#endif
#if defined(O_SYMLINK)
# define UV_FS_O_SYMLINK O_SYMLINK
#else
# define UV_FS_O_SYMLINK 0
#endif
#if defined(O_SYNC)
# define UV_FS_O_SYNC O_SYNC
#else
# define UV_FS_O_SYNC 0
#endif
#if defined(O_TRUNC)
# define UV_FS_O_TRUNC O_TRUNC
#else
# define UV_FS_O_TRUNC 0
#endif
#if defined(O_WRONLY)
# define UV_FS_O_WRONLY O_WRONLY
#else
# define UV_FS_O_WRONLY 0
#endif
/* fs open() flags supported on other platforms: */
#define UV_FS_O_FILEMAP 0
#define UV_FS_O_RANDOM 0
#define UV_FS_O_SHORT_LIVED 0
#define UV_FS_O_SEQUENTIAL 0
#define UV_FS_O_TEMPORARY 0
#endif /* UV_UNIX_H */
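
A hypothetical sketch using the UV_FS_O_* constants defined above with a synchronous uv_fs_open(); the path and mode are placeholders.
/* Hypothetical sketch: create a new file, failing if it already exists. */
#include <stdio.h>
#include <uv.h>
int main(void) {
  uv_fs_t req;
  int fd = uv_fs_open(uv_default_loop(), &req, "/tmp/example.lock",
                      UV_FS_O_WRONLY | UV_FS_O_CREAT | UV_FS_O_EXCL,
                      0600, NULL);
  uv_fs_req_cleanup(&req);
  if (fd < 0) {
    fprintf(stderr, "open: %s\n", uv_strerror(fd));
    return 1;
  }
  uv_fs_close(uv_default_loop(), &req, fd, NULL);
  uv_fs_req_cleanup(&req);
  return 0;
}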

View File

@ -31,8 +31,8 @@
*/
#define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 10
#define UV_VERSION_PATCH 0
#define UV_VERSION_MINOR 34
#define UV_VERSION_PATCH 2
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""
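
A hypothetical compile-time guard against these version macros, useful because several APIs in this update are recent additions (uv_random arrived in 1.33.0, uv_fs_mkstemp in 1.34.0):
/* Hypothetical sketch: refuse to build against an older libuv, and report
 * the runtime version for good measure. */
#include <stdio.h>
#include <uv.h>
#if UV_VERSION_MAJOR == 1 && UV_VERSION_MINOR < 34
# error "this code expects libuv >= 1.34"
#endif
int main(void) {
  printf("built against libuv %s\n", uv_version_string());
  return 0;
}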

View File

@ -20,11 +20,12 @@
*/
#ifndef _WIN32_WINNT
# define _WIN32_WINNT 0x0502
# define _WIN32_WINNT 0x0600
#endif
#if !defined(_SSIZE_T_) && !defined(_SSIZE_T_DEFINED)
typedef intptr_t ssize_t;
# define SSIZE_MAX INTPTR_MAX
# define _SSIZE_T_
# define _SSIZE_T_DEFINED
#endif
@ -49,16 +50,17 @@ typedef struct pollfd {
#include <process.h>
#include <signal.h>
#include <fcntl.h>
#include <sys/stat.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "stdint-msvc2008.h"
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "tree.h"
#include "uv-threadpool.h"
#include "uv/tree.h"
#include "uv/threadpool.h"
#define MAX_PIPENAME_LEN 256
@ -85,8 +87,16 @@ typedef struct pollfd {
#define SIGKILL 9
#define SIGWINCH 28
/* The CRT defines SIGABRT_COMPAT as 6, which equals SIGABRT on many */
/* unix-like platforms. However MinGW doesn't define it, so we do. */
/* Redefine NSIG to take SIGWINCH into consideration */
#if defined(NSIG) && NSIG <= SIGWINCH
# undef NSIG
#endif
#ifndef NSIG
# define NSIG SIGWINCH + 1
#endif
/* The CRT defines SIGABRT_COMPAT as 6, which equals SIGABRT on many unix-like
* platforms. However MinGW doesn't define it, so we do. */
#ifndef SIGABRT_COMPAT
# define SIGABRT_COMPAT 6
#endif
@ -221,6 +231,7 @@ typedef struct uv_buf_t {
typedef int uv_file;
typedef SOCKET uv_os_sock_t;
typedef HANDLE uv_os_fd_t;
typedef int uv_pid_t;
typedef HANDLE uv_thread_t;
@ -242,7 +253,7 @@ typedef union {
CRITICAL_SECTION waiters_count_lock;
HANDLE signal_event;
HANDLE broadcast_event;
} fallback;
} unused_; /* TODO: retained for ABI compatibility; remove me in v2.x. */
} uv_cond_t;
typedef union {
@ -290,6 +301,11 @@ typedef struct uv__dirent_s {
char d_name[1];
} uv__dirent_t;
#define UV_DIR_PRIVATE_FIELDS \
HANDLE dir_handle; \
WIN32_FIND_DATAW find_data; \
BOOL need_find_call;
#define HAVE_DIRENT_TYPES
#define UV__DT_DIR UV_DIRENT_DIR
#define UV__DT_FILE UV_DIRENT_FILE
@ -306,8 +322,6 @@ typedef struct {
char* errmsg;
} uv_lib_t;
RB_HEAD(uv_timer_tree_s, uv_timer_s);
#define UV_LOOP_PRIVATE_FIELDS \
/* The loop's I/O completion port */ \
HANDLE iocp; \
@ -319,8 +333,8 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
uv_req_t* pending_reqs_tail; \
/* Head of a single-linked list of closed handles */ \
uv_handle_t* endgame_handles; \
/* The head of the timers tree */ \
struct uv_timer_tree_s timers; \
/* TODO(bnoordhuis) Stop heap-allocating |timer_heap| in libuv v2.x. */ \
void* timer_heap; \
/* Lists of active loop (prepare / check / idle) watchers */ \
uv_prepare_t* prepare_handles; \
uv_check_t* check_handles; \
@ -366,10 +380,10 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
} u; \
struct uv_req_s* next_req;
#define UV_WRITE_PRIVATE_FIELDS \
int ipc_header; \
uv_buf_t write_buffer; \
HANDLE event_handle; \
#define UV_WRITE_PRIVATE_FIELDS \
int coalesced; \
uv_buf_t write_buffer; \
HANDLE event_handle; \
HANDLE wait_handle;
#define UV_CONNECT_PRIVATE_FIELDS \
@ -457,16 +471,17 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
#define uv_pipe_connection_fields \
uv_timer_t* eof_timer; \
uv_write_t ipc_header_write_req; \
int ipc_pid; \
uint64_t remaining_ipc_rawdata_bytes; \
struct { \
void* queue[2]; \
int queue_len; \
} pending_ipc_info; \
uv_write_t dummy; /* TODO: retained for ABI compat; remove this in v2.x. */ \
DWORD ipc_remote_pid; \
union { \
uint32_t payload_remaining; \
uint64_t dummy; /* TODO: retained for ABI compat; remove this in v2.x. */ \
} ipc_data_frame; \
void* ipc_xfer_queue[2]; \
int ipc_xfer_queue_length; \
uv_write_t* non_overlapped_writes_tail; \
uv_mutex_t readfile_mutex; \
volatile HANDLE readfile_thread;
CRITICAL_SECTION readfile_thread_lock; \
volatile HANDLE readfile_thread_handle;
#define UV_PIPE_PRIVATE_FIELDS \
HANDLE handle; \
@ -476,8 +491,8 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
struct { uv_pipe_connection_fields } conn; \
} pipe;
/* TODO: put the parser states in an union - TTY handles are always */
/* half-duplex so read-state can safely overlap write-state. */
/* TODO: put the parser states in an union - TTY handles are always half-duplex
* so read-state can safely overlap write-state. */
#define UV_TTY_PRIVATE_FIELDS \
HANDLE handle; \
union { \
@ -526,8 +541,9 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
unsigned char events;
#define UV_TIMER_PRIVATE_FIELDS \
RB_ENTRY(uv_timer_s) tree_entry; \
uint64_t due; \
void* heap_node[3]; \
int unused; \
uint64_t timeout; \
uint64_t repeat; \
uint64_t start_id; \
uv_timer_cb timer_cb;
@ -647,3 +663,29 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
#ifndef X_OK
#define X_OK 1
#endif
/* fs open() flags supported on this platform: */
#define UV_FS_O_APPEND _O_APPEND
#define UV_FS_O_CREAT _O_CREAT
#define UV_FS_O_EXCL _O_EXCL
#define UV_FS_O_FILEMAP 0x20000000
#define UV_FS_O_RANDOM _O_RANDOM
#define UV_FS_O_RDONLY _O_RDONLY
#define UV_FS_O_RDWR _O_RDWR
#define UV_FS_O_SEQUENTIAL _O_SEQUENTIAL
#define UV_FS_O_SHORT_LIVED _O_SHORT_LIVED
#define UV_FS_O_TEMPORARY _O_TEMPORARY
#define UV_FS_O_TRUNC _O_TRUNC
#define UV_FS_O_WRONLY _O_WRONLY
/* fs open() flags supported on other platforms (or mapped on this platform): */
#define UV_FS_O_DIRECT 0x02000000 /* FILE_FLAG_NO_BUFFERING */
#define UV_FS_O_DIRECTORY 0
#define UV_FS_O_DSYNC 0x04000000 /* FILE_FLAG_WRITE_THROUGH */
#define UV_FS_O_EXLOCK 0x10000000 /* EXCLUSIVE SHARING MODE */
#define UV_FS_O_NOATIME 0
#define UV_FS_O_NOCTTY 0
#define UV_FS_O_NOFOLLOW 0
#define UV_FS_O_NONBLOCK 0
#define UV_FS_O_SYMLINK 0
#define UV_FS_O_SYNC 0x08000000 /* FILE_FLAG_WRITE_THROUGH */

86
deps/libuv/libuv.nsi vendored
View File

@ -1,86 +0,0 @@
; NSIS installer script for libuv
!include "MUI2.nsh"
Name "libuv"
OutFile "libuv-${ARCH}-${VERSION}.exe"
!include "x64.nsh"
# Default install location, for 32-bit files
InstallDir "$PROGRAMFILES\libuv"
# Override install and registry locations if this is a 64-bit install.
function .onInit
${If} ${ARCH} == "x64"
SetRegView 64
StrCpy $INSTDIR "$PROGRAMFILES64\libuv"
${EndIf}
functionEnd
;--------------------------------
; Installer pages
!insertmacro MUI_PAGE_WELCOME
!insertmacro MUI_PAGE_DIRECTORY
!insertmacro MUI_PAGE_INSTFILES
!insertmacro MUI_PAGE_FINISH
;--------------------------------
; Uninstaller pages
!insertmacro MUI_UNPAGE_WELCOME
!insertmacro MUI_UNPAGE_CONFIRM
!insertmacro MUI_UNPAGE_INSTFILES
!insertmacro MUI_UNPAGE_FINISH
;--------------------------------
; Languages
!insertmacro MUI_LANGUAGE "English"
;--------------------------------
; Installer sections
Section "Files" SecInstall
SectionIn RO
SetOutPath "$INSTDIR"
File "Release\*.dll"
File "Release\*.lib"
File "LICENSE"
File "README.md"
SetOutPath "$INSTDIR\include"
File "include\uv.h"
File "include\uv-errno.h"
File "include\uv-threadpool.h"
File "include\uv-version.h"
File "include\uv-win.h"
File "include\tree.h"
WriteUninstaller "$INSTDIR\Uninstall.exe"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "DisplayName" "libuv-${ARCH}-${VERSION}"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "UninstallString" "$\"$INSTDIR\Uninstall.exe$\""
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "QuietUninstallString" "$\"$INSTDIR\Uninstall.exe$\" /S"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "HelpLink" "http://libuv.org/"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "URLInfoAbout" "http://libuv.org/"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "DisplayVersion" "${VERSION}"
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "NoModify" "1"
WriteRegDWORD HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}" "NoRepair" "1"
SectionEnd
Section "Uninstall"
Delete "$INSTDIR\libuv.dll"
Delete "$INSTDIR\libuv.lib"
Delete "$INSTDIR\LICENSE"
Delete "$INSTDIR\README.md"
Delete "$INSTDIR\include\uv.h"
Delete "$INSTDIR\include\uv-errno.h"
Delete "$INSTDIR\include\uv-threadpool.h"
Delete "$INSTDIR\include\uv-version.h"
Delete "$INSTDIR\include\uv-win.h"
Delete "$INSTDIR\include\tree.h"
Delete "$INSTDIR\Uninstall.exe"
RMDir "$INSTDIR"
DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\libuv-${ARCH}-${VERSION}"
SectionEnd

View File

@ -1,11 +0,0 @@
prefix=@prefix@
exec_prefix=${prefix}
libdir=@libdir@
includedir=@includedir@
Name: @PACKAGE_NAME@
Version: @PACKAGE_VERSION@
Description: multi-platform support library with a focus on asynchronous I/O.
Libs: -L${libdir} -luv @LIBS@
Cflags: -I${includedir}

View File

@ -1,4 +0,0 @@
# Ignore libtoolize-generated files.
*.m4
!as_case.m4
!libuv-check-flags.m4

View File

@ -1,21 +0,0 @@
# AS_CASE(WORD, [PATTERN1], [IF-MATCHED1]...[DEFAULT])
# ----------------------------------------------------
# Expand into
# | case WORD in
# | PATTERN1) IF-MATCHED1 ;;
# | ...
# | *) DEFAULT ;;
# | esac
m4_define([_AS_CASE],
[m4_if([$#], 0, [m4_fatal([$0: too few arguments: $#])],
[$#], 1, [ *) $1 ;;],
[$#], 2, [ $1) m4_default([$2], [:]) ;;],
[ $1) m4_default([$2], [:]) ;;
$0(m4_shiftn(2, $@))])dnl
])
m4_defun([AS_CASE],
[m4_ifval([$2$3],
[case $1 in
_AS_CASE(m4_shift($@))
esac])])

View File

@ -1,319 +0,0 @@
dnl Macros to check the presence of generic (non-typed) symbols.
dnl Copyright (c) 2006-2008 Diego Pettenò <flameeyes@gmail.com>
dnl Copyright (c) 2006-2008 xine project
dnl
dnl This program is free software; you can redistribute it and/or modify
dnl it under the terms of the GNU General Public License as published by
dnl the Free Software Foundation; either version 3, or (at your option)
dnl any later version.
dnl
dnl This program is distributed in the hope that it will be useful,
dnl but WITHOUT ANY WARRANTY; without even the implied warranty of
dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
dnl GNU General Public License for more details.
dnl
dnl You should have received a copy of the GNU General Public License
dnl along with this program; if not, write to the Free Software
dnl Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
dnl 02110-1301, USA.
dnl
dnl As a special exception, the copyright owners of the
dnl macro gives unlimited permission to copy, distribute and modify the
dnl configure scripts that are the output of Autoconf when processing the
dnl Macro. You need not follow the terms of the GNU General Public
dnl License when using or distributing such scripts, even though portions
dnl of the text of the Macro appear in them. The GNU General Public
dnl License (GPL) does govern all other use of the material that
dnl constitutes the Autoconf Macro.
dnl
dnl This special exception to the GPL applies to versions of the
dnl Autoconf Macro released by this project. When you make and
dnl distribute a modified version of the Autoconf Macro, you may extend
dnl this special exception to the GPL to apply to your modified version as
dnl well.
dnl Check if the flag is supported by compiler
dnl CC_CHECK_CFLAGS_SILENT([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND])
AC_DEFUN([CC_CHECK_CFLAGS_SILENT], [
AC_CACHE_VAL(AS_TR_SH([cc_cv_cflags_$1]),
[ac_save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $1"
AC_COMPILE_IFELSE([AC_LANG_SOURCE([int a;])],
[eval "AS_TR_SH([cc_cv_cflags_$1])='yes'"],
[eval "AS_TR_SH([cc_cv_cflags_$1])='no'"])
CFLAGS="$ac_save_CFLAGS"
])
AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes],
[$2], [$3])
])
dnl Check if the flag is supported by compiler (cacheable)
dnl CC_CHECK_CFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND])
AC_DEFUN([CC_CHECK_CFLAGS], [
AC_CACHE_CHECK([if $CC supports $1 flag],
AS_TR_SH([cc_cv_cflags_$1]),
CC_CHECK_CFLAGS_SILENT([$1]) dnl Don't execute actions here!
)
AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes],
[$2], [$3])
])
dnl CC_CHECK_CFLAG_APPEND(FLAG, [action-if-found], [action-if-not-found])
dnl Check for CFLAG and appends them to CFLAGS if supported
AC_DEFUN([CC_CHECK_CFLAG_APPEND], [
AC_CACHE_CHECK([if $CC supports $1 flag],
AS_TR_SH([cc_cv_cflags_$1]),
CC_CHECK_CFLAGS_SILENT([$1]) dnl Don't execute actions here!
)
AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes],
[CFLAGS="$CFLAGS $1"; DEBUG_CFLAGS="$DEBUG_CFLAGS $1"; $2], [$3])
])
dnl CC_CHECK_CFLAGS_APPEND([FLAG1 FLAG2], [action-if-found], [action-if-not])
AC_DEFUN([CC_CHECK_CFLAGS_APPEND], [
for flag in $1; do
CC_CHECK_CFLAG_APPEND($flag, [$2], [$3])
done
])
dnl Check if the flag is supported by linker (cacheable)
dnl CC_CHECK_LDFLAGS([FLAG], [ACTION-IF-FOUND],[ACTION-IF-NOT-FOUND])
AC_DEFUN([CC_CHECK_LDFLAGS], [
AC_CACHE_CHECK([if $CC supports $1 flag],
AS_TR_SH([cc_cv_ldflags_$1]),
[ac_save_LDFLAGS="$LDFLAGS"
LDFLAGS="$LDFLAGS $1"
AC_LANG_PUSH([C])
AC_LINK_IFELSE([AC_LANG_SOURCE([int main() { return 1; }])],
[eval "AS_TR_SH([cc_cv_ldflags_$1])='yes'"],
[eval "AS_TR_SH([cc_cv_ldflags_$1])="])
AC_LANG_POP([C])
LDFLAGS="$ac_save_LDFLAGS"
])
AS_IF([eval test x$]AS_TR_SH([cc_cv_ldflags_$1])[ = xyes],
[$2], [$3])
])
dnl define the LDFLAGS_NOUNDEFINED variable with the correct value for
dnl the current linker to avoid undefined references in a shared object.
AC_DEFUN([CC_NOUNDEFINED], [
dnl We check $host for which systems to enable this for.
AC_REQUIRE([AC_CANONICAL_HOST])
case $host in
dnl FreeBSD (et al.) does not complete linking for shared objects when pthreads
dnl are requested, as different implementations are present; to avoid problems
dnl use -Wl,-z,defs only for those platform not behaving this way.
*-freebsd* | *-openbsd*) ;;
*)
dnl First of all check for the --no-undefined variant of GNU ld. This allows
dnl for a much more readable commandline, so that people can understand what
dnl it does without going to look for what the heck -z defs does.
for possible_flags in "-Wl,--no-undefined" "-Wl,-z,defs"; do
CC_CHECK_LDFLAGS([$possible_flags], [LDFLAGS_NOUNDEFINED="$possible_flags"])
break
done
;;
esac
AC_SUBST([LDFLAGS_NOUNDEFINED])
])
dnl Check for a -Werror flag or equivalent. -Werror is the GCC
dnl and ICC flag that tells the compiler to treat all the warnings
dnl as fatal. We usually need this option to make sure that some
dnl constructs (like attributes) are not simply ignored.
dnl
dnl Other compilers don't support -Werror per se, but they support
dnl an equivalent flag:
dnl - Sun Studio compiler supports -errwarn=%all
AC_DEFUN([CC_CHECK_WERROR], [
AC_CACHE_CHECK(
[for $CC way to treat warnings as errors],
[cc_cv_werror],
[CC_CHECK_CFLAGS_SILENT([-Werror], [cc_cv_werror=-Werror],
[CC_CHECK_CFLAGS_SILENT([-errwarn=%all], [cc_cv_werror=-errwarn=%all])])
])
])
AC_DEFUN([CC_CHECK_ATTRIBUTE], [
AC_REQUIRE([CC_CHECK_WERROR])
AC_CACHE_CHECK([if $CC supports __attribute__(( ifelse([$2], , [$1], [$2]) ))],
AS_TR_SH([cc_cv_attribute_$1]),
[ac_save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $cc_cv_werror"
AC_LANG_PUSH([C])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([$3])],
[eval "AS_TR_SH([cc_cv_attribute_$1])='yes'"],
[eval "AS_TR_SH([cc_cv_attribute_$1])='no'"])
AC_LANG_POP([C])
CFLAGS="$ac_save_CFLAGS"
])
AS_IF([eval test x$]AS_TR_SH([cc_cv_attribute_$1])[ = xyes],
[AC_DEFINE(
AS_TR_CPP([SUPPORT_ATTRIBUTE_$1]), 1,
[Define this if the compiler supports __attribute__(( ifelse([$2], , [$1], [$2]) ))]
)
$4],
[$5])
])
AC_DEFUN([CC_ATTRIBUTE_CONSTRUCTOR], [
CC_CHECK_ATTRIBUTE(
[constructor],,
[void __attribute__((constructor)) ctor() { int a; }],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_FORMAT], [
CC_CHECK_ATTRIBUTE(
[format], [format(printf, n, n)],
[void __attribute__((format(printf, 1, 2))) printflike(const char *fmt, ...) { fmt = (void *)0; }],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_FORMAT_ARG], [
CC_CHECK_ATTRIBUTE(
[format_arg], [format_arg(printf)],
[char *__attribute__((format_arg(1))) gettextlike(const char *fmt) { fmt = (void *)0; }],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_VISIBILITY], [
CC_CHECK_ATTRIBUTE(
[visibility_$1], [visibility("$1")],
[void __attribute__((visibility("$1"))) $1_function() { }],
[$2], [$3])
])
AC_DEFUN([CC_ATTRIBUTE_NONNULL], [
CC_CHECK_ATTRIBUTE(
[nonnull], [nonnull()],
[void __attribute__((nonnull())) some_function(void *foo, void *bar) { foo = (void*)0; bar = (void*)0; }],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_UNUSED], [
CC_CHECK_ATTRIBUTE(
[unused], ,
[void some_function(void *foo, __attribute__((unused)) void *bar);],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_SENTINEL], [
CC_CHECK_ATTRIBUTE(
[sentinel], ,
[void some_function(void *foo, ...) __attribute__((sentinel));],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_DEPRECATED], [
CC_CHECK_ATTRIBUTE(
[deprecated], ,
[void some_function(void *foo, ...) __attribute__((deprecated));],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_ALIAS], [
CC_CHECK_ATTRIBUTE(
[alias], [weak, alias],
[void other_function(void *foo) { }
void some_function(void *foo) __attribute__((weak, alias("other_function")));],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_MALLOC], [
CC_CHECK_ATTRIBUTE(
[malloc], ,
[void * __attribute__((malloc)) my_alloc(int n);],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_PACKED], [
CC_CHECK_ATTRIBUTE(
[packed], ,
[struct astructure { char a; int b; long c; void *d; } __attribute__((packed));],
[$1], [$2])
])
AC_DEFUN([CC_ATTRIBUTE_CONST], [
CC_CHECK_ATTRIBUTE(
[const], ,
[int __attribute__((const)) twopow(int n) { return 1 << n; } ],
[$1], [$2])
])
AC_DEFUN([CC_FLAG_VISIBILITY], [
AC_REQUIRE([CC_CHECK_WERROR])
AC_CACHE_CHECK([if $CC supports -fvisibility=hidden],
[cc_cv_flag_visibility],
[cc_flag_visibility_save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $cc_cv_werror"
CC_CHECK_CFLAGS_SILENT([-fvisibility=hidden],
cc_cv_flag_visibility='yes',
cc_cv_flag_visibility='no')
CFLAGS="$cc_flag_visibility_save_CFLAGS"])
AS_IF([test "x$cc_cv_flag_visibility" = "xyes"],
[AC_DEFINE([SUPPORT_FLAG_VISIBILITY], 1,
[Define this if the compiler supports the -fvisibility flag])
$1],
[$2])
])
AC_DEFUN([CC_FUNC_EXPECT], [
AC_REQUIRE([CC_CHECK_WERROR])
AC_CACHE_CHECK([if compiler has __builtin_expect function],
[cc_cv_func_expect],
[ac_save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $cc_cv_werror"
AC_LANG_PUSH([C])
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[int some_function() {
int a = 3;
return (int)__builtin_expect(a, 3);
}])],
[cc_cv_func_expect=yes],
[cc_cv_func_expect=no])
AC_LANG_POP([C])
CFLAGS="$ac_save_CFLAGS"
])
AS_IF([test "x$cc_cv_func_expect" = "xyes"],
[AC_DEFINE([SUPPORT__BUILTIN_EXPECT], 1,
[Define this if the compiler supports __builtin_expect() function])
$1],
[$2])
])
AC_DEFUN([CC_ATTRIBUTE_ALIGNED], [
AC_REQUIRE([CC_CHECK_WERROR])
AC_CACHE_CHECK([highest __attribute__ ((aligned ())) supported],
[cc_cv_attribute_aligned],
[ac_save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $cc_cv_werror"
AC_LANG_PUSH([C])
for cc_attribute_align_try in 64 32 16 8 4 2; do
AC_COMPILE_IFELSE([AC_LANG_SOURCE([
int main() {
static char c __attribute__ ((aligned($cc_attribute_align_try))) = 0;
return c;
}])], [cc_cv_attribute_aligned=$cc_attribute_align_try; break])
done
AC_LANG_POP([C])
CFLAGS="$ac_save_CFLAGS"
])
if test "x$cc_cv_attribute_aligned" != "x"; then
AC_DEFINE_UNQUOTED([ATTRIBUTE_ALIGNED_MAX], [$cc_cv_attribute_aligned],
[Define the highest alignment supported])
fi
])


@ -22,12 +22,20 @@
#include "uv.h"
#include "uv-common.h"
#ifdef _WIN32
#include "win/internal.h"
#include "win/handle-inl.h"
#define uv__make_close_pending(h) uv_want_endgame((h)->loop, (h))
#else
#include "unix/internal.h"
#endif
#include <assert.h>
#include <stdlib.h>
#include <string.h>
struct poll_ctx {
uv_fs_poll_t* parent_handle; /* NULL if parent has been stopped or closed */
uv_fs_poll_t* parent_handle;
int busy_polling;
unsigned int interval;
uint64_t start_time;
@ -36,6 +44,7 @@ struct poll_ctx {
uv_timer_t timer_handle;
uv_fs_t fs_req; /* TODO(bnoordhuis) mark fs_req internal */
uv_stat_t statbuf;
struct poll_ctx* previous; /* context from previous start()..stop() period */
char path[1]; /* variable length */
};
@ -49,6 +58,7 @@ static uv_stat_t zero_statbuf;
int uv_fs_poll_init(uv_loop_t* loop, uv_fs_poll_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_POLL);
handle->poll_ctx = NULL;
return 0;
}
@ -62,7 +72,7 @@ int uv_fs_poll_start(uv_fs_poll_t* handle,
size_t len;
int err;
if (uv__is_active(handle))
if (uv_is_active((uv_handle_t*)handle))
return 0;
loop = handle->loop;
@ -83,13 +93,15 @@ int uv_fs_poll_start(uv_fs_poll_t* handle,
if (err < 0)
goto error;
ctx->timer_handle.flags |= UV__HANDLE_INTERNAL;
ctx->timer_handle.flags |= UV_HANDLE_INTERNAL;
uv__handle_unref(&ctx->timer_handle);
err = uv_fs_stat(loop, &ctx->fs_req, ctx->path, poll_cb);
if (err < 0)
goto error;
if (handle->poll_ctx != NULL)
ctx->previous = handle->poll_ctx;
handle->poll_ctx = ctx;
uv__handle_start(handle);
@ -104,19 +116,17 @@ error:
int uv_fs_poll_stop(uv_fs_poll_t* handle) {
struct poll_ctx* ctx;
if (!uv__is_active(handle))
if (!uv_is_active((uv_handle_t*)handle))
return 0;
ctx = handle->poll_ctx;
assert(ctx != NULL);
assert(ctx->parent_handle != NULL);
ctx->parent_handle = NULL;
handle->poll_ctx = NULL;
assert(ctx->parent_handle == handle);
/* Close the timer if it's active. If it's inactive, there's a stat request
* in progress and poll_cb will take care of the cleanup.
*/
if (uv__is_active(&ctx->timer_handle))
if (uv_is_active((uv_handle_t*)&ctx->timer_handle))
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
uv__handle_stop(handle);
@ -129,7 +139,7 @@ int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size) {
struct poll_ctx* ctx;
size_t required_len;
if (!uv__is_active(handle)) {
if (!uv_is_active((uv_handle_t*)handle)) {
*size = 0;
return UV_EINVAL;
}
@ -153,6 +163,9 @@ int uv_fs_poll_getpath(uv_fs_poll_t* handle, char* buffer, size_t* size) {
void uv__fs_poll_close(uv_fs_poll_t* handle) {
uv_fs_poll_stop(handle);
if (handle->poll_ctx == NULL)
uv__make_close_pending((uv_handle_t*)handle);
}
@ -173,14 +186,13 @@ static void poll_cb(uv_fs_t* req) {
uv_stat_t* statbuf;
struct poll_ctx* ctx;
uint64_t interval;
uv_fs_poll_t* handle;
ctx = container_of(req, struct poll_ctx, fs_req);
handle = ctx->parent_handle;
if (ctx->parent_handle == NULL) { /* handle has been stopped or closed */
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
uv_fs_req_cleanup(req);
return;
}
if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle))
goto out;
if (req->result != 0) {
if (ctx->busy_polling != req->result) {
@ -205,7 +217,7 @@ static void poll_cb(uv_fs_t* req) {
out:
uv_fs_req_cleanup(req);
if (ctx->parent_handle == NULL) { /* handle has been stopped by callback */
if (!uv_is_active((uv_handle_t*)handle) || uv__is_closing(handle)) {
uv_close((uv_handle_t*)&ctx->timer_handle, timer_close_cb);
return;
}
@ -219,8 +231,27 @@ out:
}
static void timer_close_cb(uv_handle_t* handle) {
uv__free(container_of(handle, struct poll_ctx, timer_handle));
static void timer_close_cb(uv_handle_t* timer) {
struct poll_ctx* ctx;
struct poll_ctx* it;
struct poll_ctx* last;
uv_fs_poll_t* handle;
ctx = container_of(timer, struct poll_ctx, timer_handle);
handle = ctx->parent_handle;
if (ctx == handle->poll_ctx) {
handle->poll_ctx = ctx->previous;
if (handle->poll_ctx == NULL && uv__is_closing(handle))
uv__make_close_pending((uv_handle_t*)handle);
} else {
for (last = handle->poll_ctx, it = last->previous;
it != ctx;
last = it, it = it->previous) {
assert(last->previous != NULL);
}
last->previous = ctx->previous;
}
uv__free(ctx);
}
@ -248,7 +279,7 @@ static int statbuf_eq(const uv_stat_t* a, const uv_stat_t* b) {
#include "win/handle-inl.h"
void uv__fs_poll_endgame(uv_loop_t* loop, uv_fs_poll_t* handle) {
assert(handle->flags & UV__HANDLE_CLOSING);
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
uv__handle_close(handle);
}
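
The fs-poll changes above chain superseded poll contexts through a previous pointer, so a handle can be stopped, restarted, or closed while an earlier stat request or timer is still winding down; the handle's close is deferred via uv__make_close_pending until the last context's timer has closed. The public API is unchanged. A minimal usage sketch of uv_fs_poll follows; the file name and interval are illustrative, not taken from this diff:

#include <stdio.h>
#include <uv.h>

static void on_change(uv_fs_poll_t* handle,
                      int status,
                      const uv_stat_t* prev,
                      const uv_stat_t* curr) {
  (void) handle;
  if (status < 0) {
    fprintf(stderr, "fs-poll error: %s\n", uv_strerror(status));
    return;
  }
  /* The callback runs whenever consecutive stat results differ. */
  printf("size: %llu -> %llu\n",
         (unsigned long long) prev->st_size,
         (unsigned long long) curr->st_size);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_fs_poll_t poller;

  uv_fs_poll_init(loop, &poller);
  /* Stat "watched.txt" every 2000 ms; stopping and restarting this handle is
   * exactly the case the previous-context chain above protects. */
  uv_fs_poll_start(&poller, on_change, "watched.txt", 2000);

  return uv_run(loop, UV_RUN_DEFAULT);
}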

291
deps/libuv/src/idna.c vendored Normal file

@ -0,0 +1,291 @@
/* Copyright (c) 2011, 2018 Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* Derived from https://github.com/bnoordhuis/punycode
* but updated to support IDNA 2008.
*/
#include "uv.h"
#include "idna.h"
#include <string.h>
static unsigned uv__utf8_decode1_slow(const char** p,
const char* pe,
unsigned a) {
unsigned b;
unsigned c;
unsigned d;
unsigned min;
if (a > 0xF7)
return -1;
switch (*p - pe) {
default:
if (a > 0xEF) {
min = 0x10000;
a = a & 7;
b = (unsigned char) *(*p)++;
c = (unsigned char) *(*p)++;
d = (unsigned char) *(*p)++;
break;
}
/* Fall through. */
case 2:
if (a > 0xDF) {
min = 0x800;
b = 0x80 | (a & 15);
c = (unsigned char) *(*p)++;
d = (unsigned char) *(*p)++;
a = 0;
break;
}
/* Fall through. */
case 1:
if (a > 0xBF) {
min = 0x80;
b = 0x80;
c = 0x80 | (a & 31);
d = (unsigned char) *(*p)++;
a = 0;
break;
}
return -1; /* Invalid continuation byte. */
}
if (0x80 != (0xC0 & (b ^ c ^ d)))
return -1; /* Invalid sequence. */
b &= 63;
c &= 63;
d &= 63;
a = (a << 18) | (b << 12) | (c << 6) | d;
if (a < min)
return -1; /* Overlong sequence. */
if (a > 0x10FFFF)
return -1; /* Four-byte sequence > U+10FFFF. */
if (a >= 0xD800 && a <= 0xDFFF)
return -1; /* Surrogate pair. */
return a;
}
unsigned uv__utf8_decode1(const char** p, const char* pe) {
unsigned a;
a = (unsigned char) *(*p)++;
if (a < 128)
return a; /* ASCII, common case. */
return uv__utf8_decode1_slow(p, pe, a);
}
#define foreach_codepoint(c, p, pe) \
for (; (void) (*p <= pe && (c = uv__utf8_decode1(p, pe))), *p <= pe;)
static int uv__idna_toascii_label(const char* s, const char* se,
char** d, char* de) {
static const char alphabet[] = "abcdefghijklmnopqrstuvwxyz0123456789";
const char* ss;
unsigned c;
unsigned h;
unsigned k;
unsigned n;
unsigned m;
unsigned q;
unsigned t;
unsigned x;
unsigned y;
unsigned bias;
unsigned delta;
unsigned todo;
int first;
h = 0;
ss = s;
todo = 0;
foreach_codepoint(c, &s, se) {
if (c < 128)
h++;
else if (c == (unsigned) -1)
return UV_EINVAL;
else
todo++;
}
if (todo > 0) {
if (*d < de) *(*d)++ = 'x';
if (*d < de) *(*d)++ = 'n';
if (*d < de) *(*d)++ = '-';
if (*d < de) *(*d)++ = '-';
}
x = 0;
s = ss;
foreach_codepoint(c, &s, se) {
if (c > 127)
continue;
if (*d < de)
*(*d)++ = c;
if (++x == h)
break; /* Visited all ASCII characters. */
}
if (todo == 0)
return h;
/* Only write separator when we've written ASCII characters first. */
if (h > 0)
if (*d < de)
*(*d)++ = '-';
n = 128;
bias = 72;
delta = 0;
first = 1;
while (todo > 0) {
m = -1;
s = ss;
foreach_codepoint(c, &s, se)
if (c >= n)
if (c < m)
m = c;
x = m - n;
y = h + 1;
if (x > ~delta / y)
return UV_E2BIG; /* Overflow. */
delta += x * y;
n = m;
s = ss;
foreach_codepoint(c, &s, se) {
if (c < n)
if (++delta == 0)
return UV_E2BIG; /* Overflow. */
if (c != n)
continue;
for (k = 36, q = delta; /* empty */; k += 36) {
t = 1;
if (k > bias)
t = k - bias;
if (t > 26)
t = 26;
if (q < t)
break;
/* TODO(bnoordhuis) Since 1 <= t <= 26 and therefore
* 10 <= y <= 35, we can optimize the long division
* into a table-based reciprocal multiplication.
*/
x = q - t;
y = 36 - t; /* 10 <= y <= 35 since 1 <= t <= 26. */
q = x / y;
t = t + x % y; /* 1 <= t <= 35 because of y. */
if (*d < de)
*(*d)++ = alphabet[t];
}
if (*d < de)
*(*d)++ = alphabet[q];
delta /= 2;
if (first) {
delta /= 350;
first = 0;
}
/* No overflow check is needed because |delta| was just
* divided by 2 and |delta+delta >= delta + delta/h|.
*/
h++;
delta += delta / h;
for (bias = 0; delta > 35 * 26 / 2; bias += 36)
delta /= 35;
bias += 36 * delta / (delta + 38);
delta = 0;
todo--;
}
delta++;
n++;
}
return 0;
}
#undef foreach_codepoint
long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
const char* si;
const char* st;
unsigned c;
char* ds;
int rc;
ds = d;
for (si = s; si < se; /* empty */) {
st = si;
c = uv__utf8_decode1(&si, se);
if (c != '.')
if (c != 0x3002) /* 。 */
if (c != 0xFF0E) /* ． */
if (c != 0xFF61) /* ｡ */
continue;
rc = uv__idna_toascii_label(s, st, &d, de);
if (rc < 0)
return rc;
if (d < de)
*d++ = '.';
s = si;
}
if (s < se) {
rc = uv__idna_toascii_label(s, se, &d, de);
if (rc < 0)
return rc;
}
if (d < de)
*d++ = '\0';
return d - ds; /* Number of bytes written. */
}

31
deps/libuv/src/idna.h vendored Normal file

@ -0,0 +1,31 @@
/* Copyright (c) 2011, 2018 Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_SRC_IDNA_H_
#define UV_SRC_IDNA_H_
/* Decode a single codepoint. Returns the codepoint or UINT32_MAX on error.
* |p| is updated on success _and_ error, i.e., bad multi-byte sequences are
* skipped in their entirety, not just the first bad byte.
*/
unsigned uv__utf8_decode1(const char** p, const char* pe);
/* Convert a UTF-8 domain name to IDNA 2008 / Punycode. A return value >= 0
* is the number of bytes written to |d|, including the trailing nul byte.
* A return value < 0 is a libuv error code. |s| and |d| can not overlap.
*/
long uv__idna_toascii(const char* s, const char* se, char* d, char* de);
#endif /* UV_SRC_IDNA_H_ */
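
The comments above define the contract of libuv's internal IDNA helpers (the uv__ prefix means they are not public API). A hedged sketch of how uv__idna_toascii could be exercised from inside the library's own source or test tree; the input and expected output are the standard "bücher" Punycode example, not values taken from this diff:

#include <stdio.h>
#include <string.h>

#include "idna.h"

int main(void) {
  /* "bücher.example" spelled out in UTF-8; the literal is split so the hex
   * escape does not swallow the following 'c'. */
  const char* name = "b\xc3\xbc" "cher.example";
  char out[256];
  long n;

  n = uv__idna_toascii(name, name + strlen(name), out, out + sizeof(out));
  if (n < 0)
    return 1;  /* negative values are libuv error codes, e.g. UV_EINVAL */

  /* n counts bytes written including the trailing nul, per the comment above. */
  printf("%s (%ld bytes)\n", out, n);  /* expect: xn--bcher-kva.example */
  return 0;
}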

13
deps/libuv/src/inet.c vendored

@ -19,7 +19,7 @@
#include <string.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "stdint-msvc2008.h"
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
@ -59,8 +59,7 @@ static int inet_ntop4(const unsigned char *src, char *dst, size_t size) {
if (l <= 0 || (size_t) l >= size) {
return UV_ENOSPC;
}
strncpy(dst, tmp, size);
dst[size - 1] = '\0';
uv__strscpy(dst, tmp, size);
return 0;
}
@ -142,14 +141,8 @@ static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
*tp++ = ':';
*tp++ = '\0';
/*
* Check for overflow, copy, and we're done.
*/
if ((size_t)(tp - tmp) > size) {
if (UV_E2BIG == uv__strscpy(dst, tmp, size))
return UV_ENOSPC;
}
strcpy(dst, tmp);
return 0;
}
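
Both hunks above replace hand-rolled strncpy/strcpy termination with uv__strscpy, whose definition is not part of this diff. Inferred from these call sites, it is assumed to copy at most n bytes, always nul-terminate when there is room, and report truncation as UV_E2BIG; the sketch below captures that assumed behaviour and is not the vendored implementation:

#include <stddef.h>
#include <sys/types.h>   /* ssize_t */

#define UV_E2BIG (-7)    /* placeholder; the real value comes from uv-errno.h */

/* Assumed behaviour of uv__strscpy(), inferred from the inet.c call sites. */
ssize_t uv__strscpy(char* d, const char* s, size_t n) {
  size_t i;

  for (i = 0; i < n; i++)
    if ('\0' == (d[i] = s[i]))
      return i;          /* bytes copied, excluding the terminating nul */

  if (n > 0)
    d[n - 1] = '\0';     /* never hand back an unterminated buffer */

  return UV_E2BIG;       /* source did not fit in n bytes */
}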

123
deps/libuv/src/random.c vendored Normal file

@ -0,0 +1,123 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#ifdef _WIN32
# include "win/internal.h"
#else
# include "unix/internal.h"
#endif
static int uv__random(void* buf, size_t buflen) {
int rc;
#if defined(__PASE__)
rc = uv__random_readpath("/dev/urandom", buf, buflen);
#elif defined(_AIX)
rc = uv__random_readpath("/dev/random", buf, buflen);
#elif defined(__APPLE__) || defined(__OpenBSD__) || \
(defined(__ANDROID_API__) && __ANDROID_API__ >= 28)
rc = uv__random_getentropy(buf, buflen);
if (rc == UV_ENOSYS)
rc = uv__random_devurandom(buf, buflen);
#elif defined(__NetBSD__)
rc = uv__random_sysctl(buf, buflen);
#elif defined(__FreeBSD__) || defined(__linux__)
rc = uv__random_getrandom(buf, buflen);
if (rc == UV_ENOSYS)
rc = uv__random_devurandom(buf, buflen);
# if defined(__linux__)
switch (rc) {
case UV_EACCES:
case UV_EIO:
case UV_ELOOP:
case UV_EMFILE:
case UV_ENFILE:
case UV_ENOENT:
case UV_EPERM:
rc = uv__random_sysctl(buf, buflen);
break;
}
# endif
#elif defined(_WIN32)
uv__once_init();
rc = uv__random_rtlgenrandom(buf, buflen);
#else
rc = uv__random_devurandom(buf, buflen);
#endif
return rc;
}
static void uv__random_work(struct uv__work* w) {
uv_random_t* req;
req = container_of(w, uv_random_t, work_req);
req->status = uv__random(req->buf, req->buflen);
}
static void uv__random_done(struct uv__work* w, int status) {
uv_random_t* req;
req = container_of(w, uv_random_t, work_req);
uv__req_unregister(req->loop, req);
if (status == 0)
status = req->status;
req->cb(req, status, req->buf, req->buflen);
}
int uv_random(uv_loop_t* loop,
uv_random_t* req,
void *buf,
size_t buflen,
unsigned flags,
uv_random_cb cb) {
if (buflen > 0x7FFFFFFFu)
return UV_E2BIG;
if (flags != 0)
return UV_EINVAL;
if (cb == NULL)
return uv__random(buf, buflen);
uv__req_init(loop, req, UV_RANDOM);
req->loop = loop;
req->status = 0;
req->cb = cb;
req->buf = buf;
req->buflen = buflen;
uv__work_submit(loop,
&req->work_req,
UV__WORK_CPU,
uv__random_work,
uv__random_done);
return 0;
}
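
uv_random above validates buflen and flags, dispatches to a per-platform entropy source, and either runs synchronously (cb == NULL, in which case loop and req go unused) or queues the work on the threadpool. A usage sketch of the synchronous path; the buffer size and error handling are illustrative:

#include <stdio.h>
#include <uv.h>

int main(void) {
  unsigned char buf[16];
  size_t i;
  int rc;

  /* A NULL callback selects the synchronous path above, so loop and req are
   * never touched and may be NULL; flags must currently be 0. */
  rc = uv_random(NULL, NULL, buf, sizeof(buf), 0, NULL);
  if (rc != 0) {
    fprintf(stderr, "uv_random: %s\n", uv_strerror(rc));
    return 1;
  }

  for (i = 0; i < sizeof(buf); i++)
    printf("%02x", buf[i]);
  printf("\n");
  return 0;
}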

Some files were not shown because too many files have changed in this diff.