🔧 vendor
This commit is contained in:
parent
01ccdc329a
commit
f96c8befdf
|
@ -1,23 +0,0 @@
|
|||
FROM golang:latest
|
||||
MAINTAINER Liang Ding <d@b3log.org>
|
||||
|
||||
ENV GOROOT /usr/local/go
|
||||
|
||||
RUN apt-get update && apt-get install bzip2 zip unzip && cp -r /usr/local/go /usr/local/gobt
|
||||
ENV GOROOT_BOOTSTRAP=/usr/local/gobt
|
||||
|
||||
ADD . /wide/gogogo/src/github.com/b3log/wide
|
||||
|
||||
RUN useradd wide && useradd runner
|
||||
|
||||
ENV GOPATH /wide/gogogo
|
||||
|
||||
RUN go build github.com/go-fsnotify/fsnotify\
|
||||
&& go build github.com/gorilla/sessions\
|
||||
&& go build github.com/gorilla/websocket\
|
||||
&& go install github.com/visualfc/gotools github.com/nsf/gocode github.com/bradfitz/goimports
|
||||
|
||||
WORKDIR /wide/gogogo/src/github.com/b3log/wide
|
||||
RUN go build -v
|
||||
|
||||
EXPOSE 7070
|
|
@ -1,201 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1,180 +0,0 @@
|
|||
# [Wide](https://github.com/b3log/wide) [![Build Status](https://img.shields.io/travis/b3log/wide.svg?style=flat)](https://travis-ci.org/b3log/wide) [![Go Report Card](https://goreportcard.com/badge/github.com/b3log/wide)](https://goreportcard.com/report/github.com/b3log/wide) [![Coverage Status](https://img.shields.io/coveralls/b3log/wide.svg?style=flat)](https://coveralls.io/r/b3log/wide) [![Apache License](https://img.shields.io/badge/license-apache2-orange.svg?style=flat)](https://www.apache.org/licenses/LICENSE-2.0) [![API Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/b3log/wide) [![Download](https://img.shields.io/badge/download-~4.3K-red.svg?style=flat)](https://pan.baidu.com/s/1dD3XwOT)
|
||||
|
||||
_Have a [try](https://wide.b3log.org/signup) first, then [download](https://pan.baidu.com/s/1dD3XwOT) and setup it on your local area network, enjoy yourself!_
|
||||
|
||||
先试试我们搭建好的[在线服务](https://wide.b3log.org/signup),你可以在这里[下载](https://pan.baidu.com/s/1dD3XwOT)并在本地环境运行,然后邀请小伙伴们来玩吧!
|
||||
|
||||
> * 关于 Wide 的产品定位,请看[这里](https://hacpai.com/article/1438407961481),并欢迎参与讨论~
|
||||
> * 加入[**黑客派**](https://hacpai.com/register),与其他程序员、设计师共同成长!
|
||||
|
||||
## Introduction
|
||||
|
||||
A <b>W</b>eb-based <b>IDE</b> for Teams using Go programming language/Golang.
|
||||
|
||||
![Hello, 世界](https://cloud.githubusercontent.com/assets/873584/4606377/d0ca3c2a-521b-11e4-912c-d955ab05850b.png)
|
||||
|
||||
## Authors
|
||||
|
||||
[Daniel](https://github.com/88250) and [Vanessa](https://github.com/Vanessa219) are the main authors of Wide, [here](https://github.com/b3log/wide/graphs/contributors) are all contributors.
|
||||
|
||||
Wide 的主要作者是 [Daniel](https://github.com/88250) 与 [Vanessa](https://github.com/Vanessa219),所有贡献者可以在[这里](https://github.com/b3log/wide/graphs/contributors)看到。
|
||||
|
||||
## Motivation
|
||||
|
||||
* **Team** IDE:
|
||||
* _Safe and reliable_: the project source code stored on the server in real time, the developer's machine crashes without losing any source code
|
||||
* _Unified environment_: server unified development environment configuration, the developer machine without any additional configuration
|
||||
* _Out of the box_: 5 minutes to setup a server then open browser to develop, debug
|
||||
* _Version Control_: each developer has its own source code repository, easy sync with the trunk
|
||||
* **Web-based** IDE:
|
||||
* Developer needs a browser only
|
||||
* Cross-platform, even on mobile devices
|
||||
* Easy to extend
|
||||
* Easy to integrate with other systems
|
||||
* For the geeks
|
||||
* A try for commercial-open source: versions customized for enterprises, close to their development work flows respectively
|
||||
* Currently more popular Go IDE has some defects or regrets:
|
||||
* Text editor (vim/emacs/sublime/Atom, etc.): For the Go newbie is too complex
|
||||
* Plug-in (goclipse, etc.): the need for the original IDE support, not professional
|
||||
* LiteIDE: no modern user interface :p
|
||||
* No team development experience
|
||||
* There are a few of GO IDEs, and no one developed by Go itself, this is a nice try
|
||||
|
||||
## Features
|
||||
|
||||
* [X] Code Highlight, Folding: Go/HTML/JavaScript/Markdown etc.
|
||||
* [X] Autocomplete: Go/HTML etc.
|
||||
* [X] Format: Go/HTML/JSON etc.
|
||||
* [X] Build & Run
|
||||
* [X] Multiplayer: a real team development experience
|
||||
* [X] Navigation, Jump to declaration, Find usages, File search etc.
|
||||
* [X] Shell: run command on the server
|
||||
* [X] Web development: HTML/JS/CSS editor with [Emmet](https://emmet.io) integrated
|
||||
* [X] Go tool: go get/install/fmt etc.
|
||||
* [X] File Import & Export
|
||||
* [X] Themes: editor and UI adjust, respectively
|
||||
* [X] Cross-Compilation
|
||||
* [ ] Debug
|
||||
* [ ] Git integration: git command on the web
|
||||
|
||||
## Screenshots
|
||||
|
||||
* **Overview**
|
||||
|
||||
![Overview](https://cloud.githubusercontent.com/assets/873584/5450620/1d51831e-8543-11e4-930b-670871902425.png)
|
||||
* **Goto File**
|
||||
|
||||
![Goto File](https://cloud.githubusercontent.com/assets/873584/5450616/1d495da6-8543-11e4-9285-f9d9c60779ac.png)
|
||||
* **Autocomplete**
|
||||
|
||||
![Autocomplete](https://cloud.githubusercontent.com/assets/873584/5450619/1d4d5712-8543-11e4-8fe4-35dbc8348a6e.png)
|
||||
* **Theme**
|
||||
|
||||
![4](https://cloud.githubusercontent.com/assets/873584/5450617/1d4c0826-8543-11e4-8b86-f79a4e41550a.png)
|
||||
* **Show Expression Info**
|
||||
|
||||
![Show Expression Info](https://cloud.githubusercontent.com/assets/873584/5450618/1d4cd9f4-8543-11e4-950f-121bd3ff4a39.png)
|
||||
* **Build Error Info**
|
||||
|
||||
![Build Error Info](https://cloud.githubusercontent.com/assets/873584/5450632/3e51cccc-8543-11e4-8ca8-8d2427aa16b8.png)
|
||||
* **Git Clone**
|
||||
|
||||
![Git Clone](https://cloud.githubusercontent.com/assets/873584/6545235/2284f230-c5b7-11e4-985e-7e04367921b1.png)
|
||||
* **Cross-Compilation**
|
||||
|
||||
![Cross-Compilation](https://cloud.githubusercontent.com/assets/873584/10130037/226d75fc-65f7-11e5-94e4-25ee579ca175.png)
|
||||
|
||||
* **Playground**
|
||||
![Playground](https://cloud.githubusercontent.com/assets/873584/21209772/449ecfd2-c2b1-11e6-9aa6-a83477d9f269.gif)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Build & Run
|
||||
|
||||
![Build & Run](https://cloud.githubusercontent.com/assets/873584/4389219/3642bc62-43f3-11e4-8d1f-06d7aaf22784.png)
|
||||
|
||||
* A browser tab corresponds to a Wide session
|
||||
* Execution output push via WebSocket
|
||||
|
||||
Flow:
|
||||
1. Browser sends ````Build```` request
|
||||
2. Server executes ````go build```` command via ````os/exec````<br/>
|
||||
2.1. Generates a executable file
|
||||
3. Browser sends ````Run```` request
|
||||
4. Server executes the file via ````os/exec````<br/>
|
||||
4.1. A running process<br/>
|
||||
4.2. Execution output push via WebSocket channel
|
||||
5. Browser renders with callback function ````ws.onmessage````
|
||||
|
||||
### Code Assist
|
||||
|
||||
![Code Assist](https://cloud.githubusercontent.com/assets/873584/4399135/3b80c21c-4463-11e4-8e94-7f7e8d12a4df.png)
|
||||
|
||||
* Autocompletion
|
||||
* Find Usages/Jump To Declaration/etc.
|
||||
|
||||
Flow:
|
||||
1. Browser sends code assist request
|
||||
2. Handler gets user workspace of the request with HTTP session
|
||||
3. Server executes ````gocode````/````ide_stub(gotools)````<br/>
|
||||
3.1 Sets environment variables (e.g. ${GOPATH})<br/>
|
||||
3.2 ````gocode```` with ````lib-path```` parameter
|
||||
|
||||
## Documents
|
||||
|
||||
* [用户指南](https://www.gitbook.com/book/88250/wide-user-guide)
|
||||
* [开发指南](https://www.gitbook.com/book/88250/wide-dev-guide)
|
||||
|
||||
## Setup
|
||||
|
||||
### Download Binary
|
||||
|
||||
We have provided OS-specific executable binary as follows:
|
||||
|
||||
* linux-amd64/386
|
||||
* windows-amd64/386
|
||||
* darwin-amd64/386
|
||||
|
||||
Download [HERE](https://pan.baidu.com/s/1dD3XwOT)!
|
||||
|
||||
### Build Wide for yourself
|
||||
|
||||
1. [Download](https://github.com/b3log/wide/archive/master.zip) source or by `git clone https://github.com/b3log/wide`
|
||||
2. Get dependencies with
|
||||
* `go get`
|
||||
* `go get github.com/visualfc/gotools github.com/nsf/gocode github.com/bradfitz/goimports`
|
||||
3. Compile wide with `go build`
|
||||
|
||||
### Docker
|
||||
|
||||
1. Get image: `sudo docker pull 88250/wide:latest`
|
||||
2. Run: `sudo docker run -p 127.0.0.1:7070:7070 88250/wide:latest ./wide -docker=true -channel=ws://127.0.0.1:7070`
|
||||
3. Open browser: http://127.0.0.1:7070
|
||||
|
||||
## Known Issues
|
||||
|
||||
* [Shell is not available on Windows](https://github.com/b3log/wide/issues/32)
|
||||
* [Rename directory](https://github.com/b3log/wide/issues/251)
|
||||
|
||||
## Terms
|
||||
|
||||
* This software is open sourced under the Apache License 2.0
|
||||
* You can not get rid of the "Powered by [B3log](https://b3log.org)" from any page, even which you made
|
||||
* If you want to use this software for commercial purpose, please mail to support@liuyun.io for a commercial license request
|
||||
* Copyright © b3log.org, all rights reserved
|
||||
|
||||
## Credits
|
||||
|
||||
Wide is made possible by the following open source projects.
|
||||
|
||||
* [golang](https://golang.org)
|
||||
* [CodeMirror](https://github.com/marijnh/CodeMirror)
|
||||
* [zTree](https://github.com/zTree/zTree_v3)
|
||||
* [LiteIDE](https://github.com/visualfc/liteide)
|
||||
* [gocode](https://github.com/nsf/gocode)
|
||||
* [Gorilla](https://github.com/gorilla)
|
||||
* [Docker](https://docker.com)
|
||||
|
||||
----
|
||||
|
||||
<img src="https://cloud.githubusercontent.com/assets/873584/4606328/4e848b96-5219-11e4-8db1-fa12774b57b4.png" width="256px" />
|
|
@ -1,4 +0,0 @@
|
|||
* This software is open sourced under the Apache License 2.0
|
||||
* You can not get rid of the "Powered by [B3log](https://b3log.org)" from any pages, even the pages are developed by you
|
||||
* If you want to use this software for commercial purpose, please mail to support@liuyun.io for request a commercial license
|
||||
* Copyright (c) b3log.org, all rights reserved
|
|
@ -1,24 +0,0 @@
|
|||
#!/bin/bash
|
||||
# see https://gist.github.com/hailiang/0f22736320abe6be71ce for more details
|
||||
|
||||
set -e
|
||||
|
||||
# Run test coverage on each subdirectories and merge the coverage profile.
|
||||
|
||||
echo "mode: count" > profile.cov
|
||||
|
||||
# Standard go tooling behavior is to ignore dirs with leading underscors
|
||||
for dir in $(find . -maxdepth 10 -not -path './.git*' -not -path '*/_*' -type d);
|
||||
do
|
||||
if ls $dir/*.go &> /dev/null; then
|
||||
go test -covermode=count -coverprofile=$dir/profile.tmp $dir
|
||||
if [ -f $dir/profile.tmp ]
|
||||
then
|
||||
cat $dir/profile.tmp | tail -n +2 >> profile.cov
|
||||
rm $dir/profile.tmp
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
go tool cover -func profile.cov
|
||||
|
|
@ -1,135 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2014-2015, b3log.org
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* https://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file frontend tool.
|
||||
*
|
||||
* @author <a href="mailto:liliyuan@fangstar.net">Liyuan Li</a>
|
||||
* @version 0.1.0.0, Dec 15, 2015
|
||||
*/
|
||||
var gulp = require("gulp");
|
||||
var concat = require('gulp-concat');
|
||||
var minifyCSS = require('gulp-minify-css');
|
||||
var uglify = require('gulp-uglify');
|
||||
var sourcemaps = require("gulp-sourcemaps");
|
||||
|
||||
gulp.task('cc', function () {
|
||||
// css
|
||||
var cssLibs = ['./static/js/lib/jquery-layout/layout-default-latest.css',
|
||||
'./static/js/lib/codemirror-5.1/codemirror.css',
|
||||
'./static/js/lib/codemirror-5.1/addon/hint/show-hint.css',
|
||||
'./static/js/lib/codemirror-5.1/addon/lint/lint.css',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/foldgutter.css',
|
||||
'./static/js/lib/codemirror-5.1/addon/dialog/dialog.css',
|
||||
'./static/js/overwrite/codemirror/theme/*.css'];
|
||||
gulp.src(cssLibs)
|
||||
.pipe(minifyCSS())
|
||||
.pipe(concat('lib.min.css'))
|
||||
.pipe(gulp.dest('./static/css/'));
|
||||
|
||||
gulp.src('./static/js/lib/ztree/zTreeStyle.css')
|
||||
.pipe(minifyCSS())
|
||||
.pipe(concat('zTreeStyle.min.css'))
|
||||
.pipe(gulp.dest('./static/js/lib/ztree/'));
|
||||
|
||||
var cssWide = ['./static/css/dialog.css',
|
||||
'./static/css/base.css',
|
||||
'./static/css/wide.css',
|
||||
'./static/css/side.css',
|
||||
'./static/css/start.css',
|
||||
'./static/css/about.css'
|
||||
];
|
||||
|
||||
gulp.src(cssWide)
|
||||
.pipe(minifyCSS())
|
||||
.pipe(concat('wide.min.css'))
|
||||
.pipe(gulp.dest('./static/css/'));
|
||||
|
||||
|
||||
// js
|
||||
var jsLibs = ['./static/js/lib/jquery-2.1.1.min.js',
|
||||
'./static/js/lib/jquery-ui.min.js',
|
||||
'./static/js/lib/jquery-layout/jquery.layout-latest.js',
|
||||
'./static/js/lib/reconnecting-websocket.js',
|
||||
'./static/js/lib/Autolinker.min.js',
|
||||
'./static/js/lib/emmet.js',
|
||||
'./static/js/lib/js-beautify-1.5.4/beautify.js',
|
||||
'./static/js/lib/js-beautify-1.5.4/beautify-html.js',
|
||||
'./static/js/lib/js-beautify-1.5.4/beautify-css.js',
|
||||
'./static/js/lib/jquery-file-upload-9.8.0/vendor/jquery.ui.widget.js',
|
||||
'./static/js/lib/jquery-file-upload-9.8.0/jquery.iframe-transport.js',
|
||||
'./static/js/lib/jquery-file-upload-9.8.0/jquery.fileupload.js',
|
||||
'./static/js/lib/codemirror-5.1/codemirror.min.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/lint/lint.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/lint/json-lint.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/selection/active-line.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/selection/active-line.js',
|
||||
'./static/js/overwrite/codemirror/addon/hint/show-hint.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/hint/anyword-hint.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/display/rulers.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/edit/closebrackets.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/edit/matchbrackets.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/edit/closetag.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/search/searchcursor.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/search/search.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/dialog/dialog.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/search/match-highlighter.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/foldcode.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/foldgutter.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/brace-fold.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/xml-fold.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/markdown-fold.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/comment-fold.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/mode/loadmode.js',
|
||||
'./static/js/lib/codemirror-5.1/addon/fold/comment/comment.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/meta.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/go/go.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/clike/clike.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/xml/xml.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/htmlmixed/htmlmixed.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/javascript/javascript.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/markdown/markdown.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/css/css.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/shell/shell.js',
|
||||
'./static/js/lib/codemirror-5.1/mode/sql/sql.js',
|
||||
'./static/js/lib/codemirror-5.1/keymap/vim.js',
|
||||
'./static/js/lib/lint/json-lint.js',
|
||||
'./static/js/lib/lint/go-lint.js'];
|
||||
gulp.src(jsLibs)
|
||||
.pipe(uglify())
|
||||
.pipe(concat('lib.min.js'))
|
||||
.pipe(gulp.dest('./static/js/'));
|
||||
|
||||
var jsWide = ['./static/js/tabs.js',
|
||||
'./static/js/tabs.js',
|
||||
'./static/js/dialog.js',
|
||||
'./static/js/editors.js',
|
||||
'./static/js/notification.js',
|
||||
'./static/js/tree.js',
|
||||
'./static/js/wide.js',
|
||||
'./static/js/session.js',
|
||||
'./static/js/menu.js',
|
||||
'./static/js/windows.js',
|
||||
'./static/js/hotkeys.js',
|
||||
'./static/js/bottomGroup.js'
|
||||
];
|
||||
gulp.src(jsWide)
|
||||
.pipe(sourcemaps.init())
|
||||
.pipe(uglify())
|
||||
.pipe(concat('wide.min.js'))
|
||||
.pipe(sourcemaps.write("."))
|
||||
.pipe(gulp.dest('./static/js/'));
|
||||
});
|
Binary file not shown.
|
@ -1,483 +0,0 @@
|
|||
// Copyright (c) 2014-2018, b3log.org & hacpai.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"flag"
|
||||
"html/template"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/b3log/wide/conf"
|
||||
"github.com/b3log/wide/editor"
|
||||
"github.com/b3log/wide/event"
|
||||
"github.com/b3log/wide/file"
|
||||
"github.com/b3log/wide/i18n"
|
||||
"github.com/b3log/wide/log"
|
||||
"github.com/b3log/wide/notification"
|
||||
"github.com/b3log/wide/output"
|
||||
"github.com/b3log/wide/playground"
|
||||
"github.com/b3log/wide/scm/git"
|
||||
"github.com/b3log/wide/session"
|
||||
"github.com/b3log/wide/util"
|
||||
)
|
||||
|
||||
// Logger
|
||||
var logger *log.Logger
|
||||
|
||||
// The only one init function in Wide.
|
||||
func init() {
|
||||
confPath := flag.String("conf", "conf/wide.json", "path of wide.json")
|
||||
confIP := flag.String("ip", "", "this will overwrite Wide.IP if specified")
|
||||
confPort := flag.String("port", "", "this will overwrite Wide.Port if specified")
|
||||
confServer := flag.String("server", "", "this will overwrite Wide.Server if specified")
|
||||
confLogLevel := flag.String("log_level", "", "this will overwrite Wide.LogLevel if specified")
|
||||
confStaticServer := flag.String("static_server", "", "this will overwrite Wide.StaticServer if specified")
|
||||
confContext := flag.String("context", "", "this will overwrite Wide.Context if specified")
|
||||
confChannel := flag.String("channel", "", "this will overwrite Wide.Channel if specified")
|
||||
confStat := flag.Bool("stat", false, "whether report statistics periodically")
|
||||
confDocker := flag.Bool("docker", false, "whether run in a docker container")
|
||||
confPlayground := flag.String("playground", "", "this will overwrite Wide.Playground if specified")
|
||||
confUsersWorkspaces := flag.String("users_workspaces", "", "this will overwrite Wide.UsersWorkspaces if specified")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
log.SetLevel("warn")
|
||||
logger = log.NewLogger(os.Stdout)
|
||||
|
||||
wd := util.OS.Pwd()
|
||||
if strings.HasPrefix(wd, os.TempDir()) {
|
||||
logger.Error("Don't run Wide in OS' temp directory or with `go run`")
|
||||
|
||||
os.Exit(-1)
|
||||
}
|
||||
|
||||
i18n.Load()
|
||||
event.Load()
|
||||
conf.Load(*confPath, *confIP, *confPort, *confServer, *confLogLevel, *confStaticServer, *confContext, *confChannel,
|
||||
*confPlayground, *confDocker, *confUsersWorkspaces)
|
||||
|
||||
conf.FixedTimeCheckEnv()
|
||||
session.FixedTimeSave()
|
||||
session.FixedTimeRelease()
|
||||
|
||||
if *confStat {
|
||||
session.FixedTimeReport()
|
||||
}
|
||||
|
||||
logger.Debug("host ["+runtime.Version()+", "+runtime.GOOS+"_"+runtime.GOARCH+"], cross-compilation ",
|
||||
util.Go.GetCrossPlatforms())
|
||||
}
|
||||
|
||||
// Main.
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(conf.Wide.MaxProcs)
|
||||
|
||||
initMime()
|
||||
handleSignal()
|
||||
|
||||
// IDE
|
||||
http.HandleFunc(conf.Wide.Context+"/", handlerGzWrapper(indexHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/start", handlerWrapper(startHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/about", handlerWrapper(aboutHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/keyboard_shortcuts", handlerWrapper(keyboardShortcutsHandler))
|
||||
|
||||
// static resources
|
||||
http.Handle(conf.Wide.Context+"/static/", http.StripPrefix(conf.Wide.Context+"/static/", http.FileServer(http.Dir("static"))))
|
||||
serveSingle("/favicon.ico", "./static/favicon.ico")
|
||||
|
||||
// workspaces
|
||||
for _, user := range conf.Users {
|
||||
http.Handle(conf.Wide.Context+"/workspace/"+user.Name+"/",
|
||||
http.StripPrefix(conf.Wide.Context+"/workspace/"+user.Name+"/", http.FileServer(http.Dir(user.WorkspacePath()))))
|
||||
}
|
||||
|
||||
// session
|
||||
http.HandleFunc(conf.Wide.Context+"/session/ws", handlerWrapper(session.WSHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/session/save", handlerWrapper(session.SaveContentHandler))
|
||||
|
||||
// run
|
||||
http.HandleFunc(conf.Wide.Context+"/build", handlerWrapper(output.BuildHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/run", handlerWrapper(output.RunHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/stop", handlerWrapper(output.StopHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/test", handlerWrapper(output.GoTestHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/vet", handlerWrapper(output.GoVetHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/get", handlerWrapper(output.GoGetHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/install", handlerWrapper(output.GoInstallHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/output/ws", handlerWrapper(output.WSHandler))
|
||||
|
||||
// cross-compilation
|
||||
http.HandleFunc(conf.Wide.Context+"/cross", handlerWrapper(output.CrossCompilationHandler))
|
||||
|
||||
// file tree
|
||||
http.HandleFunc(conf.Wide.Context+"/files", handlerWrapper(file.GetFilesHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/refresh", handlerWrapper(file.RefreshDirectoryHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file", handlerWrapper(file.GetFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/save", handlerWrapper(file.SaveFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/new", handlerWrapper(file.NewFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/remove", handlerWrapper(file.RemoveFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/rename", handlerWrapper(file.RenameFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/search/text", handlerWrapper(file.SearchTextHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/find/name", handlerWrapper(file.FindHandler))
|
||||
|
||||
// outline
|
||||
http.HandleFunc(conf.Wide.Context+"/outline", handlerWrapper(file.GetOutlineHandler))
|
||||
|
||||
// file export/import
|
||||
http.HandleFunc(conf.Wide.Context+"/file/zip/new", handlerWrapper(file.CreateZipHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/zip", handlerWrapper(file.GetZipHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/upload", handlerWrapper(file.UploadHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/decompress", handlerWrapper(file.DecompressHandler))
|
||||
|
||||
// editor
|
||||
http.HandleFunc(conf.Wide.Context+"/editor/ws", handlerWrapper(editor.WSHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/fmt", handlerWrapper(editor.GoFmtHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/autocomplete", handlerWrapper(editor.AutocompleteHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/exprinfo", handlerWrapper(editor.GetExprInfoHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/find/decl", handlerWrapper(editor.FindDeclarationHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/find/usages", handlerWrapper(editor.FindUsagesHandler))
|
||||
|
||||
// shell
|
||||
// http.HandleFunc(conf.Wide.Context+"/shell/ws", handlerWrapper(shell.WSHandler))
|
||||
// http.HandleFunc(conf.Wide.Context+"/shell", handlerWrapper(shell.IndexHandler))
|
||||
|
||||
// notification
|
||||
http.HandleFunc(conf.Wide.Context+"/notification/ws", handlerWrapper(notification.WSHandler))
|
||||
|
||||
// user
|
||||
http.HandleFunc(conf.Wide.Context+"/login", handlerWrapper(session.LoginHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/logout", handlerWrapper(session.LogoutHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/signup", handlerWrapper(session.SignUpUserHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/preference", handlerWrapper(session.PreferenceHandler))
|
||||
|
||||
// playground
|
||||
http.HandleFunc(conf.Wide.Context+"/playground", handlerWrapper(playground.IndexHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/", handlerWrapper(playground.IndexHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/ws", handlerWrapper(playground.WSHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/save", handlerWrapper(playground.SaveHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/short-url", handlerWrapper(playground.ShortURLHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/build", handlerWrapper(playground.BuildHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/run", handlerWrapper(playground.RunHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/stop", handlerWrapper(playground.StopHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/autocomplete", handlerWrapper(playground.AutocompleteHandler))
|
||||
|
||||
// git
|
||||
http.HandleFunc(conf.Wide.Context+"/git/clone", handlerWrapper(git.CloneHandler))
|
||||
|
||||
logger.Infof("Wide is running [%s]", conf.Wide.Server+conf.Wide.Context)
|
||||
|
||||
err := http.ListenAndServe(conf.Wide.Server, nil)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// indexHandler handles request of Wide index.
|
||||
func indexHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if conf.Wide.Context+"/" != r.RequestURI {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession, _ := session.HTTPSession.Get(r, "wide-session")
|
||||
if httpSession.IsNew {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
username := httpSession.Values["username"].(string)
|
||||
if "playground" == username { // reserved user for Playground
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
|
||||
if "" != conf.Wide.Context {
|
||||
httpSession.Options.Path = conf.Wide.Context
|
||||
}
|
||||
httpSession.Save(r, w)
|
||||
|
||||
user := conf.GetUser(username)
|
||||
if nil == user {
|
||||
logger.Warnf("Not found user [%s]", username)
|
||||
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
locale := user.Locale
|
||||
|
||||
wideSessions := session.WideSessions.GetByUsername(username)
|
||||
|
||||
model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(locale), "locale": locale,
|
||||
"username": username, "sid": session.WideSessions.GenId(), "latestSessionContent": user.LatestSessionContent,
|
||||
"pathSeparator": conf.PathSeparator, "codeMirrorVer": conf.CodeMirrorVer,
|
||||
"user": user, "editorThemes": conf.GetEditorThemes(), "crossPlatforms": util.Go.GetCrossPlatforms()}
|
||||
|
||||
logger.Debugf("User [%s] has [%d] sessions", username, len(wideSessions))
|
||||
|
||||
t, err := template.ParseFiles("views/index.html")
|
||||
if nil != err {
|
||||
logger.Error(err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Execute(w, model)
|
||||
}
|
||||
|
||||
// handleSignal handles system signal for graceful shutdown.
|
||||
func handleSignal() {
|
||||
go func() {
|
||||
c := make(chan os.Signal)
|
||||
|
||||
signal.Notify(c, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)
|
||||
s := <-c
|
||||
logger.Tracef("Got signal [%s]", s)
|
||||
|
||||
session.SaveOnlineUsers()
|
||||
logger.Tracef("Saved all online user, exit")
|
||||
|
||||
os.Exit(0)
|
||||
}()
|
||||
}
|
||||
|
||||
// serveSingle registers the handler function for the given pattern and filename.
|
||||
func serveSingle(pattern string, filename string) {
|
||||
http.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
|
||||
http.ServeFile(w, r, filename)
|
||||
})
|
||||
}
|
||||
|
||||
// startHandler handles request of start page.
|
||||
func startHandler(w http.ResponseWriter, r *http.Request) {
|
||||
httpSession, _ := session.HTTPSession.Get(r, "wide-session")
|
||||
if httpSession.IsNew {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
|
||||
if "" != conf.Wide.Context {
|
||||
httpSession.Options.Path = conf.Wide.Context
|
||||
}
|
||||
httpSession.Save(r, w)
|
||||
|
||||
username := httpSession.Values["username"].(string)
|
||||
locale := conf.GetUser(username).Locale
|
||||
userWorkspace := conf.GetUserWorkspace(username)
|
||||
|
||||
sid := r.URL.Query()["sid"][0]
|
||||
wSession := session.WideSessions.Get(sid)
|
||||
if nil == wSession {
|
||||
logger.Errorf("Session [%s] not found", sid)
|
||||
}
|
||||
|
||||
model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(locale), "locale": locale,
|
||||
"username": username, "workspace": userWorkspace, "ver": conf.WideVersion, "sid": sid}
|
||||
|
||||
t, err := template.ParseFiles("views/start.html")
|
||||
|
||||
if nil != err {
|
||||
logger.Error(err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Execute(w, model)
|
||||
}
|
||||
|
||||
// keyboardShortcutsHandler handles request of keyboard shortcuts page.
|
||||
func keyboardShortcutsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
httpSession, _ := session.HTTPSession.Get(r, "wide-session")
|
||||
if httpSession.IsNew {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
|
||||
if "" != conf.Wide.Context {
|
||||
httpSession.Options.Path = conf.Wide.Context
|
||||
}
|
||||
httpSession.Save(r, w)
|
||||
|
||||
username := httpSession.Values["username"].(string)
|
||||
locale := conf.GetUser(username).Locale
|
||||
|
||||
model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(locale), "locale": locale}
|
||||
|
||||
t, err := template.ParseFiles("views/keyboard_shortcuts.html")
|
||||
|
||||
if nil != err {
|
||||
logger.Error(err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Execute(w, model)
|
||||
}
|
||||
|
||||
// aboutHandle handles request of about page.
|
||||
func aboutHandler(w http.ResponseWriter, r *http.Request) {
|
||||
httpSession, _ := session.HTTPSession.Get(r, "wide-session")
|
||||
if httpSession.IsNew {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
|
||||
if "" != conf.Wide.Context {
|
||||
httpSession.Options.Path = conf.Wide.Context
|
||||
}
|
||||
httpSession.Save(r, w)
|
||||
|
||||
username := httpSession.Values["username"].(string)
|
||||
locale := conf.GetUser(username).Locale
|
||||
|
||||
model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(locale), "locale": locale,
|
||||
"ver": conf.WideVersion, "goos": runtime.GOOS, "goarch": runtime.GOARCH, "gover": runtime.Version()}
|
||||
|
||||
t, err := template.ParseFiles("views/about.html")
|
||||
|
||||
if nil != err {
|
||||
logger.Error(err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Execute(w, model)
|
||||
}
|
||||
|
||||
// handlerWrapper wraps the HTTP Handler for some common processes.
|
||||
//
|
||||
// 1. panic recover
|
||||
// 2. request stopwatch
|
||||
// 3. i18n
|
||||
func handlerWrapper(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
handler := panicRecover(f)
|
||||
handler = stopwatch(handler)
|
||||
handler = i18nLoad(handler)
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// handlerGzWrapper wraps the HTTP Handler for some common processes.
|
||||
//
|
||||
// 1. panic recover
|
||||
// 2. gzip response
|
||||
// 3. request stopwatch
|
||||
// 4. i18n
|
||||
func handlerGzWrapper(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
handler := panicRecover(f)
|
||||
handler = gzipWrapper(handler)
|
||||
handler = stopwatch(handler)
|
||||
handler = i18nLoad(handler)
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// gzipWrapper wraps the process with response gzip.
|
||||
func gzipWrapper(f func(http.ResponseWriter, *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
f(w, r)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
gz := gzip.NewWriter(w)
|
||||
defer gz.Close()
|
||||
gzr := gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
||||
|
||||
f(gzr, r)
|
||||
}
|
||||
}
|
||||
|
||||
// i18nLoad wraps the i18n process.
|
||||
func i18nLoad(handler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
i18n.Load()
|
||||
|
||||
handler(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// stopwatch wraps the request stopwatch process.
|
||||
func stopwatch(handler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
|
||||
defer func() {
|
||||
logger.Tracef("[%s, %s, %s]", r.Method, r.RequestURI, time.Since(start))
|
||||
}()
|
||||
|
||||
handler(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// panicRecover wraps the panic recover process.
|
||||
func panicRecover(handler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
defer util.Recover()
|
||||
|
||||
handler(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// initMime initializes mime types.
|
||||
//
|
||||
// We can't get the mime types on some OS (such as Windows XP) by default, so initializes them here.
|
||||
func initMime() {
|
||||
mime.AddExtensionType(".css", "text/css")
|
||||
mime.AddExtensionType(".js", "application/x-javascript")
|
||||
mime.AddExtensionType(".json", "application/json")
|
||||
}
|
||||
|
||||
// gzipResponseWriter represents a gzip response writer.
|
||||
type gzipResponseWriter struct {
|
||||
io.Writer
|
||||
http.ResponseWriter
|
||||
}
|
||||
|
||||
// Write writes response with appropriate 'Content-Type'.
|
||||
func (w gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
if "" == w.Header().Get("Content-Type") {
|
||||
// If no content type, apply sniffing algorithm to un-gzipped body.
|
||||
w.Header().Set("Content-Type", http.DetectContentType(b))
|
||||
}
|
||||
|
||||
return w.Writer.Write(b)
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
{
|
||||
"name": "wide",
|
||||
"version": "1.4.0",
|
||||
"description": "A Web-based IDE for Teams using Go programming language/Golang.",
|
||||
"homepage": "https://wide.b3log.org",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git://github.com/b3log/wide.git"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/b3log/wide/issues"
|
||||
},
|
||||
"license": "Apache License",
|
||||
"private": true,
|
||||
"author": "Daniel <d@b3log.org> (http://88250.b3log.org) & Vanessa <v@b3log.org> (http://vanessa.b3log.org)",
|
||||
"maintainers": [
|
||||
{
|
||||
"name": "Daniel",
|
||||
"email": "d@b3log.org"
|
||||
},
|
||||
{
|
||||
"name": "Vanessa",
|
||||
"email": "v@b3log.org"
|
||||
}
|
||||
],
|
||||
"devDependencies": {
|
||||
"gulp": "^3.9.1",
|
||||
"gulp-concat": "^2.6.1",
|
||||
"gulp-minify-css": "^1.2.4",
|
||||
"gulp-sourcemaps": "^2.6.0",
|
||||
"gulp-uglify": "^2.1.2"
|
||||
}
|
||||
}
|
|
@ -1,80 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Wide package tool.
|
||||
#
|
||||
# Command:
|
||||
# ./pkg.sh ${version} ${target}
|
||||
# Example:
|
||||
# ./pkg.sh 1.0.0 /home/daniel/1.0.0/
|
||||
|
||||
ver=$1
|
||||
target=$2
|
||||
list="conf doc i18n static views README.md TERMS.md LICENSE"
|
||||
|
||||
mkdir -p ${target}
|
||||
|
||||
echo version=${ver}
|
||||
echo target=${target}
|
||||
|
||||
## darwin
|
||||
os=darwin
|
||||
|
||||
export GOOS=${os}
|
||||
export GOARCH=amd64
|
||||
echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
|
||||
go build
|
||||
go build github.com/visualfc/gotools
|
||||
go build github.com/nsf/gocode
|
||||
tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
|
||||
rm -f wide gotools gocode
|
||||
|
||||
export GOOS=${os}
|
||||
export GOARCH=386
|
||||
echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
|
||||
go build
|
||||
go build github.com/visualfc/gotools
|
||||
go build github.com/nsf/gocode
|
||||
tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
|
||||
rm -f wide gotools gocode
|
||||
|
||||
## linux
|
||||
os=linux
|
||||
|
||||
export GOOS=${os}
|
||||
export GOARCH=amd64
|
||||
echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
|
||||
go build
|
||||
go build github.com/visualfc/gotools
|
||||
go build github.com/nsf/gocode
|
||||
tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
|
||||
rm -f wide gotools gocode
|
||||
|
||||
export GOOS=${os}
|
||||
export GOARCH=386
|
||||
echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
|
||||
go build
|
||||
go build github.com/visualfc/gotools
|
||||
go build github.com/nsf/gocode
|
||||
tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
|
||||
rm -f wide gotools gocode
|
||||
|
||||
## windows
|
||||
os=windows
|
||||
|
||||
export GOOS=${os}
|
||||
export GOARCH=amd64
|
||||
echo wide-${ver}-${GOOS}-${GOARCH}.zip
|
||||
go build
|
||||
go build github.com/visualfc/gotools
|
||||
go build github.com/nsf/gocode
|
||||
zip -r -q ${target}/wide-${ver}-${GOOS}-${GOARCH}.zip ${list} gotools.exe gocode.exe wide.exe --exclude=conf/*.go --exclude=i18n/*.go
|
||||
rm -f wide.exe gotools.exe gocode.exe
|
||||
|
||||
export GOOS=${os}
|
||||
export GOARCH=386
|
||||
echo wide-${ver}-${GOOS}-${GOARCH}.zip
|
||||
go build
|
||||
go build github.com/visualfc/gotools
|
||||
go build github.com/nsf/gocode
|
||||
zip -r -q ${target}/wide-${ver}-${GOOS}-${GOARCH}.zip ${list} gotools.exe gocode.exe wide.exe --exclude=conf/*.go --exclude=i18n/*.go
|
||||
rm -f wide.exe gotools.exe gocode.exe
|
File diff suppressed because it is too large
Load Diff
|
@ -1,27 +0,0 @@
|
|||
Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,17 +0,0 @@
|
|||
This tool updates your Go import lines, adding missing ones and
|
||||
removing unreferenced ones.
|
||||
|
||||
$ go get golang.org/x/tools/cmd/goimports
|
||||
|
||||
Note the new location. This project has moved to the official
|
||||
go.tools repo. Pull requests here will no longer be accepted.
|
||||
Please use the Go process: http://golang.org/doc/contribute.html
|
||||
|
||||
It acts the same as gofmt (same flags, etc) but in addition to code
|
||||
formatting, also fixes imports.
|
||||
|
||||
See usage and editor integration notes, now moved elsewhere:
|
||||
|
||||
http://godoc.org/golang.org/x/tools/cmd/goimports
|
||||
|
||||
Happy hacking!
|
|
@ -1,195 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/scanner"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/imports"
|
||||
)
|
||||
|
||||
var (
|
||||
// main operation modes
|
||||
list = flag.Bool("l", false, "list files whose formatting differs from goimport's")
|
||||
write = flag.Bool("w", false, "write result to (source) file instead of stdout")
|
||||
doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
|
||||
|
||||
options = &imports.Options{
|
||||
TabWidth: 8,
|
||||
TabIndent: true,
|
||||
Comments: true,
|
||||
Fragment: true,
|
||||
}
|
||||
exitCode = 0
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)")
|
||||
}
|
||||
|
||||
func report(err error) {
|
||||
scanner.PrintError(os.Stderr, err)
|
||||
exitCode = 2
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
func isGoFile(f os.FileInfo) bool {
|
||||
// ignore non-Go files
|
||||
name := f.Name()
|
||||
return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
|
||||
}
|
||||
|
||||
func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error {
|
||||
opt := options
|
||||
if stdin {
|
||||
nopt := *options
|
||||
nopt.Fragment = true
|
||||
opt = &nopt
|
||||
}
|
||||
|
||||
if in == nil {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
in = f
|
||||
}
|
||||
|
||||
src, err := ioutil.ReadAll(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := imports.Process(filename, src, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(src, res) {
|
||||
// formatting has changed
|
||||
if *list {
|
||||
fmt.Fprintln(out, filename)
|
||||
}
|
||||
if *write {
|
||||
err = ioutil.WriteFile(filename, res, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if *doDiff {
|
||||
data, err := diff(src, res)
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing diff: %s", err)
|
||||
}
|
||||
fmt.Printf("diff %s gofmt/%s\n", filename, filename)
|
||||
out.Write(data)
|
||||
}
|
||||
}
|
||||
|
||||
if !*list && !*write && !*doDiff {
|
||||
_, err = out.Write(res)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func visitFile(path string, f os.FileInfo, err error) error {
|
||||
if err == nil && isGoFile(f) {
|
||||
err = processFile(path, nil, os.Stdout, false)
|
||||
}
|
||||
if err != nil {
|
||||
report(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func walkDir(path string) {
|
||||
filepath.Walk(path, visitFile)
|
||||
}
|
||||
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
// call gofmtMain in a separate function
|
||||
// so that it can use defer and have them
|
||||
// run before the exit.
|
||||
gofmtMain()
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
func gofmtMain() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
if options.TabWidth < 0 {
|
||||
fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth)
|
||||
exitCode = 2
|
||||
return
|
||||
}
|
||||
|
||||
if flag.NArg() == 0 {
|
||||
if err := processFile("<standard input>", os.Stdin, os.Stdout, true); err != nil {
|
||||
report(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for i := 0; i < flag.NArg(); i++ {
|
||||
path := flag.Arg(i)
|
||||
switch dir, err := os.Stat(path); {
|
||||
case err != nil:
|
||||
report(err)
|
||||
case dir.IsDir():
|
||||
walkDir(path)
|
||||
default:
|
||||
if err := processFile(path, nil, os.Stdout, false); err != nil {
|
||||
report(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func diff(b1, b2 []byte) (data []byte, err error) {
|
||||
f1, err := ioutil.TempFile("", "gofmt")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.Remove(f1.Name())
|
||||
defer f1.Close()
|
||||
|
||||
f2, err := ioutil.TempFile("", "gofmt")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.Remove(f2.Name())
|
||||
defer f2.Close()
|
||||
|
||||
f1.Write(b1)
|
||||
f2.Write(b2)
|
||||
|
||||
data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
|
||||
if len(data) > 0 {
|
||||
// diff exits with a non-zero status when the files don't match.
|
||||
// Ignore that failure as long as we get output.
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||
Bruno Bigras <bigras.bruno@gmail.com>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <chris@howey.me> <howeyc@gmail.com>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Daniel Wagner-Hall <dawagner@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Evan Phoenix <evan@fallingsnow.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Hari haran <hariharan.uno@gmail.com>
|
||||
John C Barstow
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
|
||||
Matt Layher <mdlayher@gmail.com>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Patrick <patrick@dropbox.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pawel Knap <pawelknap88@gmail.com>
|
||||
Pieter Droogendijk <pieter@binky.org.uk>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Riku Voipio <riku.voipio@linaro.org>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Slawek Ligus <root@ooz.ie>
|
||||
Soge Zhang <zhssoge@gmail.com>
|
||||
Tiffany Jernigan <tiffany.jernigan@intel.com>
|
||||
Tilak Sharma <tilaks@google.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
Yukang <moorekang@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
||||
铁哥 <guotie.9@gmail.com>
|
|
@ -1,307 +0,0 @@
|
|||
# Changelog
|
||||
|
||||
## v1.4.2 / 2016-10-10
|
||||
|
||||
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
|
||||
|
||||
## v1.4.1 / 2016-10-04
|
||||
|
||||
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
|
||||
|
||||
## v1.4.0 / 2016-10-01
|
||||
|
||||
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
|
||||
|
||||
## v1.3.1 / 2016-06-28
|
||||
|
||||
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## v1.3.0 / 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## v1.2.10 / 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## v1.2.9 / 2016-01-13
|
||||
|
||||
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## v1.2.8 / 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## v1.2.5 / 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## v1.2.1 / 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## v1.2.0 / 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## v1.1.1 / 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## v1.1.0 / 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v1.0.4 / 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## v1.0.3 / 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## v1.0.2 / 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## v1.0.0 / 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## v0.9.3 / 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v0.9.2 / 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## v0.9.1 / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## v0.9.0 / 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## v0.8.12 / 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## v0.8.11 / 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
|
||||
|
||||
## v0.8.10 / 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## v0.8.9 / 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## v0.8.8 / 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## v0.8.7 / 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## v0.8.6 / 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## v0.8.5 / 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## v0.8.4 / 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## v0.8.3 / 2013-03-13
|
||||
|
||||
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## v0.8.2 / 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## v0.8.1 / 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## v0.8.0 / 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## v0.7.4 / 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## v0.7.3 / 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## v0.7.2 / 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## v0.7.1 / 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## v0.7.0 / 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## v0.6.0 / 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## v0.5.1 / 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## v0.5.0 / 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## v0.4.0 / 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## v0.3.0 / 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## v0.2.0 / 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## v0.1.0 / 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
|
@ -1,77 +0,0 @@
|
|||
# Contributing
|
||||
|
||||
## Issues
|
||||
|
||||
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
|
||||
* Please indicate the platform you are using fsnotify on.
|
||||
* A code example to reproduce the problem is appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||
|
||||
Please indicate that you have signed the CLA in your pull request.
|
||||
|
||||
### How fsnotify is Developed
|
||||
|
||||
* Development is done on feature branches.
|
||||
* Tests are run on BSD, Linux, macOS and Windows.
|
||||
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||
* To issue a new release, the maintainers will:
|
||||
* Update the CHANGELOG
|
||||
* Tag a version, which will become available through gopkg.in.
|
||||
|
||||
### How to Fork
|
||||
|
||||
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||
|
||||
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Ensure everything works and the tests pass (see below)
|
||||
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||
|
||||
Contribute upstream:
|
||||
|
||||
1. Fork fsnotify on GitHub
|
||||
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||
3. Push to the branch (`git push fork my-new-feature`)
|
||||
4. Create a new Pull Request on GitHub
|
||||
|
||||
This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
|
||||
|
||||
### Testing
|
||||
|
||||
fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||
|
||||
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
|
||||
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||
|
||||
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||
|
||||
Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
|
||||
[hub]: https://github.com/github/hub
|
||||
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
|
@ -1,28 +0,0 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012 fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,79 +0,0 @@
|
|||
# File system notifications for Go
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
|
||||
|
||||
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
|
||||
|
||||
```console
|
||||
go get -u golang.org/x/sys/...
|
||||
```
|
||||
|
||||
Cross platform: Windows, Linux, BSD and macOS.
|
||||
|
||||
|Adapter |OS |Status |
|
||||
|----------|----------|----------|
|
||||
|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|
||||
|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
|
||||
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|
||||
|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|
||||
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|
||||
|fanotify |Linux 2.6.37+ | |
|
||||
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|
||||
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
|
||||
|
||||
\* Android and iOS are untested.
|
||||
|
||||
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
|
||||
|
||||
## API stability
|
||||
|
||||
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
|
||||
|
||||
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
|
||||
|
||||
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||
|
||||
## Example
|
||||
|
||||
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
|
||||
|
||||
## FAQ
|
||||
|
||||
**When a file is moved to another directory is it still being watched?**
|
||||
|
||||
No (it shouldn't be, unless you are watching where it was moved to).
|
||||
|
||||
**When I watch a directory, are all subdirectories watched as well?**
|
||||
|
||||
No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
|
||||
|
||||
**Do I have to watch the Error and Event channels in a separate goroutine?**
|
||||
|
||||
As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
|
||||
|
||||
**Why am I receiving multiple events for the same file on OS X?**
|
||||
|
||||
Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
|
||||
|
||||
**How many files can be watched at once?**
|
||||
|
||||
There are OS-specific limits as to how many watches can be created:
|
||||
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
|
||||
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
|
||||
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#18]: https://github.com/fsnotify/fsnotify/issues/18
|
||||
[#11]: https://github.com/fsnotify/fsnotify/issues/11
|
||||
[#7]: https://github.com/howeyc/fsnotify/issues/7
|
||||
|
||||
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
|
||||
|
||||
## Related Projects
|
||||
|
||||
* [notify](https://github.com/rjeczalik/notify)
|
||||
* [fsevents](https://github.com/fsnotify/fsevents)
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build solaris
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
|
@ -1,66 +0,0 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !plan9
|
||||
|
||||
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Event represents a single file system notification.
|
||||
type Event struct {
|
||||
Name string // Relative path to the file or directory.
|
||||
Op Op // File operation that triggered the event.
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// These are the generalized file operations that can trigger a notification.
|
||||
const (
|
||||
Create Op = 1 << iota
|
||||
Write
|
||||
Remove
|
||||
Rename
|
||||
Chmod
|
||||
)
|
||||
|
||||
func (op Op) String() string {
|
||||
// Use a buffer for efficient string concatenation
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if op&Create == Create {
|
||||
buffer.WriteString("|CREATE")
|
||||
}
|
||||
if op&Remove == Remove {
|
||||
buffer.WriteString("|REMOVE")
|
||||
}
|
||||
if op&Write == Write {
|
||||
buffer.WriteString("|WRITE")
|
||||
}
|
||||
if op&Rename == Rename {
|
||||
buffer.WriteString("|RENAME")
|
||||
}
|
||||
if op&Chmod == Chmod {
|
||||
buffer.WriteString("|CHMOD")
|
||||
}
|
||||
if buffer.Len() == 0 {
|
||||
return ""
|
||||
}
|
||||
return buffer.String()[1:] // Strip leading pipe
|
||||
}
|
||||
|
||||
// String returns a string representation of the event in the form
|
||||
// "file: REMOVE|WRITE|..."
|
||||
func (e Event) String() string {
|
||||
return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
|
||||
}
|
||||
|
||||
// Common errors that can be reported by a watcher
|
||||
var ErrEventOverflow = errors.New("fsnotify queue overflow")
|
|
@ -1,337 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
mu sync.Mutex // Map access
|
||||
fd int
|
||||
poller *fdPoller
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create epoll
|
||||
poller, err := newFdPoller(fd)
|
||||
if err != nil {
|
||||
unix.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
poller: poller,
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
|
||||
// Wake up goroutine
|
||||
w.poller.wake()
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
|
||||
var flags uint32 = agnosticEvents
|
||||
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watchEntry := w.watches[name]
|
||||
if watchEntry != nil {
|
||||
flags |= watchEntry.flags | unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
if watchEntry == nil {
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
} else {
|
||||
watchEntry.wd = uint32(wd)
|
||||
watchEntry.flags = flags
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||
}
|
||||
|
||||
// We successfully removed the watch if InotifyRmWatch doesn't return an
|
||||
// error, we need to clean up our internal state to ensure it matches
|
||||
// inotify's kernel state.
|
||||
delete(w.paths, int(watch.wd))
|
||||
delete(w.watches, name)
|
||||
|
||||
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||
// the inotify will already have been removed.
|
||||
// watches and pathes are deleted in ignoreLinux() implicitly and asynchronously
|
||||
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||
// by another thread and we have not received IN_IGNORE event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||
// the only two possible errors are:
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
n int // Number of bytes read with read()
|
||||
errno error // Syscall errno
|
||||
ok bool // For poller.wait
|
||||
)
|
||||
|
||||
defer close(w.doneResp)
|
||||
defer close(w.Errors)
|
||||
defer close(w.Events)
|
||||
defer unix.Close(w.fd)
|
||||
defer w.poller.close()
|
||||
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ok, errno = w.poller.wait()
|
||||
if errno != nil {
|
||||
select {
|
||||
case w.Errors <- errno:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
n, errno = unix.Read(w.fd, buf[:])
|
||||
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
|
||||
// unix.Read might have been woken up by Close. If so, we're done.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
// If EOF is received. This should really never happen.
|
||||
err = io.EOF
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
}
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
mask := uint32(raw.Mask)
|
||||
nameLen := uint32(raw.Len)
|
||||
|
||||
if mask&unix.IN_Q_OVERFLOW != 0 {
|
||||
select {
|
||||
case w.Errors <- ErrEventOverflow:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name, ok := w.paths[int(raw.Wd)]
|
||||
// IN_DELETE_SELF occurs when the file/directory being watched is removed.
|
||||
// This is a sign to clean up the maps, otherwise we are no longer in sync
|
||||
// with the inotify kernel state which has already deleted the watch
|
||||
// automatically.
|
||||
if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
|
||||
delete(w.paths, int(raw.Wd))
|
||||
delete(w.watches, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if !event.ignoreLinux(mask) {
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Events
|
||||
// channel. Such as events marked ignore by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *Event) ignoreLinux(mask uint32) bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newEvent returns an platform-independent Event based on an inotify mask.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
|
@ -1,187 +0,0 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fdPoller struct {
|
||||
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||
epfd int // Epoll file descriptor
|
||||
pipe [2]int // Pipe for waking up
|
||||
}
|
||||
|
||||
func emptyPoller(fd int) *fdPoller {
|
||||
poller := new(fdPoller)
|
||||
poller.fd = fd
|
||||
poller.epfd = -1
|
||||
poller.pipe[0] = -1
|
||||
poller.pipe[1] = -1
|
||||
return poller
|
||||
}
|
||||
|
||||
// Create a new inotify poller.
|
||||
// This creates an inotify handler, and an epoll handler.
|
||||
func newFdPoller(fd int) (*fdPoller, error) {
|
||||
var errno error
|
||||
poller := emptyPoller(fd)
|
||||
defer func() {
|
||||
if errno != nil {
|
||||
poller.close()
|
||||
}
|
||||
}()
|
||||
poller.fd = fd
|
||||
|
||||
// Create epoll fd
|
||||
poller.epfd, errno = unix.EpollCreate1(0)
|
||||
if poller.epfd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
|
||||
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register inotify fd with epoll
|
||||
event := unix.EpollEvent{
|
||||
Fd: int32(poller.fd),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register pipe fd with epoll
|
||||
event = unix.EpollEvent{
|
||||
Fd: int32(poller.pipe[0]),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
return poller, nil
|
||||
}
|
||||
|
||||
// Wait using epoll.
|
||||
// Returns true if something is ready to be read,
|
||||
// false if there is not.
|
||||
func (poller *fdPoller) wait() (bool, error) {
|
||||
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
|
||||
// I don't know whether epoll_wait returns the number of events returned,
|
||||
// or the total number of events ready.
|
||||
// I decided to catch both by making the buffer one larger than the maximum.
|
||||
events := make([]unix.EpollEvent, 7)
|
||||
for {
|
||||
n, errno := unix.EpollWait(poller.epfd, events, -1)
|
||||
if n == -1 {
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
return false, errno
|
||||
}
|
||||
if n == 0 {
|
||||
// If there are no events, try again.
|
||||
continue
|
||||
}
|
||||
if n > 6 {
|
||||
// This should never happen. More events were returned than should be possible.
|
||||
return false, errors.New("epoll_wait returned more events than I know what to do with")
|
||||
}
|
||||
ready := events[:n]
|
||||
epollhup := false
|
||||
epollerr := false
|
||||
epollin := false
|
||||
for _, event := range ready {
|
||||
if event.Fd == int32(poller.fd) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// This should not happen, but if it does, treat it as a wakeup.
|
||||
epollhup = true
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the file descriptor, we should pretend
|
||||
// something is ready to read, and let unix.Read pick up the error.
|
||||
epollerr = true
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// There is data to read.
|
||||
epollin = true
|
||||
}
|
||||
}
|
||||
if event.Fd == int32(poller.pipe[0]) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// Write pipe descriptor was closed, by us. This means we're closing down the
|
||||
// watcher, and we should wake up.
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the pipe file descriptor.
|
||||
// This is an absolute mystery, and should never ever happen.
|
||||
return false, errors.New("Error on the pipe descriptor.")
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// This is a regular wakeup, so we have to clear the buffer.
|
||||
err := poller.clearWake()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if epollhup || epollerr || epollin {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Close the write end of the poller.
|
||||
func (poller *fdPoller) wake() error {
|
||||
buf := make([]byte, 1)
|
||||
n, errno := unix.Write(poller.pipe[1], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is full, poller will wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (poller *fdPoller) clearWake() error {
|
||||
// You have to be woken up a LOT in order to get to 100!
|
||||
buf := make([]byte, 100)
|
||||
n, errno := unix.Read(poller.pipe[0], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is empty, someone else cleared our wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close all poller file descriptors, but not the one passed to it.
|
||||
func (poller *fdPoller) close() {
|
||||
if poller.pipe[1] != -1 {
|
||||
unix.Close(poller.pipe[1])
|
||||
}
|
||||
if poller.pipe[0] != -1 {
|
||||
unix.Close(poller.pipe[0])
|
||||
}
|
||||
if poller.epfd != -1 {
|
||||
unix.Close(poller.epfd)
|
||||
}
|
||||
}
|
|
@ -1,503 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
done chan bool // Channel for sending a "quit message" to the reader goroutine
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
|
||||
mu sync.Mutex // Protects access to watcher data
|
||||
watches map[string]int // Map of watched file descriptors (key: path).
|
||||
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
|
||||
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
|
||||
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events).
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
type pathInfo struct {
|
||||
name string
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
kq, err := kqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
kq: kq,
|
||||
watches: make(map[string]int),
|
||||
dirFlags: make(map[string]uint32),
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan bool),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// copy paths to remove while locked
|
||||
w.mu.Lock()
|
||||
var pathsToRemove = make([]string, 0, len(w.watches))
|
||||
for name := range w.watches {
|
||||
pathsToRemove = append(pathsToRemove, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
// unlock before calling Remove, which also locks
|
||||
|
||||
var err error
|
||||
for _, name := range pathsToRemove {
|
||||
if e := w.Remove(name); e != nil && err == nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
|
||||
// Send "quit" message to the reader goroutine:
|
||||
w.done <- true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
w.externalWatches[name] = true
|
||||
w.mu.Unlock()
|
||||
_, err := w.addWatch(name, noteAllEvents)
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove stops watching the the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
w.mu.Lock()
|
||||
watchfd, ok := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
||||
}
|
||||
|
||||
const registerRemove = unix.EV_DELETE
|
||||
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(watchfd)
|
||||
|
||||
w.mu.Lock()
|
||||
isDir := w.paths[watchfd].isDir
|
||||
delete(w.watches, name)
|
||||
delete(w.paths, watchfd)
|
||||
delete(w.dirFlags, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if isDir {
|
||||
var pathsToRemove []string
|
||||
w.mu.Lock()
|
||||
for _, path := range w.paths {
|
||||
wdir, _ := filepath.Split(path.name)
|
||||
if filepath.Clean(wdir) == name {
|
||||
if !w.externalWatches[path.name] {
|
||||
pathsToRemove = append(pathsToRemove, path.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// keventWaitTime to block on each read from kevent
|
||||
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||
|
||||
// addWatch adds name to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
var isDir bool
|
||||
// Make ./name and name equivalent
|
||||
name = filepath.Clean(name)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return "", errors.New("kevent instance already closed")
|
||||
}
|
||||
watchfd, alreadyWatching := w.watches[name]
|
||||
// We already have a watch, but we can still override flags.
|
||||
if alreadyWatching {
|
||||
isDir = w.paths[watchfd].isDir
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets.
|
||||
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Don't watch named pipes.
|
||||
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||
// consistency, we will act like everything is fine. There will simply
|
||||
// be no file events for broken symlinks.
|
||||
// Hence the returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
name, err = filepath.EvalSymlinks(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
_, alreadyWatching = w.watches[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
if alreadyWatching {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
watchfd, err = unix.Open(name, openMode, 0700)
|
||||
if watchfd == -1 {
|
||||
return "", err
|
||||
}
|
||||
|
||||
isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
|
||||
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
||||
unix.Close(watchfd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.mu.Lock()
|
||||
w.watches[name] = watchfd
|
||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Watch the directory if it has not been watched before,
|
||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
w.mu.Lock()
|
||||
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
// Store flags so this watch can be updated later
|
||||
w.dirFlags[name] = flags
|
||||
w.mu.Unlock()
|
||||
|
||||
if watchDir {
|
||||
if err := w.watchDirectoryFiles(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
err := unix.Close(w.kq)
|
||||
if err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Get new events
|
||||
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
w.Errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
// Flush the events we received to the Events channel
|
||||
for len(kevents) > 0 {
|
||||
kevent := &kevents[0]
|
||||
watchfd := int(kevent.Ident)
|
||||
mask := uint32(kevent.Fflags)
|
||||
w.mu.Lock()
|
||||
path := w.paths[watchfd]
|
||||
w.mu.Unlock()
|
||||
event := newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !(event.Op&Remove == Remove) {
|
||||
// Double check to make sure the directory exists. This can happen when
|
||||
// we do a rm -fr on a recursively watched folders and we receive a
|
||||
// modification event first but the folder has been deleted and later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
// mark is as delete event
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||
w.Remove(event.Name)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
w.sendDirectoryChangeEvents(event.Name)
|
||||
} else {
|
||||
// Send the event on the Events channel
|
||||
w.Events <- event
|
||||
}
|
||||
|
||||
if event.Op&Remove == Remove {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); err == nil {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filePath := filepath.Clean(event.Name)
|
||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
kevents = kevents[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns an platform-independent Event based on kqueue Fflags.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func newCreateEvent(name string) Event {
|
||||
return Event{Name: name, Op: Create}
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match Linux inotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
if !doesExist {
|
||||
// Send create event
|
||||
w.Events <- newCreateEvent(filePath)
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||
return w.addWatch(name, flags)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||
func kqueue() (kq int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, err
|
||||
}
|
||||
return kq, nil
|
||||
}
|
||||
|
||||
// register events with the queue
|
||||
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types:
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// register the events
|
||||
success, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(kq, nil, events, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
// durationToTimespec prepares a timeout value
|
||||
func durationToTimespec(d time.Duration) unix.Timespec {
|
||||
return unix.NsecToTimespec(d.Nanoseconds())
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
const openMode = unix.O_NONBLOCK | unix.O_RDONLY
|
|
@ -1,12 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// note: this constant is not defined on BSD
|
||||
const openMode = unix.O_EVTONLY
|
|
@ -1,561 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
mu sync.Mutex // Map access
|
||||
port syscall.Handle // Handle to completion port
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops watching the the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// inotify-style event and option bits used internally on Windows; they
// are translated to and from the Win32 notify flags by toWindowsFlags
// and toFSnotifyFlags.
const (
	// Options for AddWatch
	sysFSONESHOT = 0x80000000 // deliver one event, then drop the watch
	sysFSONLYDIR = 0x1000000  // only install the watch if the path is a directory

	// Events
	sysFSACCESS     = 0x1
	sysFSALLEVENTS  = 0xfff // union of all ordinary event bits
	sysFSATTRIB     = 0x4
	sysFSCLOSE      = 0x18
	sysFSCREATE     = 0x100
	sysFSDELETE     = 0x200
	sysFSDELETESELF = 0x400
	sysFSMODIFY     = 0x2
	sysFSMOVE       = 0xc0 // MOVEDFROM | MOVEDTO
	sysFSMOVEDFROM  = 0x40
	sysFSMOVEDTO    = 0x80
	sysFSMOVESELF   = 0x800

	// Special events
	sysFSIGNORED   = 0x8000
	sysFSQOVERFLOW = 0x4000 // event queue overflowed (short read)
)
|
||||
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Opcodes for requests delivered to the I/O thread via the input channel.
const (
	opAddWatch = iota
	opRemoveWatch
)

// provisional lives above all 32-bit sysFS* event bits and marks a watch
// entry that is still being installed; deleteWatch/startRead suppress
// events for provisional entries.
const (
	provisional uint64 = 1 << (32 + iota)
)

// input is one Add/Remove request handed to the I/O thread.
type input struct {
	op    int        // opAddWatch or opRemoveWatch
	path  string     // cleaned path to (un)watch
	flags uint32     // sysFS* event mask (Add only)
	reply chan error // result of the operation
}
|
||||
|
||||
// inode identifies a filesystem object on Windows: its open handle plus
// the (volume serial, file index) pair from GetFileInformationByHandle.
type inode struct {
	handle syscall.Handle
	volume uint32
	index  uint64
}

// watch tracks one watched directory plus any individually-watched names
// inside it. The Overlapped struct is the first field so the *Overlapped
// returned by GetQueuedCompletionStatus can be cast back to *watch
// (see readEvents).
type watch struct {
	ov     syscall.Overlapped
	ino    *inode            // i-number
	path   string            // Directory path
	mask   uint64            // Directory itself is being watched with these notify flags
	names  map[string]uint64 // Map of names being watched and their notify flags
	rename string            // Remembers the old name while renaming a file
	buf    [4096]byte        // ReadDirectoryChanges result buffer
}

// Watches are indexed by volume serial number, then by file index.
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
|
||||
|
||||
// wakeupReader posts an empty completion packet (nil overlapped) to the
// port so a blocked readEvents loop wakes up and services the quit and
// input channels.
func (w *Watcher) wakeupReader() error {
	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
	if e != nil {
		return os.NewSyscallError("PostQueuedCompletionStatus", e)
	}
	return nil
}
|
||||
|
||||
// getDir returns pathname itself when it is a directory, otherwise the
// cleaned directory that contains it (the Windows API watches
// directories, not individual files).
func getDir(pathname string) (dir string, err error) {
	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
	if e != nil {
		return "", os.NewSyscallError("GetFileAttributes", e)
	}
	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
		dir = pathname
	} else {
		dir, _ = filepath.Split(pathname)
		dir = filepath.Clean(dir)
	}
	return
}
|
||||
|
||||
// getIno opens the directory at path (backup semantics + overlapped I/O,
// as ReadDirectoryChanges requires) and returns its identity. The caller
// owns the returned handle and must eventually close it.
func getIno(path string) (ino *inode, err error) {
	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
		syscall.FILE_LIST_DIRECTORY,
		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
		nil, syscall.OPEN_EXISTING,
		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
	if e != nil {
		return nil, os.NewSyscallError("CreateFile", e)
	}
	var fi syscall.ByHandleFileInformation
	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
		syscall.CloseHandle(h)
		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
	}
	ino = &inode{
		handle: h,
		volume: fi.VolumeSerialNumber,
		// Combine the two 32-bit halves into the full 64-bit file index.
		index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
	}
	return ino, nil
}
|
||||
|
||||
// get returns the watch registered for ino (volume, then file index),
// or nil when none exists.
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
	if i := m[ino.volume]; i != nil {
		return i[ino.index]
	}
	return nil
}

// set registers watch under ino, creating the per-volume index map on
// first use.
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
	i := m[ino.volume]
	if i == nil {
		i = make(indexMap)
		m[ino.volume] = i
	}
	i[ino.index] = watch
}
|
||||
|
||||
// addWatch installs (or extends) the watch for pathname. The directory
// containing pathname is what actually gets watched; a non-directory
// pathname is recorded in the watch's names map instead.
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
	dir, err := getDir(pathname)
	if err != nil {
		return err
	}
	// FS_ONLYDIR: silently skip paths that are not directories themselves.
	if flags&sysFSONLYDIR != 0 && pathname != dir {
		return nil
	}
	ino, err := getIno(dir)
	if err != nil {
		return err
	}
	w.mu.Lock()
	watchEntry := w.watches.get(ino)
	w.mu.Unlock()
	if watchEntry == nil {
		// First watch on this directory: tie its handle to the port.
		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
			syscall.CloseHandle(ino.handle)
			return os.NewSyscallError("CreateIoCompletionPort", e)
		}
		watchEntry = &watch{
			ino:   ino,
			path:  dir,
			names: make(map[string]uint64),
		}
		w.mu.Lock()
		w.watches.set(ino, watchEntry)
		w.mu.Unlock()
		// Mark provisional until startRead succeeds so no events are
		// emitted for a half-installed watch.
		flags |= provisional
	} else {
		// Already watching this directory; drop the duplicate handle.
		syscall.CloseHandle(ino.handle)
	}
	if pathname == dir {
		watchEntry.mask |= flags
	} else {
		watchEntry.names[filepath.Base(pathname)] |= flags
	}
	if err = w.startRead(watchEntry); err != nil {
		return err
	}
	// Installation succeeded: clear the provisional marker.
	if pathname == dir {
		watchEntry.mask &= ^provisional
	} else {
		watchEntry.names[filepath.Base(pathname)] &= ^provisional
	}
	return nil
}
|
||||
|
||||
// remWatch stops watching pathname, emitting a final IGNORED event for
// it, then re-issues startRead so the remaining (possibly empty) mask
// takes effect — an empty mask makes startRead drop the watch entirely.
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
	dir, err := getDir(pathname)
	if err != nil {
		return err
	}
	ino, err := getIno(dir)
	if err != nil {
		return err
	}
	w.mu.Lock()
	watch := w.watches.get(ino)
	w.mu.Unlock()
	if watch == nil {
		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
	}
	if pathname == dir {
		// Removing the directory watch itself.
		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
		watch.mask = 0
	} else {
		// Removing one name inside the watched directory.
		name := filepath.Base(pathname)
		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
		delete(watch.names, name)
	}
	return w.startRead(watch)
}
|
||||
|
||||
// deleteWatch clears every mask on the watch, emitting an IGNORED event
// for each non-provisional entry. A following startRead then sees an
// empty mask and closes/unregisters the handle.
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
	for name, mask := range watch.names {
		if mask&provisional == 0 {
			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
		}
		delete(watch.names, name)
	}
	if watch.mask != 0 {
		if watch.mask&provisional == 0 {
			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
		}
		watch.mask = 0
	}
}
|
||||
|
||||
// startRead cancels any outstanding I/O on the watch and, if any notify
// flags remain, re-issues the asynchronous ReadDirectoryChanges call.
// With no flags left it instead closes the handle and removes the watch
// from the map.
// Must run within the I/O thread.
func (w *Watcher) startRead(watch *watch) error {
	if e := syscall.CancelIo(watch.ino.handle); e != nil {
		w.Errors <- os.NewSyscallError("CancelIo", e)
		w.deleteWatch(watch)
	}
	// Union of the directory mask and every per-name mask decides what
	// the OS should report.
	mask := toWindowsFlags(watch.mask)
	for _, m := range watch.names {
		mask |= toWindowsFlags(m)
	}
	if mask == 0 {
		// Nothing left to watch: release the handle and the map entry.
		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
			w.Errors <- os.NewSyscallError("CloseHandle", e)
		}
		w.mu.Lock()
		delete(w.watches[watch.ino.volume], watch.ino.index)
		w.mu.Unlock()
		return nil
	}
	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
	if e != nil {
		err := os.NewSyscallError("ReadDirectoryChanges", e)
		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
			// Watched directory was probably removed
			if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
				if watch.mask&sysFSONESHOT != 0 {
					watch.mask = 0
				}
			}
			err = nil
		}
		// Tear the watch down; the recursive call runs the mask==0 path
		// above and unregisters the handle.
		w.deleteWatch(watch)
		w.startRead(watch)
		return err
	}
	return nil
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case syscall.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.Events <- newEvent("", sysFSQOVERFLOW)
|
||||
w.Errors <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sysFSONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendEvent delivers an event for name unless mask is empty. If a quit
// request is already pending, it re-queues the quit token instead of
// blocking forever on a full Events channel. Reports whether an event
// was sent.
func (w *Watcher) sendEvent(name string, mask uint64) bool {
	if mask == 0 {
		return false
	}
	event := newEvent(name, uint32(mask))
	select {
	case ch := <-w.quit:
		w.quit <- ch
	case w.Events <- event:
	}
	return true
}
|
||||
|
||||
func toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSACCESS != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||
}
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case syscall.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,10 +0,0 @@
|
|||
context
|
||||
=======
|
||||
[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
|
||||
|
||||
gorilla/context is a general purpose registry for global request variables.
|
||||
|
||||
> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
|
||||
> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
|
||||
|
||||
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
|
|
@ -1,143 +0,0 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
mutex sync.RWMutex
|
||||
data = make(map[*http.Request]map[interface{}]interface{})
|
||||
datat = make(map[*http.Request]int64)
|
||||
)
|
||||
|
||||
// Set stores a value for a given key in a given request.
|
||||
func Set(r *http.Request, key, val interface{}) {
|
||||
mutex.Lock()
|
||||
if data[r] == nil {
|
||||
data[r] = make(map[interface{}]interface{})
|
||||
datat[r] = time.Now().Unix()
|
||||
}
|
||||
data[r][key] = val
|
||||
mutex.Unlock()
|
||||
}
|
||||
|
||||
// Get returns a value stored for a given key in a given request.
|
||||
func Get(r *http.Request, key interface{}) interface{} {
|
||||
mutex.RLock()
|
||||
if ctx := data[r]; ctx != nil {
|
||||
value := ctx[key]
|
||||
mutex.RUnlock()
|
||||
return value
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOk returns stored value and presence state like multi-value return of map access.
|
||||
func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
|
||||
mutex.RLock()
|
||||
if _, ok := data[r]; ok {
|
||||
value, ok := data[r][key]
|
||||
mutex.RUnlock()
|
||||
return value, ok
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
|
||||
func GetAll(r *http.Request) map[interface{}]interface{} {
|
||||
mutex.RLock()
|
||||
if context, ok := data[r]; ok {
|
||||
result := make(map[interface{}]interface{}, len(context))
|
||||
for k, v := range context {
|
||||
result[k] = v
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return result
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
|
||||
// the request was registered.
|
||||
func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
|
||||
mutex.RLock()
|
||||
context, ok := data[r]
|
||||
result := make(map[interface{}]interface{}, len(context))
|
||||
for k, v := range context {
|
||||
result[k] = v
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return result, ok
|
||||
}
|
||||
|
||||
// Delete removes a value stored for a given key in a given request.
|
||||
func Delete(r *http.Request, key interface{}) {
|
||||
mutex.Lock()
|
||||
if data[r] != nil {
|
||||
delete(data[r], key)
|
||||
}
|
||||
mutex.Unlock()
|
||||
}
|
||||
|
||||
// Clear removes all values stored for a given request.
|
||||
//
|
||||
// This is usually called by a handler wrapper to clean up request
|
||||
// variables at the end of a request lifetime. See ClearHandler().
|
||||
func Clear(r *http.Request) {
|
||||
mutex.Lock()
|
||||
clear(r)
|
||||
mutex.Unlock()
|
||||
}
|
||||
|
||||
// clear is Clear without the lock.
|
||||
func clear(r *http.Request) {
|
||||
delete(data, r)
|
||||
delete(datat, r)
|
||||
}
|
||||
|
||||
// Purge removes request data stored for longer than maxAge, in seconds.
|
||||
// It returns the amount of requests removed.
|
||||
//
|
||||
// If maxAge <= 0, all request data is removed.
|
||||
//
|
||||
// This is only used for sanity check: in case context cleaning was not
|
||||
// properly set some request data can be kept forever, consuming an increasing
|
||||
// amount of memory. In case this is detected, Purge() must be called
|
||||
// periodically until the problem is fixed.
|
||||
func Purge(maxAge int) int {
|
||||
mutex.Lock()
|
||||
count := 0
|
||||
if maxAge <= 0 {
|
||||
count = len(data)
|
||||
data = make(map[*http.Request]map[interface{}]interface{})
|
||||
datat = make(map[*http.Request]int64)
|
||||
} else {
|
||||
min := time.Now().Unix() - int64(maxAge)
|
||||
for r := range data {
|
||||
if datat[r] < min {
|
||||
clear(r)
|
||||
count++
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex.Unlock()
|
||||
return count
|
||||
}
|
||||
|
||||
// ClearHandler wraps an http.Handler and clears request values at the end
|
||||
// of a request lifetime.
|
||||
func ClearHandler(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
defer Clear(r)
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
|
@ -1,88 +0,0 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package context stores values shared during a request lifetime.
|
||||
|
||||
Note: gorilla/context, having been born well before `context.Context` existed,
|
||||
does not play well with the shallow copying of the request that
|
||||
[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
|
||||
(added to net/http Go 1.7 onwards) performs. You should either use *just*
|
||||
gorilla/context, or moving forward, the new `http.Request.Context()`.
|
||||
|
||||
For example, a router can set variables extracted from the URL and later
|
||||
application handlers can access those values, or it can be used to store
|
||||
session values to be saved at the end of a request. There are several
|
||||
other common uses.
|
||||
|
||||
The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
|
||||
|
||||
http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
|
||||
|
||||
Here's the basic usage: first define the keys that you will need. The key
|
||||
type is interface{} so a key can be of any type that supports equality.
|
||||
Here we define a key using a custom int type to avoid name collisions:
|
||||
|
||||
package foo
|
||||
|
||||
import (
|
||||
"github.com/gorilla/context"
|
||||
)
|
||||
|
||||
type key int
|
||||
|
||||
const MyKey key = 0
|
||||
|
||||
Then set a variable. Variables are bound to an http.Request object, so you
|
||||
need a request instance to set a value:
|
||||
|
||||
context.Set(r, MyKey, "bar")
|
||||
|
||||
The application can later access the variable using the same key you provided:
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// val is "bar".
|
||||
val := context.Get(r, foo.MyKey)
|
||||
|
||||
// returns ("bar", true)
|
||||
val, ok := context.GetOk(r, foo.MyKey)
|
||||
// ...
|
||||
}
|
||||
|
||||
And that's all about the basic usage. We discuss some other ideas below.
|
||||
|
||||
Any type can be stored in the context. To enforce a given type, make the key
|
||||
private and wrap Get() and Set() to accept and return values of a specific
|
||||
type:
|
||||
|
||||
type key int
|
||||
|
||||
const mykey key = 0
|
||||
|
||||
// GetMyKey returns a value for this package from the request values.
|
||||
func GetMyKey(r *http.Request) SomeType {
|
||||
if rv := context.Get(r, mykey); rv != nil {
|
||||
return rv.(SomeType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetMyKey sets a value for this package in the request values.
|
||||
func SetMyKey(r *http.Request, val SomeType) {
|
||||
context.Set(r, mykey, val)
|
||||
}
|
||||
|
||||
Variables must be cleared at the end of a request, to remove all values
|
||||
that were stored. This can be done in an http.Handler, after a request was
|
||||
served. Just call Clear() passing the request:
|
||||
|
||||
context.Clear(r)
|
||||
|
||||
...or use ClearHandler(), which conveniently wraps an http.Handler to clear
|
||||
variables at the end of a request lifetime.
|
||||
|
||||
The Routers from the packages gorilla/mux and gorilla/pat call Clear()
|
||||
so if you are using either of them you don't need to clear the context manually.
|
||||
*/
|
||||
package context
|
|
@ -1,27 +0,0 @@
|
|||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,80 +0,0 @@
|
|||
securecookie
|
||||
============
|
||||
[![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) [![Build Status](https://travis-ci.org/gorilla/securecookie.png?branch=master)](https://travis-ci.org/gorilla/securecookie)
|
||||
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/securecookie/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/securecookie?badge)
|
||||
|
||||
|
||||
securecookie encodes and decodes authenticated and optionally encrypted
|
||||
cookie values.
|
||||
|
||||
Secure cookies can't be forged, because their values are validated using HMAC.
|
||||
When encrypted, the content is also inaccessible to malicious eyes. It is still
|
||||
recommended that sensitive data not be stored in cookies, and that HTTPS be used
|
||||
to prevent cookie [replay attacks](https://en.wikipedia.org/wiki/Replay_attack).
|
||||
|
||||
## Examples
|
||||
|
||||
To use it, first create a new SecureCookie instance:
|
||||
|
||||
```go
|
||||
// Hash keys should be at least 32 bytes long
|
||||
var hashKey = []byte("very-secret")
|
||||
// Block keys should be 16 bytes (AES-128) or 32 bytes (AES-256) long.
|
||||
// Shorter keys may weaken the encryption used.
|
||||
var blockKey = []byte("a-lot-secret")
|
||||
var s = securecookie.New(hashKey, blockKey)
|
||||
```
|
||||
|
||||
The hashKey is required, used to authenticate the cookie value using HMAC.
|
||||
It is recommended to use a key with 32 or 64 bytes.
|
||||
|
||||
The blockKey is optional, used to encrypt the cookie value -- set it to nil
|
||||
to not use encryption. If set, the length must correspond to the block size
|
||||
of the encryption algorithm. For AES, used by default, valid lengths are
|
||||
16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.
|
||||
|
||||
Strong keys can be created using the convenience function GenerateRandomKey().
|
||||
|
||||
Once a SecureCookie instance is set, use it to encode a cookie value:
|
||||
|
||||
```go
|
||||
func SetCookieHandler(w http.ResponseWriter, r *http.Request) {
|
||||
value := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
if encoded, err := s.Encode("cookie-name", value); err == nil {
|
||||
cookie := &http.Cookie{
|
||||
Name: "cookie-name",
|
||||
Value: encoded,
|
||||
Path: "/",
|
||||
Secure: true,
|
||||
HttpOnly: true,
|
||||
}
|
||||
http.SetCookie(w, cookie)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Later, use the same SecureCookie instance to decode and validate a cookie
|
||||
value:
|
||||
|
||||
```go
|
||||
func ReadCookieHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if cookie, err := r.Cookie("cookie-name"); err == nil {
|
||||
value := make(map[string]string)
|
||||
if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil {
|
||||
fmt.Fprintf(w, "The value of foo is %q", value["foo"])
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
We stored a map[string]string, but secure cookies can hold any value that
|
||||
can be encoded using `encoding/gob`. To store custom types, they must be
|
||||
registered first using gob.Register(). For basic types this is not needed;
|
||||
it works out of the box. An optional JSON encoder that uses `encoding/json` is
|
||||
available for types compatible with JSON.
|
||||
|
||||
## License
|
||||
|
||||
BSD licensed. See the LICENSE file for details.
|
|
@ -1,61 +0,0 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package securecookie encodes and decodes authenticated and optionally
|
||||
encrypted cookie values.
|
||||
|
||||
Secure cookies can't be forged, because their values are validated using HMAC.
|
||||
When encrypted, the content is also inaccessible to malicious eyes.
|
||||
|
||||
To use it, first create a new SecureCookie instance:
|
||||
|
||||
var hashKey = []byte("very-secret")
|
||||
var blockKey = []byte("a-lot-secret")
|
||||
var s = securecookie.New(hashKey, blockKey)
|
||||
|
||||
The hashKey is required, used to authenticate the cookie value using HMAC.
|
||||
It is recommended to use a key with 32 or 64 bytes.
|
||||
|
||||
The blockKey is optional, used to encrypt the cookie value -- set it to nil
|
||||
to not use encryption. If set, the length must correspond to the block size
|
||||
of the encryption algorithm. For AES, used by default, valid lengths are
|
||||
16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.
|
||||
|
||||
Strong keys can be created using the convenience function GenerateRandomKey().
|
||||
|
||||
Once a SecureCookie instance is set, use it to encode a cookie value:
|
||||
|
||||
func SetCookieHandler(w http.ResponseWriter, r *http.Request) {
|
||||
value := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
if encoded, err := s.Encode("cookie-name", value); err == nil {
|
||||
cookie := &http.Cookie{
|
||||
Name: "cookie-name",
|
||||
Value: encoded,
|
||||
Path: "/",
|
||||
}
|
||||
http.SetCookie(w, cookie)
|
||||
}
|
||||
}
|
||||
|
||||
Later, use the same SecureCookie instance to decode and validate a cookie
|
||||
value:
|
||||
|
||||
func ReadCookieHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if cookie, err := r.Cookie("cookie-name"); err == nil {
|
||||
value := make(map[string]string)
|
||||
if err = s2.Decode("cookie-name", cookie.Value, &value); err == nil {
|
||||
fmt.Fprintf(w, "The value of foo is %q", value["foo"])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
We stored a map[string]string, but secure cookies can hold any value that
|
||||
can be encoded using encoding/gob. To store custom types, they must be
|
||||
registered first using gob.Register(). For basic types this is not needed;
|
||||
it works out of the box.
|
||||
*/
|
||||
package securecookie
|
|
@ -1,25 +0,0 @@
|
|||
// +build gofuzz
|
||||
|
||||
package securecookie
|
||||
|
||||
// Fixed fuzzing keys: a 16-byte hash key and a 16-byte (AES-128) block
// key so fuzz runs are reproducible.
var hashKey = []byte("very-secret12345")
var blockKey = []byte("a-lot-secret1234")
var s = New(hashKey, blockKey)

// Cookie is a small sample payload covering bool, int, and string fields.
type Cookie struct {
	B bool
	I int
	S string
}

// Fuzz is the go-fuzz entry point: it tries to decode the input as a
// secure cookie value and, on success, re-encodes the result. It returns
// 1 for interesting inputs (decoded successfully), 0 otherwise.
func Fuzz(data []byte) int {
	datas := string(data)
	var c Cookie
	if err := s.Decode("fuzz", datas, &c); err != nil {
		return 0
	}
	// A value that just decoded must re-encode cleanly; failure is a bug.
	if _, err := s.Encode("fuzz", c); err != nil {
		panic(err)
	}
	return 1
}
|
|
@ -1,646 +0,0 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package securecookie
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/gob"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Error is the interface of all errors returned by functions in this library.
|
||||
type Error interface {
|
||||
error
|
||||
|
||||
// IsUsage returns true for errors indicating the client code probably
|
||||
// uses this library incorrectly. For example, the client may have
|
||||
// failed to provide a valid hash key, or may have failed to configure
|
||||
// the Serializer adequately for encoding value.
|
||||
IsUsage() bool
|
||||
|
||||
// IsDecode returns true for errors indicating that a cookie could not
|
||||
// be decoded and validated. Since cookies are usually untrusted
|
||||
// user-provided input, errors of this type should be expected.
|
||||
// Usually, the proper action is simply to reject the request.
|
||||
IsDecode() bool
|
||||
|
||||
// IsInternal returns true for unexpected errors occurring in the
|
||||
// securecookie implementation.
|
||||
IsInternal() bool
|
||||
|
||||
// Cause, if it returns a non-nil value, indicates that this error was
|
||||
// propagated from some underlying library. If this method returns nil,
|
||||
// this error was raised directly by this library.
|
||||
//
|
||||
// Cause is provided principally for debugging/logging purposes; it is
|
||||
// rare that application logic should perform meaningfully different
|
||||
// logic based on Cause. See, for example, the caveats described on
|
||||
// (MultiError).Cause().
|
||||
Cause() error
|
||||
}
|
||||
|
||||
// errorType is a bitmask giving the error type(s) of an cookieError value.
|
||||
type errorType int
|
||||
|
||||
const (
|
||||
usageError = errorType(1 << iota)
|
||||
decodeError
|
||||
internalError
|
||||
)
|
||||
|
||||
type cookieError struct {
|
||||
typ errorType
|
||||
msg string
|
||||
cause error
|
||||
}
|
||||
|
||||
func (e cookieError) IsUsage() bool { return (e.typ & usageError) != 0 }
|
||||
func (e cookieError) IsDecode() bool { return (e.typ & decodeError) != 0 }
|
||||
func (e cookieError) IsInternal() bool { return (e.typ & internalError) != 0 }
|
||||
|
||||
func (e cookieError) Cause() error { return e.cause }
|
||||
|
||||
func (e cookieError) Error() string {
|
||||
parts := []string{"securecookie: "}
|
||||
if e.msg == "" {
|
||||
parts = append(parts, "error")
|
||||
} else {
|
||||
parts = append(parts, e.msg)
|
||||
}
|
||||
if c := e.Cause(); c != nil {
|
||||
parts = append(parts, " - caused by: ", c.Error())
|
||||
}
|
||||
return strings.Join(parts, "")
|
||||
}
|
||||
|
||||
var (
|
||||
errGeneratingIV = cookieError{typ: internalError, msg: "failed to generate random iv"}
|
||||
|
||||
errNoCodecs = cookieError{typ: usageError, msg: "no codecs provided"}
|
||||
errHashKeyNotSet = cookieError{typ: usageError, msg: "hash key is not set"}
|
||||
errBlockKeyNotSet = cookieError{typ: usageError, msg: "block key is not set"}
|
||||
errEncodedValueTooLong = cookieError{typ: usageError, msg: "the value is too long"}
|
||||
|
||||
errValueToDecodeTooLong = cookieError{typ: decodeError, msg: "the value is too long"}
|
||||
errTimestampInvalid = cookieError{typ: decodeError, msg: "invalid timestamp"}
|
||||
errTimestampTooNew = cookieError{typ: decodeError, msg: "timestamp is too new"}
|
||||
errTimestampExpired = cookieError{typ: decodeError, msg: "expired timestamp"}
|
||||
errDecryptionFailed = cookieError{typ: decodeError, msg: "the value could not be decrypted"}
|
||||
errValueNotByte = cookieError{typ: decodeError, msg: "value not a []byte."}
|
||||
errValueNotBytePtr = cookieError{typ: decodeError, msg: "value not a pointer to []byte."}
|
||||
|
||||
// ErrMacInvalid indicates that cookie decoding failed because the HMAC
|
||||
// could not be extracted and verified. Direct use of this error
|
||||
// variable is deprecated; it is public only for legacy compatibility,
|
||||
// and may be privatized in the future, as it is rarely useful to
|
||||
// distinguish between this error and other Error implementations.
|
||||
ErrMacInvalid = cookieError{typ: decodeError, msg: "the value is not valid"}
|
||||
)
|
||||
|
||||
// Codec defines an interface to encode and decode cookie values.
|
||||
type Codec interface {
|
||||
Encode(name string, value interface{}) (string, error)
|
||||
Decode(name, value string, dst interface{}) error
|
||||
}
|
||||
|
||||
// New returns a new SecureCookie.
|
||||
//
|
||||
// hashKey is required, used to authenticate values using HMAC. Create it using
|
||||
// GenerateRandomKey(). It is recommended to use a key with 32 or 64 bytes.
|
||||
//
|
||||
// blockKey is optional, used to encrypt values. Create it using
|
||||
// GenerateRandomKey(). The key length must correspond to the block size
|
||||
// of the encryption algorithm. For AES, used by default, valid lengths are
|
||||
// 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.
|
||||
// The default encoder used for cookie serialization is encoding/gob.
|
||||
//
|
||||
// Note that keys created using GenerateRandomKey() are not automatically
|
||||
// persisted. New keys will be created when the application is restarted, and
|
||||
// previously issued cookies will not be able to be decoded.
|
||||
func New(hashKey, blockKey []byte) *SecureCookie {
|
||||
s := &SecureCookie{
|
||||
hashKey: hashKey,
|
||||
blockKey: blockKey,
|
||||
hashFunc: sha256.New,
|
||||
maxAge: 86400 * 30,
|
||||
maxLength: 4096,
|
||||
sz: GobEncoder{},
|
||||
}
|
||||
if hashKey == nil {
|
||||
s.err = errHashKeyNotSet
|
||||
}
|
||||
if blockKey != nil {
|
||||
s.BlockFunc(aes.NewCipher)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// SecureCookie encodes and decodes authenticated and optionally encrypted
|
||||
// cookie values.
|
||||
type SecureCookie struct {
|
||||
hashKey []byte
|
||||
hashFunc func() hash.Hash
|
||||
blockKey []byte
|
||||
block cipher.Block
|
||||
maxLength int
|
||||
maxAge int64
|
||||
minAge int64
|
||||
err error
|
||||
sz Serializer
|
||||
// For testing purposes, the function that returns the current timestamp.
|
||||
// If not set, it will use time.Now().UTC().Unix().
|
||||
timeFunc func() int64
|
||||
}
|
||||
|
||||
// Serializer provides an interface for providing custom serializers for cookie
|
||||
// values.
|
||||
type Serializer interface {
|
||||
Serialize(src interface{}) ([]byte, error)
|
||||
Deserialize(src []byte, dst interface{}) error
|
||||
}
|
||||
|
||||
// GobEncoder encodes cookie values using encoding/gob. This is the simplest
|
||||
// encoder and can handle complex types via gob.Register.
|
||||
type GobEncoder struct{}
|
||||
|
||||
// JSONEncoder encodes cookie values using encoding/json. Users who wish to
|
||||
// encode complex types need to satisfy the json.Marshaller and
|
||||
// json.Unmarshaller interfaces.
|
||||
type JSONEncoder struct{}
|
||||
|
||||
// NopEncoder does not encode cookie values, and instead simply accepts a []byte
|
||||
// (as an interface{}) and returns a []byte. This is particularly useful when
|
||||
// you encoding an object upstream and do not wish to re-encode it.
|
||||
type NopEncoder struct{}
|
||||
|
||||
// MaxLength restricts the maximum length, in bytes, for the cookie value.
|
||||
//
|
||||
// Default is 4096, which is the maximum value accepted by Internet Explorer.
|
||||
func (s *SecureCookie) MaxLength(value int) *SecureCookie {
|
||||
s.maxLength = value
|
||||
return s
|
||||
}
|
||||
|
||||
// MaxAge restricts the maximum age, in seconds, for the cookie value.
|
||||
//
|
||||
// Default is 86400 * 30. Set it to 0 for no restriction.
|
||||
func (s *SecureCookie) MaxAge(value int) *SecureCookie {
|
||||
s.maxAge = int64(value)
|
||||
return s
|
||||
}
|
||||
|
||||
// MinAge restricts the minimum age, in seconds, for the cookie value.
|
||||
//
|
||||
// Default is 0 (no restriction).
|
||||
func (s *SecureCookie) MinAge(value int) *SecureCookie {
|
||||
s.minAge = int64(value)
|
||||
return s
|
||||
}
|
||||
|
||||
// HashFunc sets the hash function used to create HMAC.
|
||||
//
|
||||
// Default is crypto/sha256.New.
|
||||
func (s *SecureCookie) HashFunc(f func() hash.Hash) *SecureCookie {
|
||||
s.hashFunc = f
|
||||
return s
|
||||
}
|
||||
|
||||
// BlockFunc sets the encryption function used to create a cipher.Block.
|
||||
//
|
||||
// Default is crypto/aes.New.
|
||||
func (s *SecureCookie) BlockFunc(f func([]byte) (cipher.Block, error)) *SecureCookie {
|
||||
if s.blockKey == nil {
|
||||
s.err = errBlockKeyNotSet
|
||||
} else if block, err := f(s.blockKey); err == nil {
|
||||
s.block = block
|
||||
} else {
|
||||
s.err = cookieError{cause: err, typ: usageError}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Encoding sets the encoding/serialization method for cookies.
|
||||
//
|
||||
// Default is encoding/gob. To encode special structures using encoding/gob,
|
||||
// they must be registered first using gob.Register().
|
||||
func (s *SecureCookie) SetSerializer(sz Serializer) *SecureCookie {
|
||||
s.sz = sz
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
// Encode encodes a cookie value.
|
||||
//
|
||||
// It serializes, optionally encrypts, signs with a message authentication code,
|
||||
// and finally encodes the value.
|
||||
//
|
||||
// The name argument is the cookie name. It is stored with the encoded value.
|
||||
// The value argument is the value to be encoded. It can be any value that can
|
||||
// be encoded using the currently selected serializer; see SetSerializer().
|
||||
//
|
||||
// It is the client's responsibility to ensure that value, when encoded using
|
||||
// the current serialization/encryption settings on s and then base64-encoded,
|
||||
// is shorter than the maximum permissible length.
|
||||
func (s *SecureCookie) Encode(name string, value interface{}) (string, error) {
|
||||
if s.err != nil {
|
||||
return "", s.err
|
||||
}
|
||||
if s.hashKey == nil {
|
||||
s.err = errHashKeyNotSet
|
||||
return "", s.err
|
||||
}
|
||||
var err error
|
||||
var b []byte
|
||||
// 1. Serialize.
|
||||
if b, err = s.sz.Serialize(value); err != nil {
|
||||
return "", cookieError{cause: err, typ: usageError}
|
||||
}
|
||||
// 2. Encrypt (optional).
|
||||
if s.block != nil {
|
||||
if b, err = encrypt(s.block, b); err != nil {
|
||||
return "", cookieError{cause: err, typ: usageError}
|
||||
}
|
||||
}
|
||||
b = encode(b)
|
||||
// 3. Create MAC for "name|date|value". Extra pipe to be used later.
|
||||
b = []byte(fmt.Sprintf("%s|%d|%s|", name, s.timestamp(), b))
|
||||
mac := createMac(hmac.New(s.hashFunc, s.hashKey), b[:len(b)-1])
|
||||
// Append mac, remove name.
|
||||
b = append(b, mac...)[len(name)+1:]
|
||||
// 4. Encode to base64.
|
||||
b = encode(b)
|
||||
// 5. Check length.
|
||||
if s.maxLength != 0 && len(b) > s.maxLength {
|
||||
return "", errEncodedValueTooLong
|
||||
}
|
||||
// Done.
|
||||
return string(b), nil
|
||||
}
|
||||
|
||||
// Decode decodes a cookie value.
|
||||
//
|
||||
// It decodes, verifies a message authentication code, optionally decrypts and
|
||||
// finally deserializes the value.
|
||||
//
|
||||
// The name argument is the cookie name. It must be the same name used when
|
||||
// it was stored. The value argument is the encoded cookie value. The dst
|
||||
// argument is where the cookie will be decoded. It must be a pointer.
|
||||
func (s *SecureCookie) Decode(name, value string, dst interface{}) error {
|
||||
if s.err != nil {
|
||||
return s.err
|
||||
}
|
||||
if s.hashKey == nil {
|
||||
s.err = errHashKeyNotSet
|
||||
return s.err
|
||||
}
|
||||
// 1. Check length.
|
||||
if s.maxLength != 0 && len(value) > s.maxLength {
|
||||
return errValueToDecodeTooLong
|
||||
}
|
||||
// 2. Decode from base64.
|
||||
b, err := decode([]byte(value))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// 3. Verify MAC. Value is "date|value|mac".
|
||||
parts := bytes.SplitN(b, []byte("|"), 3)
|
||||
if len(parts) != 3 {
|
||||
return ErrMacInvalid
|
||||
}
|
||||
h := hmac.New(s.hashFunc, s.hashKey)
|
||||
b = append([]byte(name+"|"), b[:len(b)-len(parts[2])-1]...)
|
||||
if err = verifyMac(h, b, parts[2]); err != nil {
|
||||
return err
|
||||
}
|
||||
// 4. Verify date ranges.
|
||||
var t1 int64
|
||||
if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil {
|
||||
return errTimestampInvalid
|
||||
}
|
||||
t2 := s.timestamp()
|
||||
if s.minAge != 0 && t1 > t2-s.minAge {
|
||||
return errTimestampTooNew
|
||||
}
|
||||
if s.maxAge != 0 && t1 < t2-s.maxAge {
|
||||
return errTimestampExpired
|
||||
}
|
||||
// 5. Decrypt (optional).
|
||||
b, err = decode(parts[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.block != nil {
|
||||
if b, err = decrypt(s.block, b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// 6. Deserialize.
|
||||
if err = s.sz.Deserialize(b, dst); err != nil {
|
||||
return cookieError{cause: err, typ: decodeError}
|
||||
}
|
||||
// Done.
|
||||
return nil
|
||||
}
|
||||
|
||||
// timestamp returns the current timestamp, in seconds.
|
||||
//
|
||||
// For testing purposes, the function that generates the timestamp can be
|
||||
// overridden. If not set, it will return time.Now().UTC().Unix().
|
||||
func (s *SecureCookie) timestamp() int64 {
|
||||
if s.timeFunc == nil {
|
||||
return time.Now().UTC().Unix()
|
||||
}
|
||||
return s.timeFunc()
|
||||
}
|
||||
|
||||
// Authentication -------------------------------------------------------------
|
||||
|
||||
// createMac creates a message authentication code (MAC).
|
||||
func createMac(h hash.Hash, value []byte) []byte {
|
||||
h.Write(value)
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
// verifyMac verifies that a message authentication code (MAC) is valid.
|
||||
func verifyMac(h hash.Hash, value []byte, mac []byte) error {
|
||||
mac2 := createMac(h, value)
|
||||
// Check that both MACs are of equal length, as subtle.ConstantTimeCompare
|
||||
// does not do this prior to Go 1.4.
|
||||
if len(mac) == len(mac2) && subtle.ConstantTimeCompare(mac, mac2) == 1 {
|
||||
return nil
|
||||
}
|
||||
return ErrMacInvalid
|
||||
}
|
||||
|
||||
// Encryption -----------------------------------------------------------------
|
||||
|
||||
// encrypt encrypts a value using the given block in counter mode.
|
||||
//
|
||||
// A random initialization vector (http://goo.gl/zF67k) with the length of the
|
||||
// block size is prepended to the resulting ciphertext.
|
||||
func encrypt(block cipher.Block, value []byte) ([]byte, error) {
|
||||
iv := GenerateRandomKey(block.BlockSize())
|
||||
if iv == nil {
|
||||
return nil, errGeneratingIV
|
||||
}
|
||||
// Encrypt it.
|
||||
stream := cipher.NewCTR(block, iv)
|
||||
stream.XORKeyStream(value, value)
|
||||
// Return iv + ciphertext.
|
||||
return append(iv, value...), nil
|
||||
}
|
||||
|
||||
// decrypt decrypts a value using the given block in counter mode.
|
||||
//
|
||||
// The value to be decrypted must be prepended by a initialization vector
|
||||
// (http://goo.gl/zF67k) with the length of the block size.
|
||||
func decrypt(block cipher.Block, value []byte) ([]byte, error) {
|
||||
size := block.BlockSize()
|
||||
if len(value) > size {
|
||||
// Extract iv.
|
||||
iv := value[:size]
|
||||
// Extract ciphertext.
|
||||
value = value[size:]
|
||||
// Decrypt it.
|
||||
stream := cipher.NewCTR(block, iv)
|
||||
stream.XORKeyStream(value, value)
|
||||
return value, nil
|
||||
}
|
||||
return nil, errDecryptionFailed
|
||||
}
|
||||
|
||||
// Serialization --------------------------------------------------------------
|
||||
|
||||
// Serialize encodes a value using gob.
|
||||
func (e GobEncoder) Serialize(src interface{}) ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
enc := gob.NewEncoder(buf)
|
||||
if err := enc.Encode(src); err != nil {
|
||||
return nil, cookieError{cause: err, typ: usageError}
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Deserialize decodes a value using gob.
|
||||
func (e GobEncoder) Deserialize(src []byte, dst interface{}) error {
|
||||
dec := gob.NewDecoder(bytes.NewBuffer(src))
|
||||
if err := dec.Decode(dst); err != nil {
|
||||
return cookieError{cause: err, typ: decodeError}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serialize encodes a value using encoding/json.
|
||||
func (e JSONEncoder) Serialize(src interface{}) ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
enc := json.NewEncoder(buf)
|
||||
if err := enc.Encode(src); err != nil {
|
||||
return nil, cookieError{cause: err, typ: usageError}
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Deserialize decodes a value using encoding/json.
|
||||
func (e JSONEncoder) Deserialize(src []byte, dst interface{}) error {
|
||||
dec := json.NewDecoder(bytes.NewReader(src))
|
||||
if err := dec.Decode(dst); err != nil {
|
||||
return cookieError{cause: err, typ: decodeError}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serialize passes a []byte through as-is.
|
||||
func (e NopEncoder) Serialize(src interface{}) ([]byte, error) {
|
||||
if b, ok := src.([]byte); ok {
|
||||
return b, nil
|
||||
}
|
||||
|
||||
return nil, errValueNotByte
|
||||
}
|
||||
|
||||
// Deserialize passes a []byte through as-is.
|
||||
func (e NopEncoder) Deserialize(src []byte, dst interface{}) error {
|
||||
if dat, ok := dst.(*[]byte); ok {
|
||||
*dat = src
|
||||
return nil
|
||||
}
|
||||
return errValueNotBytePtr
|
||||
}
|
||||
|
||||
// Encoding -------------------------------------------------------------------
|
||||
|
||||
// encode encodes a value using base64.
|
||||
func encode(value []byte) []byte {
|
||||
encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value)))
|
||||
base64.URLEncoding.Encode(encoded, value)
|
||||
return encoded
|
||||
}
|
||||
|
||||
// decode decodes a cookie using base64.
|
||||
func decode(value []byte) ([]byte, error) {
|
||||
decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value)))
|
||||
b, err := base64.URLEncoding.Decode(decoded, value)
|
||||
if err != nil {
|
||||
return nil, cookieError{cause: err, typ: decodeError, msg: "base64 decode failed"}
|
||||
}
|
||||
return decoded[:b], nil
|
||||
}
|
||||
|
||||
// Helpers --------------------------------------------------------------------
|
||||
|
||||
// GenerateRandomKey creates a random key with the given length in bytes.
|
||||
// On failure, returns nil.
|
||||
//
|
||||
// Callers should explicitly check for the possibility of a nil return, treat
|
||||
// it as a failure of the system random number generator, and not continue.
|
||||
func GenerateRandomKey(length int) []byte {
|
||||
k := make([]byte, length)
|
||||
if _, err := io.ReadFull(rand.Reader, k); err != nil {
|
||||
return nil
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
// CodecsFromPairs returns a slice of SecureCookie instances.
|
||||
//
|
||||
// It is a convenience function to create a list of codecs for key rotation. Note
|
||||
// that the generated Codecs will have the default options applied: callers
|
||||
// should iterate over each Codec and type-assert the underlying *SecureCookie to
|
||||
// change these.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// codecs := securecookie.CodecsFromPairs(
|
||||
// []byte("new-hash-key"),
|
||||
// []byte("new-block-key"),
|
||||
// []byte("old-hash-key"),
|
||||
// []byte("old-block-key"),
|
||||
// )
|
||||
//
|
||||
// // Modify each instance.
|
||||
// for _, s := range codecs {
|
||||
// if cookie, ok := s.(*securecookie.SecureCookie); ok {
|
||||
// cookie.MaxAge(86400 * 7)
|
||||
// cookie.SetSerializer(securecookie.JSONEncoder{})
|
||||
// cookie.HashFunc(sha512.New512_256)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
func CodecsFromPairs(keyPairs ...[]byte) []Codec {
|
||||
codecs := make([]Codec, len(keyPairs)/2+len(keyPairs)%2)
|
||||
for i := 0; i < len(keyPairs); i += 2 {
|
||||
var blockKey []byte
|
||||
if i+1 < len(keyPairs) {
|
||||
blockKey = keyPairs[i+1]
|
||||
}
|
||||
codecs[i/2] = New(keyPairs[i], blockKey)
|
||||
}
|
||||
return codecs
|
||||
}
|
||||
|
||||
// EncodeMulti encodes a cookie value using a group of codecs.
|
||||
//
|
||||
// The codecs are tried in order. Multiple codecs are accepted to allow
|
||||
// key rotation.
|
||||
//
|
||||
// On error, may return a MultiError.
|
||||
func EncodeMulti(name string, value interface{}, codecs ...Codec) (string, error) {
|
||||
if len(codecs) == 0 {
|
||||
return "", errNoCodecs
|
||||
}
|
||||
|
||||
var errors MultiError
|
||||
for _, codec := range codecs {
|
||||
encoded, err := codec.Encode(name, value)
|
||||
if err == nil {
|
||||
return encoded, nil
|
||||
}
|
||||
errors = append(errors, err)
|
||||
}
|
||||
return "", errors
|
||||
}
|
||||
|
||||
// DecodeMulti decodes a cookie value using a group of codecs.
|
||||
//
|
||||
// The codecs are tried in order. Multiple codecs are accepted to allow
|
||||
// key rotation.
|
||||
//
|
||||
// On error, may return a MultiError.
|
||||
func DecodeMulti(name string, value string, dst interface{}, codecs ...Codec) error {
|
||||
if len(codecs) == 0 {
|
||||
return errNoCodecs
|
||||
}
|
||||
|
||||
var errors MultiError
|
||||
for _, codec := range codecs {
|
||||
err := codec.Decode(name, value, dst)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
errors = append(errors, err)
|
||||
}
|
||||
return errors
|
||||
}
|
||||
|
||||
// MultiError groups multiple errors.
|
||||
type MultiError []error
|
||||
|
||||
func (m MultiError) IsUsage() bool { return m.any(func(e Error) bool { return e.IsUsage() }) }
|
||||
func (m MultiError) IsDecode() bool { return m.any(func(e Error) bool { return e.IsDecode() }) }
|
||||
func (m MultiError) IsInternal() bool { return m.any(func(e Error) bool { return e.IsInternal() }) }
|
||||
|
||||
// Cause returns nil for MultiError; there is no unique underlying cause in the
|
||||
// general case.
|
||||
//
|
||||
// Note: we could conceivably return a non-nil Cause only when there is exactly
|
||||
// one child error with a Cause. However, it would be brittle for client code
|
||||
// to rely on the arity of causes inside a MultiError, so we have opted not to
|
||||
// provide this functionality. Clients which really wish to access the Causes
|
||||
// of the underlying errors are free to iterate through the errors themselves.
|
||||
func (m MultiError) Cause() error { return nil }
|
||||
|
||||
func (m MultiError) Error() string {
|
||||
s, n := "", 0
|
||||
for _, e := range m {
|
||||
if e != nil {
|
||||
if n == 0 {
|
||||
s = e.Error()
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
switch n {
|
||||
case 0:
|
||||
return "(0 errors)"
|
||||
case 1:
|
||||
return s
|
||||
case 2:
|
||||
return s + " (and 1 other error)"
|
||||
}
|
||||
return fmt.Sprintf("%s (and %d other errors)", s, n-1)
|
||||
}
|
||||
|
||||
// any returns true if any element of m is an Error for which pred returns true.
|
||||
func (m MultiError) any(pred func(Error) bool) bool {
|
||||
for _, e := range m {
|
||||
if ourErr, ok := e.(Error); ok && pred(ourErr) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,90 +0,0 @@
|
|||
sessions
|
||||
========
|
||||
[![GoDoc](https://godoc.org/github.com/gorilla/sessions?status.svg)](https://godoc.org/github.com/gorilla/sessions) [![Build Status](https://travis-ci.org/gorilla/sessions.png?branch=master)](https://travis-ci.org/gorilla/sessions)
|
||||
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/sessions/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/sessions?badge)
|
||||
|
||||
|
||||
gorilla/sessions provides cookie and filesystem sessions and infrastructure for
|
||||
custom session backends.
|
||||
|
||||
The key features are:
|
||||
|
||||
* Simple API: use it as an easy way to set signed (and optionally
|
||||
encrypted) cookies.
|
||||
* Built-in backends to store sessions in cookies or the filesystem.
|
||||
* Flash messages: session values that last until read.
|
||||
* Convenient way to switch session persistency (aka "remember me") and set
|
||||
other attributes.
|
||||
* Mechanism to rotate authentication and encryption keys.
|
||||
* Multiple sessions per request, even using different backends.
|
||||
* Interfaces and infrastructure for custom session backends: sessions from
|
||||
different stores can be retrieved and batch-saved using a common API.
|
||||
|
||||
Let's start with an example that shows the sessions API in a nutshell:
|
||||
|
||||
```go
|
||||
import (
|
||||
"net/http"
|
||||
"github.com/gorilla/sessions"
|
||||
)
|
||||
|
||||
var store = sessions.NewCookieStore([]byte("something-very-secret"))
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Get a session. We're ignoring the error resulted from decoding an
|
||||
// existing session: Get() always returns a session, even if empty.
|
||||
session, _ := store.Get(r, "session-name")
|
||||
// Set some session values.
|
||||
session.Values["foo"] = "bar"
|
||||
session.Values[42] = 43
|
||||
// Save it before we write to the response/return from the handler.
|
||||
session.Save(r, w)
|
||||
}
|
||||
```
|
||||
|
||||
First we initialize a session store calling `NewCookieStore()` and passing a
|
||||
secret key used to authenticate the session. Inside the handler, we call
|
||||
`store.Get()` to retrieve an existing session or a new one. Then we set some
|
||||
session values in session.Values, which is a `map[interface{}]interface{}`.
|
||||
And finally we call `session.Save()` to save the session in the response.
|
||||
|
||||
Important Note: If you aren't using gorilla/mux, you need to wrap your handlers
|
||||
with
|
||||
[`context.ClearHandler`](http://www.gorillatoolkit.org/pkg/context#ClearHandler)
|
||||
as or else you will leak memory! An easy way to do this is to wrap the top-level
|
||||
mux when calling http.ListenAndServe:
|
||||
|
||||
```go
|
||||
http.ListenAndServe(":8080", context.ClearHandler(http.DefaultServeMux))
|
||||
```
|
||||
|
||||
The ClearHandler function is provided by the gorilla/context package.
|
||||
|
||||
More examples are available [on the Gorilla
|
||||
website](http://www.gorillatoolkit.org/pkg/sessions).
|
||||
|
||||
## Store Implementations
|
||||
|
||||
Other implementations of the `sessions.Store` interface:
|
||||
|
||||
* [github.com/starJammer/gorilla-sessions-arangodb](https://github.com/starJammer/gorilla-sessions-arangodb) - ArangoDB
|
||||
* [github.com/yosssi/boltstore](https://github.com/yosssi/boltstore) - Bolt
|
||||
* [github.com/srinathgs/couchbasestore](https://github.com/srinathgs/couchbasestore) - Couchbase
|
||||
* [github.com/denizeren/dynamostore](https://github.com/denizeren/dynamostore) - Dynamodb on AWS
|
||||
* [github.com/savaki/dynastore](https://github.com/savaki/dynastore) - DynamoDB on AWS (Official AWS library)
|
||||
* [github.com/bradleypeabody/gorilla-sessions-memcache](https://github.com/bradleypeabody/gorilla-sessions-memcache) - Memcache
|
||||
* [github.com/dsoprea/go-appengine-sessioncascade](https://github.com/dsoprea/go-appengine-sessioncascade) - Memcache/Datastore/Context in AppEngine
|
||||
* [github.com/kidstuff/mongostore](https://github.com/kidstuff/mongostore) - MongoDB
|
||||
* [github.com/srinathgs/mysqlstore](https://github.com/srinathgs/mysqlstore) - MySQL
|
||||
* [github.com/EnumApps/clustersqlstore](https://github.com/EnumApps/clustersqlstore) - MySQL Cluster
|
||||
* [github.com/antonlindstrom/pgstore](https://github.com/antonlindstrom/pgstore) - PostgreSQL
|
||||
* [github.com/boj/redistore](https://github.com/boj/redistore) - Redis
|
||||
* [github.com/boj/rethinkstore](https://github.com/boj/rethinkstore) - RethinkDB
|
||||
* [github.com/boj/riakstore](https://github.com/boj/riakstore) - Riak
|
||||
* [github.com/michaeljs1990/sqlitestore](https://github.com/michaeljs1990/sqlitestore) - SQLite
|
||||
* [github.com/wader/gormstore](https://github.com/wader/gormstore) - GORM (MySQL, PostgreSQL, SQLite)
|
||||
* [github.com/gernest/qlstore](https://github.com/gernest/qlstore) - ql
|
||||
|
||||
## License
|
||||
|
||||
BSD licensed. See the LICENSE file for details.
|
|
@ -1,198 +0,0 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package sessions provides cookie and filesystem sessions and
|
||||
infrastructure for custom session backends.
|
||||
|
||||
The key features are:
|
||||
|
||||
* Simple API: use it as an easy way to set signed (and optionally
|
||||
encrypted) cookies.
|
||||
* Built-in backends to store sessions in cookies or the filesystem.
|
||||
* Flash messages: session values that last until read.
|
||||
* Convenient way to switch session persistency (aka "remember me") and set
|
||||
other attributes.
|
||||
* Mechanism to rotate authentication and encryption keys.
|
||||
* Multiple sessions per request, even using different backends.
|
||||
* Interfaces and infrastructure for custom session backends: sessions from
|
||||
different stores can be retrieved and batch-saved using a common API.
|
||||
|
||||
Let's start with an example that shows the sessions API in a nutshell:
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"github.com/gorilla/sessions"
|
||||
)
|
||||
|
||||
var store = sessions.NewCookieStore([]byte("something-very-secret"))
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Get a session. Get() always returns a session, even if empty.
|
||||
session, err := store.Get(r, "session-name")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Set some session values.
|
||||
session.Values["foo"] = "bar"
|
||||
session.Values[42] = 43
|
||||
// Save it before we write to the response/return from the handler.
|
||||
session.Save(r, w)
|
||||
}
|
||||
|
||||
First we initialize a session store calling NewCookieStore() and passing a
|
||||
secret key used to authenticate the session. Inside the handler, we call
|
||||
store.Get() to retrieve an existing session or a new one. Then we set some
|
||||
session values in session.Values, which is a map[interface{}]interface{}.
|
||||
And finally we call session.Save() to save the session in the response.
|
||||
|
||||
Note that in production code, we should check for errors when calling
|
||||
session.Save(r, w), and either display an error message or otherwise handle it.
|
||||
|
||||
Save must be called before writing to the response, otherwise the session
|
||||
cookie will not be sent to the client.
|
||||
|
||||
Important Note: If you aren't using gorilla/mux, you need to wrap your handlers
|
||||
with context.ClearHandler or else you will leak memory! An easy way to do this
|
||||
is to wrap the top-level mux when calling http.ListenAndServe:
|
||||
|
||||
http.ListenAndServe(":8080", context.ClearHandler(http.DefaultServeMux))
|
||||
|
||||
The ClearHandler function is provided by the gorilla/context package.
|
||||
|
||||
That's all you need to know for the basic usage. Let's take a look at other
|
||||
options, starting with flash messages.
|
||||
|
||||
Flash messages are session values that last until read. The term appeared with
|
||||
Ruby On Rails a few years back. When we request a flash message, it is removed
|
||||
from the session. To add a flash, call session.AddFlash(), and to get all
|
||||
flashes, call session.Flashes(). Here is an example:
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Get a session.
|
||||
session, err := store.Get(r, "session-name")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Get the previous flashes, if any.
|
||||
if flashes := session.Flashes(); len(flashes) > 0 {
|
||||
// Use the flash values.
|
||||
} else {
|
||||
// Set a new flash.
|
||||
session.AddFlash("Hello, flash messages world!")
|
||||
}
|
||||
session.Save(r, w)
|
||||
}
|
||||
|
||||
Flash messages are useful to set information to be read after a redirection,
|
||||
like after form submissions.
|
||||
|
||||
There may also be cases where you want to store a complex datatype within a
|
||||
session, such as a struct. Sessions are serialised using the encoding/gob package,
|
||||
so it is easy to register new datatypes for storage in sessions:
|
||||
|
||||
import(
|
||||
"encoding/gob"
|
||||
"github.com/gorilla/sessions"
|
||||
)
|
||||
|
||||
type Person struct {
|
||||
FirstName string
|
||||
LastName string
|
||||
Email string
|
||||
Age int
|
||||
}
|
||||
|
||||
type M map[string]interface{}
|
||||
|
||||
func init() {
|
||||
|
||||
gob.Register(&Person{})
|
||||
gob.Register(&M{})
|
||||
}
|
||||
|
||||
As it's not possible to pass a raw type as a parameter to a function, gob.Register()
|
||||
relies on us passing it a value of the desired type. In the example above we've passed
|
||||
it a pointer to a struct and a pointer to a custom type representing a
|
||||
map[string]interface. (We could have passed non-pointer values if we wished.) This will
|
||||
then allow us to serialise/deserialise values of those types to and from our sessions.
|
||||
|
||||
Note that because session values are stored in a map[interface{}]interface{}, there's
|
||||
a need to type-assert data when retrieving it. We'll use the Person struct we registered above:
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
session, err := store.Get(r, "session-name")
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Retrieve our struct and type-assert it
val := session.Values["person"]
person, ok := val.(*Person)
if !ok {
	// Handle the case that it's not an expected type
}
|
||||
|
||||
// Now we can use our person object
|
||||
}
|
||||
|
||||
By default, session cookies last for a month. This is probably too long for
|
||||
some cases, but it is easy to change this and other attributes during
|
||||
runtime. Sessions can be configured individually or the store can be
|
||||
configured and then all sessions saved using it will use that configuration.
|
||||
We access session.Options or store.Options to set a new configuration. The
|
||||
fields are basically a subset of http.Cookie fields. Let's change the
|
||||
maximum age of a session to one week:
|
||||
|
||||
session.Options = &sessions.Options{
|
||||
Path: "/",
|
||||
MaxAge: 86400 * 7,
|
||||
HttpOnly: true,
|
||||
}
|
||||
|
||||
Sometimes we may want to change authentication and/or encryption keys without
|
||||
breaking existing sessions. The CookieStore supports key rotation, and to use
|
||||
it you just need to set multiple authentication and encryption keys, in pairs,
|
||||
to be tested in order:
|
||||
|
||||
var store = sessions.NewCookieStore(
|
||||
[]byte("new-authentication-key"),
|
||||
[]byte("new-encryption-key"),
|
||||
[]byte("old-authentication-key"),
|
||||
[]byte("old-encryption-key"),
|
||||
)
|
||||
|
||||
New sessions will be saved using the first pair. Old sessions can still be
|
||||
read because the first pair will fail, and the second will be tested. This
|
||||
makes it easy to "rotate" secret keys and still be able to validate existing
|
||||
sessions. Note: for all pairs the encryption key is optional; set it to nil
|
||||
or omit it and encryption won't be used.
|
||||
|
||||
Multiple sessions can be used in the same request, even with different
|
||||
session backends. When this happens, calling Save() on each session
|
||||
individually would be cumbersome, so we have a way to save all sessions
|
||||
at once: it's sessions.Save(). Here's an example:
|
||||
|
||||
var store = sessions.NewCookieStore([]byte("something-very-secret"))
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Get a session and set a value.
|
||||
session1, _ := store.Get(r, "session-one")
|
||||
session1.Values["foo"] = "bar"
|
||||
// Get another session and set another value.
|
||||
session2, _ := store.Get(r, "session-two")
|
||||
session2.Values[42] = 43
|
||||
// Save all sessions.
|
||||
sessions.Save(r, w)
|
||||
}
|
||||
|
||||
This is possible because when we call Get() from a session store, it adds the
|
||||
session to a common registry. Save() uses it to save all registered sessions.
|
||||
*/
|
||||
package sessions
|
|
@ -1,102 +0,0 @@
|
|||
// This file contains code adapted from the Go standard library
|
||||
// https://github.com/golang/go/blob/39ad0fd0789872f9469167be7fe9578625ff246e/src/net/http/lex.go
|
||||
|
||||
package sessions
|
||||
|
||||
import "strings"
|
||||
|
||||
// isTokenTable marks, for each ASCII code point below 127, whether it may
// appear in an HTTP token (and therefore in a cookie name): letters, digits,
// and the punctuation listed here. Separators, controls, and anything >= 127
// are excluded. Entries are kept in ascending code-point order (the original
// listed 'W' before 'V').
var isTokenTable = [127]bool{
	'!':  true,
	'#':  true,
	'$':  true,
	'%':  true,
	'&':  true,
	'\'': true,
	'*':  true,
	'+':  true,
	'-':  true,
	'.':  true,
	'0':  true,
	'1':  true,
	'2':  true,
	'3':  true,
	'4':  true,
	'5':  true,
	'6':  true,
	'7':  true,
	'8':  true,
	'9':  true,
	'A':  true,
	'B':  true,
	'C':  true,
	'D':  true,
	'E':  true,
	'F':  true,
	'G':  true,
	'H':  true,
	'I':  true,
	'J':  true,
	'K':  true,
	'L':  true,
	'M':  true,
	'N':  true,
	'O':  true,
	'P':  true,
	'Q':  true,
	'R':  true,
	'S':  true,
	'T':  true,
	'U':  true,
	'V':  true,
	'W':  true,
	'X':  true,
	'Y':  true,
	'Z':  true,
	'^':  true,
	'_':  true,
	'`':  true,
	'a':  true,
	'b':  true,
	'c':  true,
	'd':  true,
	'e':  true,
	'f':  true,
	'g':  true,
	'h':  true,
	'i':  true,
	'j':  true,
	'k':  true,
	'l':  true,
	'm':  true,
	'n':  true,
	'o':  true,
	'p':  true,
	'q':  true,
	'r':  true,
	's':  true,
	't':  true,
	'u':  true,
	'v':  true,
	'w':  true,
	'x':  true,
	'y':  true,
	'z':  true,
	'|':  true,
	'~':  true,
}

// isToken reports whether r is a valid HTTP token rune. Runes at or above
// 127 are never tokens.
func isToken(r rune) bool {
	i := int(r)
	return i < len(isTokenTable) && isTokenTable[i]
}

// isNotToken is the negation of isToken, in the shape expected by
// strings.IndexFunc.
func isNotToken(r rune) bool {
	return !isToken(r)
}

// isCookieNameValid reports whether raw is a non-empty string consisting
// entirely of HTTP token characters.
func isCookieNameValid(raw string) bool {
	if raw == "" {
		return false
	}
	return strings.IndexFunc(raw, isNotToken) < 0
}
|
|
@ -1,241 +0,0 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sessions
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/context"
|
||||
)
|
||||
|
||||
// Default flashes key.
|
||||
const flashesKey = "_flash"
|
||||
|
||||
// Options --------------------------------------------------------------------
|
||||
|
||||
// Options stores configuration for a session or session store.
//
// The fields mirror a subset of http.Cookie and are applied when the
// session cookie is written.
type Options struct {
	Path   string
	Domain string
	// MaxAge controls the cookie's Max-Age attribute:
	//   0  -> no Max-Age attribute is written;
	//   <0 -> the cookie is deleted now (equivalent to 'Max-Age: 0');
	//   >0 -> Max-Age is written with this many seconds.
	MaxAge   int
	Secure   bool
	HttpOnly bool
}
|
||||
|
||||
// Session --------------------------------------------------------------------
|
||||
|
||||
// NewSession is called by session stores to create a new session instance.
|
||||
func NewSession(store Store, name string) *Session {
|
||||
return &Session{
|
||||
Values: make(map[interface{}]interface{}),
|
||||
store: store,
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Session stores the values and optional configuration for a session.
|
||||
type Session struct {
|
||||
// The ID of the session, generated by stores. It should not be used for
|
||||
// user data.
|
||||
ID string
|
||||
// Values contains the user-data for the session.
|
||||
Values map[interface{}]interface{}
|
||||
Options *Options
|
||||
IsNew bool
|
||||
store Store
|
||||
name string
|
||||
}
|
||||
|
||||
// Flashes returns a slice of flash messages from the session.
|
||||
//
|
||||
// A single variadic argument is accepted, and it is optional: it defines
|
||||
// the flash key. If not defined "_flash" is used by default.
|
||||
func (s *Session) Flashes(vars ...string) []interface{} {
|
||||
var flashes []interface{}
|
||||
key := flashesKey
|
||||
if len(vars) > 0 {
|
||||
key = vars[0]
|
||||
}
|
||||
if v, ok := s.Values[key]; ok {
|
||||
// Drop the flashes and return it.
|
||||
delete(s.Values, key)
|
||||
flashes = v.([]interface{})
|
||||
}
|
||||
return flashes
|
||||
}
|
||||
|
||||
// AddFlash adds a flash message to the session.
|
||||
//
|
||||
// A single variadic argument is accepted, and it is optional: it defines
|
||||
// the flash key. If not defined "_flash" is used by default.
|
||||
func (s *Session) AddFlash(value interface{}, vars ...string) {
|
||||
key := flashesKey
|
||||
if len(vars) > 0 {
|
||||
key = vars[0]
|
||||
}
|
||||
var flashes []interface{}
|
||||
if v, ok := s.Values[key]; ok {
|
||||
flashes = v.([]interface{})
|
||||
}
|
||||
s.Values[key] = append(flashes, value)
|
||||
}
|
||||
|
||||
// Save is a convenience method to save this session. It is the same as calling
|
||||
// store.Save(request, response, session). You should call Save before writing to
|
||||
// the response or returning from the handler.
|
||||
func (s *Session) Save(r *http.Request, w http.ResponseWriter) error {
|
||||
return s.store.Save(r, w, s)
|
||||
}
|
||||
|
||||
// Name returns the name used to register the session.
|
||||
func (s *Session) Name() string {
|
||||
return s.name
|
||||
}
|
||||
|
||||
// Store returns the session store used to register the session.
|
||||
func (s *Session) Store() Store {
|
||||
return s.store
|
||||
}
|
||||
|
||||
// Registry -------------------------------------------------------------------
|
||||
|
||||
// sessionInfo stores a session tracked by the registry.
|
||||
type sessionInfo struct {
|
||||
s *Session
|
||||
e error
|
||||
}
|
||||
|
||||
// contextKey is the type used to store the registry in the context.
|
||||
type contextKey int
|
||||
|
||||
// registryKey is the key used to store the registry in the context.
|
||||
const registryKey contextKey = 0
|
||||
|
||||
// GetRegistry returns a registry instance for the current request.
|
||||
func GetRegistry(r *http.Request) *Registry {
|
||||
registry := context.Get(r, registryKey)
|
||||
if registry != nil {
|
||||
return registry.(*Registry)
|
||||
}
|
||||
newRegistry := &Registry{
|
||||
request: r,
|
||||
sessions: make(map[string]sessionInfo),
|
||||
}
|
||||
context.Set(r, registryKey, newRegistry)
|
||||
return newRegistry
|
||||
}
|
||||
|
||||
// Registry stores sessions used during a request.
|
||||
type Registry struct {
|
||||
request *http.Request
|
||||
sessions map[string]sessionInfo
|
||||
}
|
||||
|
||||
// Get registers and returns a session for the given name and session store.
|
||||
//
|
||||
// It returns a new session if there are no sessions registered for the name.
|
||||
func (s *Registry) Get(store Store, name string) (session *Session, err error) {
|
||||
if !isCookieNameValid(name) {
|
||||
return nil, fmt.Errorf("sessions: invalid character in cookie name: %s", name)
|
||||
}
|
||||
if info, ok := s.sessions[name]; ok {
|
||||
session, err = info.s, info.e
|
||||
} else {
|
||||
session, err = store.New(s.request, name)
|
||||
session.name = name
|
||||
s.sessions[name] = sessionInfo{s: session, e: err}
|
||||
}
|
||||
session.store = store
|
||||
return
|
||||
}
|
||||
|
||||
// Save saves all sessions registered for the current request.
|
||||
func (s *Registry) Save(w http.ResponseWriter) error {
|
||||
var errMulti MultiError
|
||||
for name, info := range s.sessions {
|
||||
session := info.s
|
||||
if session.store == nil {
|
||||
errMulti = append(errMulti, fmt.Errorf(
|
||||
"sessions: missing store for session %q", name))
|
||||
} else if err := session.store.Save(s.request, w, session); err != nil {
|
||||
errMulti = append(errMulti, fmt.Errorf(
|
||||
"sessions: error saving session %q -- %v", name, err))
|
||||
}
|
||||
}
|
||||
if errMulti != nil {
|
||||
return errMulti
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helpers --------------------------------------------------------------------
|
||||
|
||||
// init registers the flash slice type with gob so stores that gob-encode
// session values can serialize flash messages.
func init() {
	gob.Register([]interface{}{})
}
|
||||
|
||||
// Save saves all sessions used during the current request.
|
||||
func Save(r *http.Request, w http.ResponseWriter) error {
|
||||
return GetRegistry(r).Save(w)
|
||||
}
|
||||
|
||||
// NewCookie returns an http.Cookie with the options set. It also sets
|
||||
// the Expires field calculated based on the MaxAge value, for Internet
|
||||
// Explorer compatibility.
|
||||
func NewCookie(name, value string, options *Options) *http.Cookie {
|
||||
cookie := &http.Cookie{
|
||||
Name: name,
|
||||
Value: value,
|
||||
Path: options.Path,
|
||||
Domain: options.Domain,
|
||||
MaxAge: options.MaxAge,
|
||||
Secure: options.Secure,
|
||||
HttpOnly: options.HttpOnly,
|
||||
}
|
||||
if options.MaxAge > 0 {
|
||||
d := time.Duration(options.MaxAge) * time.Second
|
||||
cookie.Expires = time.Now().Add(d)
|
||||
} else if options.MaxAge < 0 {
|
||||
// Set it to the past to expire now.
|
||||
cookie.Expires = time.Unix(1, 0)
|
||||
}
|
||||
return cookie
|
||||
}
|
||||
|
||||
// Error ----------------------------------------------------------------------
|
||||
|
||||
// MultiError stores multiple errors.
//
// Borrowed from the App Engine SDK.
type MultiError []error

// Error summarizes the collection: it reports the first non-nil error's
// message plus a count of any additional non-nil errors; nil entries are
// ignored.
func (m MultiError) Error() string {
	first, count := "", 0
	for _, err := range m {
		if err == nil {
			continue
		}
		if count == 0 {
			first = err.Error()
		}
		count++
	}
	switch count {
	case 0:
		return "(0 errors)"
	case 1:
		return first
	case 2:
		return first + " (and 1 other error)"
	default:
		return fmt.Sprintf("%s (and %d other errors)", first, count-1)
	}
}
|
|
@ -1,295 +0,0 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sessions
|
||||
|
||||
import (
|
||||
"encoding/base32"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gorilla/securecookie"
|
||||
)
|
||||
|
||||
// Store is an interface for custom session stores.
|
||||
//
|
||||
// See CookieStore and FilesystemStore for examples.
|
||||
type Store interface {
|
||||
// Get should return a cached session.
|
||||
Get(r *http.Request, name string) (*Session, error)
|
||||
|
||||
// New should create and return a new session.
|
||||
//
|
||||
// Note that New should never return a nil session, even in the case of
|
||||
// an error if using the Registry infrastructure to cache the session.
|
||||
New(r *http.Request, name string) (*Session, error)
|
||||
|
||||
// Save should persist session to the underlying store implementation.
|
||||
Save(r *http.Request, w http.ResponseWriter, s *Session) error
|
||||
}
|
||||
|
||||
// CookieStore ----------------------------------------------------------------
|
||||
|
||||
// NewCookieStore returns a new CookieStore.
|
||||
//
|
||||
// Keys are defined in pairs to allow key rotation, but the common case is
|
||||
// to set a single authentication key and optionally an encryption key.
|
||||
//
|
||||
// The first key in a pair is used for authentication and the second for
|
||||
// encryption. The encryption key can be set to nil or omitted in the last
|
||||
// pair, but the authentication key is required in all pairs.
|
||||
//
|
||||
// It is recommended to use an authentication key with 32 or 64 bytes.
|
||||
// The encryption key, if set, must be either 16, 24, or 32 bytes to select
|
||||
// AES-128, AES-192, or AES-256 modes.
|
||||
//
|
||||
// Use the convenience function securecookie.GenerateRandomKey() to create
|
||||
// strong keys.
|
||||
func NewCookieStore(keyPairs ...[]byte) *CookieStore {
|
||||
cs := &CookieStore{
|
||||
Codecs: securecookie.CodecsFromPairs(keyPairs...),
|
||||
Options: &Options{
|
||||
Path: "/",
|
||||
MaxAge: 86400 * 30,
|
||||
},
|
||||
}
|
||||
|
||||
cs.MaxAge(cs.Options.MaxAge)
|
||||
return cs
|
||||
}
|
||||
|
||||
// CookieStore stores sessions using secure cookies.
|
||||
type CookieStore struct {
|
||||
Codecs []securecookie.Codec
|
||||
Options *Options // default configuration
|
||||
}
|
||||
|
||||
// Get returns a session for the given name after adding it to the registry.
|
||||
//
|
||||
// It returns a new session if the sessions doesn't exist. Access IsNew on
|
||||
// the session to check if it is an existing session or a new one.
|
||||
//
|
||||
// It returns a new session and an error if the session exists but could
|
||||
// not be decoded.
|
||||
func (s *CookieStore) Get(r *http.Request, name string) (*Session, error) {
|
||||
return GetRegistry(r).Get(s, name)
|
||||
}
|
||||
|
||||
// New returns a session for the given name without adding it to the registry.
|
||||
//
|
||||
// The difference between New() and Get() is that calling New() twice will
|
||||
// decode the session data twice, while Get() registers and reuses the same
|
||||
// decoded session after the first call.
|
||||
func (s *CookieStore) New(r *http.Request, name string) (*Session, error) {
|
||||
session := NewSession(s, name)
|
||||
opts := *s.Options
|
||||
session.Options = &opts
|
||||
session.IsNew = true
|
||||
var err error
|
||||
if c, errCookie := r.Cookie(name); errCookie == nil {
|
||||
err = securecookie.DecodeMulti(name, c.Value, &session.Values,
|
||||
s.Codecs...)
|
||||
if err == nil {
|
||||
session.IsNew = false
|
||||
}
|
||||
}
|
||||
return session, err
|
||||
}
|
||||
|
||||
// Save adds a single session to the response.
|
||||
func (s *CookieStore) Save(r *http.Request, w http.ResponseWriter,
|
||||
session *Session) error {
|
||||
encoded, err := securecookie.EncodeMulti(session.Name(), session.Values,
|
||||
s.Codecs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
http.SetCookie(w, NewCookie(session.Name(), encoded, session.Options))
|
||||
return nil
|
||||
}
|
||||
|
||||
// MaxAge sets the maximum age for the store and the underlying cookie
|
||||
// implementation. Individual sessions can be deleted by setting Options.MaxAge
|
||||
// = -1 for that session.
|
||||
func (s *CookieStore) MaxAge(age int) {
|
||||
s.Options.MaxAge = age
|
||||
|
||||
// Set the maxAge for each securecookie instance.
|
||||
for _, codec := range s.Codecs {
|
||||
if sc, ok := codec.(*securecookie.SecureCookie); ok {
|
||||
sc.MaxAge(age)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FilesystemStore ------------------------------------------------------------
|
||||
|
||||
// fileMutex serializes access to session files on disk across all
// FilesystemStore instances.
var fileMutex sync.RWMutex
|
||||
|
||||
// NewFilesystemStore returns a new FilesystemStore.
|
||||
//
|
||||
// The path argument is the directory where sessions will be saved. If empty
|
||||
// it will use os.TempDir().
|
||||
//
|
||||
// See NewCookieStore() for a description of the other parameters.
|
||||
func NewFilesystemStore(path string, keyPairs ...[]byte) *FilesystemStore {
|
||||
if path == "" {
|
||||
path = os.TempDir()
|
||||
}
|
||||
fs := &FilesystemStore{
|
||||
Codecs: securecookie.CodecsFromPairs(keyPairs...),
|
||||
Options: &Options{
|
||||
Path: "/",
|
||||
MaxAge: 86400 * 30,
|
||||
},
|
||||
path: path,
|
||||
}
|
||||
|
||||
fs.MaxAge(fs.Options.MaxAge)
|
||||
return fs
|
||||
}
|
||||
|
||||
// FilesystemStore stores sessions in the filesystem.
|
||||
//
|
||||
// It also serves as a reference for custom stores.
|
||||
//
|
||||
// This store is still experimental and not well tested. Feedback is welcome.
|
||||
type FilesystemStore struct {
|
||||
Codecs []securecookie.Codec
|
||||
Options *Options // default configuration
|
||||
path string
|
||||
}
|
||||
|
||||
// MaxLength restricts the maximum length of new sessions to l.
|
||||
// If l is 0 there is no limit to the size of a session, use with caution.
|
||||
// The default for a new FilesystemStore is 4096.
|
||||
func (s *FilesystemStore) MaxLength(l int) {
|
||||
for _, c := range s.Codecs {
|
||||
if codec, ok := c.(*securecookie.SecureCookie); ok {
|
||||
codec.MaxLength(l)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a session for the given name after adding it to the registry.
|
||||
//
|
||||
// See CookieStore.Get().
|
||||
func (s *FilesystemStore) Get(r *http.Request, name string) (*Session, error) {
|
||||
return GetRegistry(r).Get(s, name)
|
||||
}
|
||||
|
||||
// New returns a session for the given name without adding it to the registry.
|
||||
//
|
||||
// See CookieStore.New().
|
||||
func (s *FilesystemStore) New(r *http.Request, name string) (*Session, error) {
|
||||
session := NewSession(s, name)
|
||||
opts := *s.Options
|
||||
session.Options = &opts
|
||||
session.IsNew = true
|
||||
var err error
|
||||
if c, errCookie := r.Cookie(name); errCookie == nil {
|
||||
err = securecookie.DecodeMulti(name, c.Value, &session.ID, s.Codecs...)
|
||||
if err == nil {
|
||||
err = s.load(session)
|
||||
if err == nil {
|
||||
session.IsNew = false
|
||||
}
|
||||
}
|
||||
}
|
||||
return session, err
|
||||
}
|
||||
|
||||
// Save adds a single session to the response.
|
||||
//
|
||||
// If the Options.MaxAge of the session is <= 0 then the session file will be
|
||||
// deleted from the store path. With this process it enforces the properly
|
||||
// session cookie handling so no need to trust in the cookie management in the
|
||||
// web browser.
|
||||
func (s *FilesystemStore) Save(r *http.Request, w http.ResponseWriter,
|
||||
session *Session) error {
|
||||
// Delete if max-age is <= 0
|
||||
if session.Options.MaxAge <= 0 {
|
||||
if err := s.erase(session); err != nil {
|
||||
return err
|
||||
}
|
||||
http.SetCookie(w, NewCookie(session.Name(), "", session.Options))
|
||||
return nil
|
||||
}
|
||||
|
||||
if session.ID == "" {
|
||||
// Because the ID is used in the filename, encode it to
|
||||
// use alphanumeric characters only.
|
||||
session.ID = strings.TrimRight(
|
||||
base32.StdEncoding.EncodeToString(
|
||||
securecookie.GenerateRandomKey(32)), "=")
|
||||
}
|
||||
if err := s.save(session); err != nil {
|
||||
return err
|
||||
}
|
||||
encoded, err := securecookie.EncodeMulti(session.Name(), session.ID,
|
||||
s.Codecs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
http.SetCookie(w, NewCookie(session.Name(), encoded, session.Options))
|
||||
return nil
|
||||
}
|
||||
|
||||
// MaxAge sets the maximum age for the store and the underlying cookie
|
||||
// implementation. Individual sessions can be deleted by setting Options.MaxAge
|
||||
// = -1 for that session.
|
||||
func (s *FilesystemStore) MaxAge(age int) {
|
||||
s.Options.MaxAge = age
|
||||
|
||||
// Set the maxAge for each securecookie instance.
|
||||
for _, codec := range s.Codecs {
|
||||
if sc, ok := codec.(*securecookie.SecureCookie); ok {
|
||||
sc.MaxAge(age)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// save writes encoded session.Values to a file.
|
||||
func (s *FilesystemStore) save(session *Session) error {
|
||||
encoded, err := securecookie.EncodeMulti(session.Name(), session.Values,
|
||||
s.Codecs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filename := filepath.Join(s.path, "session_"+session.ID)
|
||||
fileMutex.Lock()
|
||||
defer fileMutex.Unlock()
|
||||
return ioutil.WriteFile(filename, []byte(encoded), 0600)
|
||||
}
|
||||
|
||||
// load reads a file and decodes its content into session.Values.
|
||||
func (s *FilesystemStore) load(session *Session) error {
|
||||
filename := filepath.Join(s.path, "session_"+session.ID)
|
||||
fileMutex.RLock()
|
||||
defer fileMutex.RUnlock()
|
||||
fdata, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = securecookie.DecodeMulti(session.Name(), string(fdata),
|
||||
&session.Values, s.Codecs...); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// delete session file
|
||||
func (s *FilesystemStore) erase(session *Session) error {
|
||||
filename := filepath.Join(s.path, "session_"+session.ID)
|
||||
|
||||
fileMutex.RLock()
|
||||
defer fileMutex.RUnlock()
|
||||
|
||||
err := os.Remove(filename)
|
||||
return err
|
||||
}
|
|
@ -1,8 +0,0 @@
|
|||
# This is the official list of Gorilla WebSocket authors for copyright
|
||||
# purposes.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Gary Burd <gary@beagledreams.com>
|
||||
Joachim Bauch <mail@joachim-bauch.de>
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -1,64 +0,0 @@
|
|||
# Gorilla WebSocket
|
||||
|
||||
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
|
||||
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
|
||||
|
||||
[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket)
|
||||
[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket)
|
||||
|
||||
### Documentation
|
||||
|
||||
* [API Reference](http://godoc.org/github.com/gorilla/websocket)
|
||||
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
|
||||
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
|
||||
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
|
||||
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
|
||||
|
||||
### Status
|
||||
|
||||
The Gorilla WebSocket package provides a complete and tested implementation of
|
||||
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
|
||||
package API is stable.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/gorilla/websocket
|
||||
|
||||
### Protocol Compliance
|
||||
|
||||
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
|
||||
Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
|
||||
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
|
||||
|
||||
### Gorilla WebSocket compared with other packages
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
|
||||
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
|
||||
<tr><td>Passes <a href="http://autobahn.ws/testsuite/">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message</td><td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
|
||||
<tr><td colspan="3">Other Features</td></tr>
|
||||
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
|
||||
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
|
||||
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
|
||||
</table>
|
||||
|
||||
Notes:
|
||||
|
||||
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
|
||||
2. The application can get the type of a received data message by implementing
|
||||
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
|
||||
function.
|
||||
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
|
||||
Read returns when the input buffer is full or a frame boundary is
|
||||
encountered. Each call to Write sends a single frame message. The Gorilla
|
||||
io.Reader and io.WriteCloser operate on a single WebSocket message.
|
||||
|
|
@ -1,392 +0,0 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
// invalid.
var ErrBadHandshake = errors.New("websocket: bad handshake")

// errInvalidCompression is returned when the peer accepts the
// permessage-deflate extension but omits the "no context takeover"
// parameters this package requires (see the extension check in Dial).
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
type Dialer struct {
	// NetDial specifies the dial function for creating TCP connections. If
	// NetDial is nil, net.Dial is used.
	NetDial func(network, addr string) (net.Conn, error)

	// Proxy specifies a function to return a proxy for a given
	// Request. If the function returns a non-nil error, the
	// request is aborted with the provided error.
	// If Proxy is nil or returns a nil *URL, no proxy is used.
	Proxy func(*http.Request) (*url.URL, error)

	// TLSClientConfig specifies the TLS configuration to use with tls.Client.
	// If nil, the default configuration is used.
	TLSClientConfig *tls.Config

	// HandshakeTimeout specifies the duration for the handshake to complete.
	// A zero value means no timeout.
	HandshakeTimeout time.Duration

	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
	// size is zero, then a useful default size is used. The I/O buffer sizes
	// do not limit the size of the messages that can be sent or received.
	ReadBufferSize, WriteBufferSize int

	// Subprotocols specifies the client's requested subprotocols.
	Subprotocols []string

	// EnableCompression specifies if the client should attempt to negotiate
	// per message compression (RFC 7692). Setting this value to true does not
	// guarantee that compression will be supported. Currently only "no context
	// takeover" modes are supported.
	EnableCompression bool

	// Jar specifies the cookie jar.
	// If Jar is nil, cookies are not sent in requests and ignored
	// in responses.
	Jar http.CookieJar
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")

// parseURL parses a ws: or wss: URL.
//
// This function is a replacement for the standard library url.Parse function.
// In Go 1.4 and earlier, url.Parse loses information from the path.
func parseURL(s string) (*url.URL, error) {
	// From the RFC:
	//
	//   ws-URI  = "ws:" "//" host [ ":" port ] path [ "?" query ]
	//   wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
	var u url.URL
	if rest := strings.TrimPrefix(s, "ws://"); rest != s {
		u.Scheme = "ws"
		s = rest
	} else if rest := strings.TrimPrefix(s, "wss://"); rest != s {
		u.Scheme = "wss"
		s = rest
	} else {
		return nil, errMalformedURL
	}

	// Split off the query before the path so the "/" search below only
	// scans the authority + path portion.
	if q := strings.IndexByte(s, '?'); q >= 0 {
		u.RawQuery = s[q+1:]
		s = s[:q]
	}

	// Everything from the first "/" on is the request-URI path; default "/".
	u.Opaque = "/"
	if p := strings.IndexByte(s, '/'); p >= 0 {
		u.Opaque = s[p:]
		s = s[:p]
	}

	u.Host = s

	if strings.Contains(u.Host, "@") {
		// Don't bother parsing user information because user information is
		// not allowed in websocket URIs.
		return nil, errMalformedURL
	}

	return &u, nil
}
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values,
// except Proxy, which is taken from the environment via
// http.ProxyFromEnvironment.
var DefaultDialer = &Dialer{
	Proxy: http.ProxyFromEnvironment,
}
|
||||
|
||||
// Dial creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
|
||||
if d == nil {
|
||||
d = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := parseURL(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
|
||||
var proxyURL *url.URL
|
||||
// Check wether the proxy method has been configured
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err = d.Proxy(req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var targetHostPort string
|
||||
if proxyURL != nil {
|
||||
targetHostPort, _ = hostPortNoPort(proxyURL)
|
||||
} else {
|
||||
targetHostPort = hostPort
|
||||
}
|
||||
|
||||
var deadline time.Time
|
||||
if d.HandshakeTimeout != 0 {
|
||||
deadline = time.Now().Add(d.HandshakeTimeout)
|
||||
}
|
||||
|
||||
netDial := d.NetDial
|
||||
if netDial == nil {
|
||||
netDialer := &net.Dialer{Deadline: deadline}
|
||||
netDial = netDialer.Dial
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", targetHostPort)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if err := netConn.SetDeadline(deadline); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if proxyURL != nil {
|
||||
connectHeader := make(http.Header)
|
||||
if user := proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: hostPort},
|
||||
Host: hostPort,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
connectReq.Write(netConn)
|
||||
|
||||
// Read response.
|
||||
// Okay to use and discard buffered reader here, because
|
||||
// TLS server will not speak until spoken to.
|
||||
br := bufio.NewReader(netConn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, nil, errors.New(f[1])
|
||||
}
|
||||
}
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if d.Jar != nil {
|
||||
if rc := resp.Cookies(); len(rc) > 0 {
|
||||
d.Jar.SetCookies(u, rc)
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 ||
|
||||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
|
||||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
|
||||
for _, ext := range parseExtensions(resp.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
_, snct := ext["server_no_context_takeover"]
|
||||
_, cnct := ext["client_no_context_takeover"]
|
||||
if !snct || !cnct {
|
||||
return nil, resp, errInvalidCompression
|
||||
}
|
||||
conn.newCompressionWriter = compressNoContextTakeover
|
||||
conn.newDecompressionReader = decompressNoContextTakeover
|
||||
break
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
return cfg.Clone()
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
//
// Pre-Go 1.8 fallback: fields added to tls.Config after this list was
// written are not copied.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		// Treat nil as an empty configuration.
		return &tls.Config{}
	}
	return &tls.Config{
		Rand:                     cfg.Rand,
		Time:                     cfg.Time,
		Certificates:             cfg.Certificates,
		NameToCertificate:        cfg.NameToCertificate,
		GetCertificate:           cfg.GetCertificate,
		RootCAs:                  cfg.RootCAs,
		NextProtos:               cfg.NextProtos,
		ServerName:               cfg.ServerName,
		ClientAuth:               cfg.ClientAuth,
		ClientCAs:                cfg.ClientCAs,
		InsecureSkipVerify:       cfg.InsecureSkipVerify,
		CipherSuites:             cfg.CipherSuites,
		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
		ClientSessionCache:       cfg.ClientSessionCache,
		MinVersion:               cfg.MinVersion,
		MaxVersion:               cfg.MaxVersion,
		CurvePreferences:         cfg.CurvePreferences,
	}
}
|
|
@ -1,148 +0,0 @@
|
|||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
	minCompressionLevel     = -2 // flate.HuffmanOnly not defined in Go < 1.6
	maxCompressionLevel     = flate.BestCompression
	defaultCompressionLevel = 1
)

var (
	// flateWriterPools holds one pool per compression level so flate
	// writers (and their internal buffers) are reused across messages.
	flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
	// flateReaderPool pools flate readers; each is Reset onto a new source
	// before use (see decompressNoContextTakeover).
	flateReaderPool = sync.Pool{New: func() interface{} {
		return flate.NewReader(nil)
	}}
)
|
||||
|
||||
// decompressNoContextTakeover returns a ReadCloser that inflates the
// message payload in r. The flate reader is taken from flateReaderPool and
// is returned to the pool by the wrapper's Close (see flateReadWrapper).
func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
	const tail =
		// Add four bytes as specified in RFC
		"\x00\x00\xff\xff" +
		// Add final block to squelch unexpected EOF error from flate reader.
		"\x01\x00\x00\xff\xff"

	fr, _ := flateReaderPool.Get().(io.ReadCloser)
	// Reset the pooled reader onto the payload followed by the synthetic tail.
	fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
	return &flateReadWrapper{fr}
}
|
||||
|
||||
func isValidCompressionLevel(level int) bool {
|
||||
return minCompressionLevel <= level && level <= maxCompressionLevel
|
||||
}
|
||||
|
||||
// compressNoContextTakeover returns a WriteCloser that deflates data at the
// given level into w. A flate writer is taken from (or later returned to)
// the per-level pool; truncWriter strips the four-byte flate tail that the
// wire format omits.
func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
	p := &flateWriterPools[level-minCompressionLevel]
	tw := &truncWriter{w: w}
	fw, _ := p.Get().(*flate.Writer)
	if fw == nil {
		// Pool empty: create a writer for this level.
		fw, _ = flate.NewWriter(tw, level)
	} else {
		// Reuse the pooled writer against the new destination.
		fw.Reset(tw)
	}
	return &flateWriteWrapper{fw: fw, tw: tw, p: p}
}
|
||||
|
||||
// truncWriter is an io.Writer that writes all but the last four bytes of the
// stream to another io.Writer.
type truncWriter struct {
	w io.WriteCloser // destination for all but the held-back bytes
	n int            // number of bytes currently buffered in p
	p [4]byte        // holds the last four bytes seen so far
}
|
||||
|
||||
// Write forwards p to the underlying writer while always retaining the most
// recent four bytes in w.p, so the stream's final four bytes are never
// forwarded (they are inspected and dropped at close time).
func (w *truncWriter) Write(p []byte) (int, error) {
	n := 0

	// fill buffer first for simplicity.
	if w.n < len(w.p) {
		n = copy(w.p[w.n:], p)
		p = p[n:]
		w.n += n
		if len(p) == 0 {
			return n, nil
		}
	}

	// The buffer is full; m bytes of it must be flushed to make room for
	// the tail of p (at most 4).
	m := len(p)
	if m > len(w.p) {
		m = len(w.p)
	}

	if nn, err := w.w.Write(w.p[:m]); err != nil {
		return n + nn, err
	}

	// Shift the surviving buffered bytes down and refill the buffer's tail
	// with the last m bytes of p; everything before them is forwarded.
	copy(w.p[:], w.p[m:])
	copy(w.p[len(w.p)-m:], p[len(p)-m:])
	nn, err := w.w.Write(p[:len(p)-m])
	return n + nn, err
}
|
||||
|
||||
// flateWriteWrapper adapts a pooled *flate.Writer to the io.WriteCloser
// contract used for compressed message writers. Close flushes, validates the
// stream tail and returns the writer to its pool.
type flateWriteWrapper struct {
	fw *flate.Writer // nil after Close
	tw *truncWriter  // strips the trailing 0x00 0x00 0xff 0xff
	p  *sync.Pool    // pool to return fw to on Close
}
|
||||
|
||||
func (w *flateWriteWrapper) Write(p []byte) (int, error) {
|
||||
if w.fw == nil {
|
||||
return 0, errWriteClosed
|
||||
}
|
||||
return w.fw.Write(p)
|
||||
}
|
||||
|
||||
// Close flushes the flate stream, returns the writer to its pool, verifies
// that the bytes held back by truncWriter are the expected flate tail, and
// closes the underlying writer. Flush errors take precedence over close
// errors.
func (w *flateWriteWrapper) Close() error {
	if w.fw == nil {
		// Already closed.
		return errWriteClosed
	}
	err1 := w.fw.Flush()
	w.p.Put(w.fw)
	w.fw = nil
	// Flush must end with the sync marker 0x00 0x00 0xff 0xff, which
	// truncWriter withholds from the wire.
	if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
		return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
	}
	err2 := w.tw.w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}
|
||||
|
||||
// flateReadWrapper adapts a pooled flate reader to io.ReadCloser; Close
// returns the reader to flateReaderPool.
type flateReadWrapper struct {
	fr io.ReadCloser // nil after Close
}
|
||||
|
||||
func (r *flateReadWrapper) Read(p []byte) (int, error) {
|
||||
if r.fr == nil {
|
||||
return 0, io.ErrClosedPipe
|
||||
}
|
||||
n, err := r.fr.Read(p)
|
||||
if err == io.EOF {
|
||||
// Preemptively place the reader back in the pool. This helps with
|
||||
// scenarios where the application does not call NextReader() soon after
|
||||
// this final read.
|
||||
r.Close()
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *flateReadWrapper) Close() error {
|
||||
if r.fr == nil {
|
||||
return io.ErrClosedPipe
|
||||
}
|
||||
err := r.fr.Close()
|
||||
flateReaderPool.Put(r.fr)
|
||||
r.fr = nil
|
||||
return err
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,18 +0,0 @@
|
|||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.5
|
||||
|
||||
package websocket
|
||||
|
||||
import "io"
|
||||
|
||||
// read returns the next n bytes from the buffered reader and advances past
// them. An io.EOF before n bytes are available is reported as
// errUnexpectedEOF.
func (c *Conn) read(n int) ([]byte, error) {
	p, err := c.br.Peek(n)
	if err == io.EOF {
		err = errUnexpectedEOF
	}
	// Consume exactly the bytes that were peeked (Discard requires Go 1.5+).
	c.br.Discard(len(p))
	return p, err
}
|
|
@ -1,21 +0,0 @@
|
|||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.5
|
||||
|
||||
package websocket
|
||||
|
||||
import "io"
|
||||
|
||||
// read returns the next n bytes from the buffered reader and advances past
// them. An io.EOF before n bytes are available is reported as
// errUnexpectedEOF. This pre-Go 1.5 variant emulates bufio.Reader.Discard
// by re-reading the peeked bytes.
func (c *Conn) read(n int) ([]byte, error) {
	p, err := c.br.Peek(n)
	if err == io.EOF {
		err = errUnexpectedEOF
	}
	if len(p) > 0 {
		// advance over the bytes just read
		io.ReadFull(c.br, p)
	}
	return p, err
}
|
|
@ -1,180 +0,0 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application uses
|
||||
// the Upgrade function from an Upgrader object with a HTTP request handler
|
||||
// to get a pointer to a Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// if err = conn.WriteMessage(messageType, p); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In above snippet of code, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by sending a close message to the
|
||||
// peer and returning a *CloseError from the NextReader, ReadMessage or the
|
||||
// message Read method.
|
||||
//
|
||||
// Connections handle received ping and pong messages by invoking callback
|
||||
// functions set with SetPingHandler and SetPongHandler methods. The callback
|
||||
// functions are called from the NextReader, ReadMessage and the message Read
|
||||
// methods.
|
||||
//
|
||||
// The default ping handler sends a pong to the peer. The application's reading
|
||||
// goroutine can block for a short time while the handler writes the pong data
|
||||
// to the connection.
|
||||
//
|
||||
// The application must read the connection to process ping, pong and close
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections support one concurrent reader and one concurrent writer.
|
||||
//
|
||||
// Applications are responsible for ensuring that no more than one goroutine
|
||||
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
|
||||
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
|
||||
// that no more than one goroutine calls the read methods (NextReader,
|
||||
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
|
||||
// concurrently.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and not equal to the
|
||||
// Host request header.
|
||||
//
|
||||
// An application can allow connections from any origin by specifying a
|
||||
// function that always returns true:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// CheckOrigin: func(r *http.Request) bool { return true },
|
||||
// }
|
||||
//
|
||||
// The deprecated Upgrade function does not enforce an origin policy. It's the
|
||||
// application's responsibility to check the Origin header before calling
|
||||
// Upgrade.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
// Per message compression extensions (RFC 7692) are experimentally supported
|
||||
// by this package in a limited capacity. Setting the EnableCompression option
|
||||
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
|
||||
// support.
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// EnableCompression: true,
|
||||
// }
|
||||
//
|
||||
// If compression was successfully negotiated with the connection's peer, any
|
||||
// message received in compressed form will be automatically decompressed.
|
||||
// All Read methods will return uncompressed bytes.
|
||||
//
|
||||
// Per message compression of messages written to a connection can be enabled
|
||||
// or disabled by calling the corresponding Conn method:
|
||||
//
|
||||
// conn.EnableWriteCompression(false)
|
||||
//
|
||||
// Currently this package does not support compression with "context takeover".
|
||||
// This means that messages must be compressed and decompressed in isolation,
|
||||
// without retaining sliding window or dictionary state across messages. For
|
||||
// more details refer to RFC 7692.
|
||||
//
|
||||
// Use of compression is experimental and may result in decreased performance.
|
||||
package websocket
|
|
@ -1,55 +0,0 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// WriteJSON writes the JSON encoding of v to the connection.
//
// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
	return c.WriteJSON(v)
}
|
||||
|
||||
// WriteJSON writes the JSON encoding of v to the connection.
|
||||
//
|
||||
// See the documentation for encoding/json Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (c *Conn) WriteJSON(v interface{}) error {
|
||||
w, err := c.NextWriter(TextMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err1 := json.NewEncoder(w).Encode(v)
|
||||
err2 := w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and
// stores it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
	return c.ReadJSON(v)
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and stores
|
||||
// it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for the encoding/json Unmarshal function for details
|
||||
// about the conversion of JSON to a Go value.
|
||||
func (c *Conn) ReadJSON(v interface{}) error {
|
||||
_, r, err := c.NextReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.NewDecoder(r).Decode(v)
|
||||
if err == io.EOF {
|
||||
// One value is expected in the message.
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
|
||||
// this source code is governed by a BSD-style license that can be found in the
|
||||
// LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package websocket
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// wordSize is the machine word size in bytes; maskBytes XORs a word at a
// time for large buffers.
const wordSize = int(unsafe.Sizeof(uintptr(0)))

// maskBytes XORs the bytes in b in place with the repeating 4-byte mask key,
// starting at key offset pos&3, and returns the key offset to use for the
// byte following b. Word-at-a-time masking via unsafe is used when b is
// large enough to amortize the setup cost.
func maskBytes(key [4]byte, pos int, b []byte) int {

	// Mask one byte at a time for small buffers.
	if len(b) < 2*wordSize {
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
		return pos & 3
	}

	// Mask one byte at a time to word boundary.
	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
		n = wordSize - n
		for i := range b[:n] {
			b[i] ^= key[pos&3]
			pos++
		}
		b = b[n:]
	}

	// Create aligned word size key by repeating the 4-byte key, rotated to
	// the current position.
	var k [wordSize]byte
	for i := range k {
		k[i] = key[(pos+i)&3]
	}
	kw := *(*uintptr)(unsafe.Pointer(&k))

	// Mask one word at a time.
	n := (len(b) / wordSize) * wordSize
	for i := 0; i < n; i += wordSize {
		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
	}

	// Mask one byte at a time for remaining bytes.
	b = b[n:]
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}

	return pos & 3
}
|
|
@ -1,15 +0,0 @@
|
|||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
|
||||
// this source code is governed by a BSD-style license that can be found in the
|
||||
// LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package websocket
|
||||
|
||||
// maskBytes XORs the bytes in b in place with the repeating 4-byte mask key,
// starting at key offset pos&3, and returns the key offset to use for the
// byte following b. This portable variant is used where unsafe is
// unavailable (App Engine).
func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := 0; i < len(b); i++ {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
|
|
@ -1,103 +0,0 @@
|
|||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PreparedMessage caches on the wire representations of a message payload.
|
||||
// Use PreparedMessage to efficiently send a message payload to multiple
|
||||
// connections. PreparedMessage is especially useful when compression is used
|
||||
// because the CPU and memory expensive compression operation can be executed
|
||||
// once for a given set of compression options.
|
||||
type PreparedMessage struct {
|
||||
messageType int
|
||||
data []byte
|
||||
err error
|
||||
mu sync.Mutex
|
||||
frames map[prepareKey]*preparedFrame
|
||||
}
|
||||
|
||||
// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
|
||||
type prepareKey struct {
|
||||
isServer bool
|
||||
compress bool
|
||||
compressionLevel int
|
||||
}
|
||||
|
||||
// preparedFrame contains data in wire representation.
|
||||
type preparedFrame struct {
|
||||
once sync.Once
|
||||
data []byte
|
||||
}
|
||||
|
||||
// NewPreparedMessage returns an initialized PreparedMessage. You can then send
|
||||
// it to connection using WritePreparedMessage method. Valid wire
|
||||
// representation will be calculated lazily only once for a set of current
|
||||
// connection options.
|
||||
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
|
||||
pm := &PreparedMessage{
|
||||
messageType: messageType,
|
||||
frames: make(map[prepareKey]*preparedFrame),
|
||||
data: data,
|
||||
}
|
||||
|
||||
// Prepare a plain server frame.
|
||||
_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// To protect against caller modifying the data argument, remember the data
|
||||
// copied to the plain server frame.
|
||||
pm.data = frameData[len(frameData)-len(data):]
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
|
||||
pm.mu.Lock()
|
||||
frame, ok := pm.frames[key]
|
||||
if !ok {
|
||||
frame = &preparedFrame{}
|
||||
pm.frames[key] = frame
|
||||
}
|
||||
pm.mu.Unlock()
|
||||
|
||||
var err error
|
||||
frame.once.Do(func() {
|
||||
// Prepare a frame using a 'fake' connection.
|
||||
// TODO: Refactor code in conn.go to allow more direct construction of
|
||||
// the frame.
|
||||
mu := make(chan bool, 1)
|
||||
mu <- true
|
||||
var nc prepareConn
|
||||
c := &Conn{
|
||||
conn: &nc,
|
||||
mu: mu,
|
||||
isServer: key.isServer,
|
||||
compressionLevel: key.compressionLevel,
|
||||
enableWriteCompression: true,
|
||||
writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
|
||||
}
|
||||
if key.compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
}
|
||||
err = c.WriteMessage(pm.messageType, pm.data)
|
||||
frame.data = nc.buf.Bytes()
|
||||
})
|
||||
return pm.messageType, frame.data, err
|
||||
}
|
||||
|
||||
type prepareConn struct {
|
||||
buf bytes.Buffer
|
||||
net.Conn
|
||||
}
|
||||
|
||||
func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) }
|
||||
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
|
|
@ -1,291 +0,0 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
|
||||
// size is zero, then buffers allocated by the HTTP server are used. The
|
||||
// I/O buffer sizes do not limit the size of the messages that can be sent
|
||||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is set, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client.
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, the host in the Origin header must not be set or
|
||||
// must match the host of the request.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
// message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
w.Header().Set("Sec-Websocket-Version", "13")
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return u.Host == r.Host
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
// Negotiate PMCE
|
||||
var compress bool
|
||||
if u.EnableCompression {
|
||||
for _, ext := range parseExtensions(r.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
compress = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
netConn net.Conn
|
||||
err error
|
||||
)
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err = h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
if brw.Reader.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
p := c.writeBuf[:0]
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-Websocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// This function is deprecated, use websocket.Upgrader instead.
|
||||
//
|
||||
// The application is responsible for checking the request origin before
|
||||
// calling Upgrade. An example implementation of the same origin policy is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", 403)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
||||
|
||||
// IsWebSocketUpgrade returns true if the client requested upgrade to the
|
||||
// WebSocket protocol.
|
||||
func IsWebSocketUpgrade(r *http.Request) bool {
|
||||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
|
|
@ -1,214 +0,0 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
var octetTypes [256]byte
|
||||
|
||||
const (
|
||||
isTokenOctet = 1 << iota
|
||||
isSpaceOctet
|
||||
)
|
||||
|
||||
func init() {
|
||||
// From RFC 2616
|
||||
//
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t byte
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
|
||||
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
|
||||
t |= isSpaceOctet
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isTokenOctet
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpaceOctet == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func nextToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isTokenOctet == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return nextToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j += 1
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j += 1
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains token.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if strings.EqualFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parseExtensiosn parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
// extension-list = 1#extension
|
||||
// extension = extension-token *( ";" extension-param )
|
||||
// extension-token = registered-token
|
||||
// registered-token = token
|
||||
// extension-param = token [ "=" (token | quoted-string) ]
|
||||
// ;When using the quoted-string syntax variant, the value
|
||||
// ;after quoted-string unescaping MUST conform to the
|
||||
// ;'token' ABNF.
|
||||
|
||||
var result []map[string]string
|
||||
headers:
|
||||
for _, s := range header["Sec-Websocket-Extensions"] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
ext := map[string]string{"": t}
|
||||
for {
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ";") {
|
||||
break
|
||||
}
|
||||
var k string
|
||||
k, s = nextToken(skipSpace(s[1:]))
|
||||
if k == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
var v string
|
||||
if strings.HasPrefix(s, "=") {
|
||||
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
|
||||
s = skipSpace(s)
|
||||
}
|
||||
if s != "" && s[0] != ',' && s[0] != ';' {
|
||||
continue headers
|
||||
}
|
||||
ext[k] = v
|
||||
}
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
result = append(result, ext)
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
Copyright (C) 2010 nsf <no.smile.face@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -1,195 +0,0 @@
|
|||
## An autocompletion daemon for the Go programming language
|
||||
|
||||
Gocode is a helper tool which is intended to be integrated with your source code editor, like vim, neovim and emacs. It provides several advanced capabilities, which currently includes:
|
||||
|
||||
- Context-sensitive autocompletion
|
||||
|
||||
It is called *daemon*, because it uses client/server architecture for caching purposes. In particular, it makes autocompletions very fast. Typical autocompletion time with warm cache is 30ms, which is barely noticeable.
|
||||
|
||||
Also watch the [demo screencast](http://nosmileface.ru/images/gocode-demo.swf).
|
||||
|
||||
![Gocode in vim](http://nosmileface.ru/images/gocode-screenshot.png)
|
||||
|
||||
![Gocode in emacs](http://nosmileface.ru/images/emacs-gocode.png)
|
||||
|
||||
### Setup
|
||||
|
||||
1. You should have a correctly installed Go compiler environment and your personal workspace ($GOPATH). If you have no idea what **$GOPATH** is, take a look [here](http://golang.org/doc/code.html). Please make sure that your **$GOPATH/bin** is available in your **$PATH**. This is important, because most editors assume that **gocode** binary is available in one of the directories, specified by your **$PATH** environment variable. Otherwise manually copy the **gocode** binary from **$GOPATH/bin** to a location which is part of your **$PATH** after getting it in step 2.
|
||||
|
||||
Do these steps only if you understand why you need to do them:
|
||||
|
||||
`export GOPATH=$HOME/goprojects`
|
||||
|
||||
`export PATH=$PATH:$GOPATH/bin`
|
||||
|
||||
2. Then you need to get the appropriate version of the gocode, for 6g/8g/5g compiler you can do this:
|
||||
|
||||
`go get -u github.com/nsf/gocode` (-u flag for "update")
|
||||
|
||||
Windows users should consider doing this instead:
|
||||
|
||||
`go get -u -ldflags -H=windowsgui github.com/nsf/gocode`
|
||||
|
||||
That way on the Windows OS gocode will be built as a GUI application and doing so solves hanging window issues with some of the editors.
|
||||
|
||||
3. Next steps are editor specific. See below.
|
||||
|
||||
### Vim setup
|
||||
|
||||
#### Vim manual installation
|
||||
|
||||
Note: As of go 1.5 there is no $GOROOT/misc/vim script. Suggested installation is via [vim-go plugin](https://github.com/fatih/vim-go).
|
||||
|
||||
In order to install vim scripts, you need to fulfill the following steps:
|
||||
|
||||
1. Install official Go vim scripts from **$GOROOT/misc/vim**. If you did that already, proceed to the step 2.
|
||||
|
||||
2. Install gocode vim scripts. Usually it's enough to do the following:
|
||||
|
||||
2.1. `vim/update.sh`
|
||||
|
||||
**update.sh** script does the following:
|
||||
|
||||
#!/bin/sh
|
||||
mkdir -p "$HOME/.vim/autoload"
|
||||
mkdir -p "$HOME/.vim/ftplugin/go"
|
||||
cp "${0%/*}/autoload/gocomplete.vim" "$HOME/.vim/autoload"
|
||||
cp "${0%/*}/ftplugin/go/gocomplete.vim" "$HOME/.vim/ftplugin/go"
|
||||
|
||||
2.2. Alternatively, you can create symlinks using symlink.sh script in order to avoid running update.sh after every gocode update.
|
||||
|
||||
**symlink.sh** script does the following:
|
||||
|
||||
#!/bin/sh
|
||||
cd "${0%/*}"
|
||||
ROOTDIR=`pwd`
|
||||
mkdir -p "$HOME/.vim/autoload"
|
||||
mkdir -p "$HOME/.vim/ftplugin/go"
|
||||
ln -s "$ROOTDIR/autoload/gocomplete.vim" "$HOME/.vim/autoload/"
|
||||
ln -s "$ROOTDIR/ftplugin/go/gocomplete.vim" "$HOME/.vim/ftplugin/go/"
|
||||
|
||||
3. Make sure vim has filetype plugin enabled. Simply add that to your **.vimrc**:
|
||||
|
||||
`filetype plugin on`
|
||||
|
||||
4. Autocompletion should work now. Use `<C-x><C-o>` for autocompletion (omnifunc autocompletion).
|
||||
|
||||
#### Using Vundle in Vim
|
||||
|
||||
Add the following line to your **.vimrc**:
|
||||
|
||||
`Plugin 'nsf/gocode', {'rtp': 'vim/'}`
|
||||
|
||||
And then update your packages by running `:PluginInstall`.
|
||||
|
||||
#### Using vim-plug in Vim
|
||||
|
||||
Add the following line to your **.vimrc**:
|
||||
|
||||
`Plug 'nsf/gocode', { 'rtp': 'vim', 'do': '~/.vim/plugged/gocode/vim/symlink.sh' }`
|
||||
|
||||
And then update your packages by running `:PlugInstall`.
|
||||
|
||||
#### Other
|
||||
|
||||
Alternatively take a look at the vundle/pathogen friendly repo: https://github.com/Blackrush/vim-gocode.
|
||||
|
||||
### Neovim setup
|
||||
#### Neovim manual installation
|
||||
|
||||
Neovim users should also follow `Vim manual installation`, except that you should goto `gocode/nvim` in step 2, and remember that, the Neovim configuration file is `~/.config/nvim/init.vim`.
|
||||
|
||||
#### Using Vundle in Neovim
|
||||
|
||||
Add the following line to your **init.vim**:
|
||||
|
||||
`Plugin 'nsf/gocode', {'rtp': 'nvim/'}`
|
||||
|
||||
And then update your packages by running `:PluginInstall`.
|
||||
|
||||
#### Using vim-plug in Neovim
|
||||
|
||||
Add the following line to your **init.vim**:
|
||||
|
||||
`Plug 'nsf/gocode', { 'rtp': 'nvim', 'do': '~/.config/nvim/plugged/gocode/nvim/symlink.sh' }`
|
||||
|
||||
And then update your packages by running `:PlugInstall`.
|
||||
|
||||
### Emacs setup
|
||||
|
||||
In order to install emacs script, you need to fulfill the following steps:
|
||||
|
||||
1. Install [auto-complete-mode](http://www.emacswiki.org/emacs/AutoComplete)
|
||||
|
||||
2. Copy **emacs/go-autocomplete.el** file from the gocode source distribution to a directory which is in your 'load-path' in emacs.
|
||||
|
||||
3. Add these lines to your **.emacs**:
|
||||
|
||||
(require 'go-autocomplete)
|
||||
(require 'auto-complete-config)
|
||||
(ac-config-default)
|
||||
|
||||
Also, there is an alternative plugin for emacs using company-mode. See `emacs-company/README` for installation instructions.
|
||||
|
||||
If you're a MacOSX user, you may find that script useful: https://github.com/purcell/exec-path-from-shell. It helps you with setting up the right environment variables as Go and gocode require it. By default it pulls the PATH, but don't forget to add the GOPATH as well, e.g.:
|
||||
|
||||
```
|
||||
(when (memq window-system '(mac ns))
|
||||
(exec-path-from-shell-initialize)
|
||||
(exec-path-from-shell-copy-env "GOPATH"))
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
You can change all available options using `gocode set` command. The config file uses json format and is usually stored somewhere in **~/.config/gocode** directory. On windows it's stored in the appropriate AppData folder. It's suggested to avoid modifying config file manually, do that using the `gocode set` command.
|
||||
|
||||
`gocode set` lists all options and their values.
|
||||
|
||||
`gocode set <option>` shows the value of that *option*.
|
||||
|
||||
`gocode set <option> <value>` sets the new *value* for that *option*.
|
||||
|
||||
- *propose-builtins*
|
||||
|
||||
A boolean option. If **true**, gocode will add built-in types, functions and constants to an autocompletion proposals. Default: **false**.
|
||||
|
||||
- *lib-path*
|
||||
|
||||
A string option. Allows you to add search paths for packages. By default, gocode only searches **$GOPATH/pkg/$GOOS_$GOARCH** and **$GOROOT/pkg/$GOOS_$GOARCH** in terms of previously existed environment variables. Also you can specify multiple paths using ':' (colon) as a separator (on Windows use semicolon ';'). The paths specified by *lib-path* are prepended to the default ones.
|
||||
|
||||
- *autobuild*
|
||||
|
||||
A boolean option. If **true**, gocode will try to automatically build out-of-date packages when their source files are modified, in order to obtain the freshest autocomplete results for them. This feature is experimental. Default: **false**.
|
||||
|
||||
- *force-debug-output*
|
||||
|
||||
A string option. If is not empty, gocode will forcefully redirect the logging into that file. Also forces enabling of the debug mode on the server side. Default: "" (empty).
|
||||
|
||||
- *package-lookup-mode*
|
||||
|
||||
A string option. If **go**, use standard Go package lookup rules. If **gb**, use gb-specific lookup rules. See https://github.com/constabulary/gb for details. Default: **go**.
|
||||
|
||||
- *close-timeout*
|
||||
|
||||
An integer option. If there have been no completion requests after this number of seconds, the gocode process will terminate. Defaults to 1800 (30 minutes).
|
||||
|
||||
### Debugging
|
||||
|
||||
If something went wrong, the first thing you may want to do is manually start the gocode daemon with a debug mode enabled and in a separate terminal window. It will show you all the stack traces, panics if any and additional info about autocompletion requests. Shutdown the daemon if it was already started and run a new one explicitly with a debug mode enabled:
|
||||
|
||||
`gocode close`
|
||||
|
||||
`gocode -s -debug`
|
||||
|
||||
Please, report bugs, feature suggestions and other rants to the [github issue tracker](http://github.com/nsf/gocode/issues) of this project.
|
||||
|
||||
### Developing
|
||||
|
||||
There is [Guide for IDE/editor plugin developers](docs/IDE_integration.md).
|
||||
|
||||
If you have troubles, please, contact me and I will try to do my best answering your questions. You can contact me via <a href="mailto:no.smile.face@gmail.com">email</a>. Or for short question find me on IRC: #go-nuts @ freenode.
|
||||
|
||||
### Misc
|
||||
|
||||
- It's a good idea to use the latest git version always. I'm trying to keep it in a working state.
|
||||
- Use `go install` (not `go build`) for building a local source tree. The objects in `pkg/` are needed for Gocode to work.
|
|
@ -1,689 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// out_buffers
|
||||
//
|
||||
// Temporary structure for writing autocomplete response.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// fields must be exported for RPC
|
||||
type candidate struct {
|
||||
Name string
|
||||
Type string
|
||||
Class decl_class
|
||||
}
|
||||
|
||||
type out_buffers struct {
|
||||
tmpbuf *bytes.Buffer
|
||||
candidates []candidate
|
||||
ctx *auto_complete_context
|
||||
tmpns map[string]bool
|
||||
ignorecase bool
|
||||
}
|
||||
|
||||
func new_out_buffers(ctx *auto_complete_context) *out_buffers {
|
||||
b := new(out_buffers)
|
||||
b.tmpbuf = bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
b.candidates = make([]candidate, 0, 64)
|
||||
b.ctx = ctx
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *out_buffers) Len() int {
|
||||
return len(b.candidates)
|
||||
}
|
||||
|
||||
func (b *out_buffers) Less(i, j int) bool {
|
||||
x := b.candidates[i]
|
||||
y := b.candidates[j]
|
||||
if x.Class == y.Class {
|
||||
return x.Name < y.Name
|
||||
}
|
||||
return x.Class < y.Class
|
||||
}
|
||||
|
||||
func (b *out_buffers) Swap(i, j int) {
|
||||
b.candidates[i], b.candidates[j] = b.candidates[j], b.candidates[i]
|
||||
}
|
||||
|
||||
// append_decl appends one completion candidate for decl under the given
// name, unless one of five filters rejects it. p is the partial prefix
// typed so far; class restricts the candidate kind (decl_invalid means
// "any class, filter by prefix instead"). The decl's type is
// pretty-printed through the shared tmpbuf, which is reset afterwards
// so it can be reused by the next call.
func (b *out_buffers) append_decl(p, name string, decl *decl, class decl_class) {
	// c1: hide universe-scope builtins unless ProposeBuiltins is set
	// ("Error" is always allowed through).
	c1 := !g_config.ProposeBuiltins && decl.scope == g_universe_scope && decl.name != "Error"
	// c2: a specific class was requested and this decl is another one.
	c2 := class != decl_invalid && decl.class != class
	// c3: no class filter, so match by name prefix instead.
	c3 := class == decl_invalid && !has_prefix(name, p, b.ignorecase)
	// c4: decl's own matching rule (see decl.matches) rejects it.
	c4 := !decl.matches()
	// c5: the decl's type expression is malformed (contains ast.BadExpr).
	c5 := !check_type_expr(decl.typ)

	if c1 || c2 || c3 || c4 || c5 {
		return
	}

	decl.pretty_print_type(b.tmpbuf)
	b.candidates = append(b.candidates, candidate{
		Name:  name,
		Type:  b.tmpbuf.String(),
		Class: decl.class,
	})
	b.tmpbuf.Reset()
}
|
||||
|
||||
// append_embedded proposes the children of every type embedded in decl,
// recursively, skipping names already taken by decl's own children or
// by an earlier embedded type. The tmpns set tracks names proposed so
// far; it is created at the first (outermost) level of the recursion
// and torn down again when that level finishes.
func (b *out_buffers) append_embedded(p string, decl *decl, class decl_class) {
	if decl.embedded == nil {
		return
	}

	first_level := false
	if b.tmpns == nil {
		// first level, create tmp namespace
		b.tmpns = make(map[string]bool)
		first_level = true

		// add all children of the current decl to the namespace
		for _, c := range decl.children {
			b.tmpns[c.name] = true
		}
	}

	for _, emb := range decl.embedded {
		typedecl := type_to_decl(emb, decl.scope)
		if typedecl == nil {
			continue
		}

		// prevent infinite recursion here
		if typedecl.flags&decl_visited != 0 {
			continue
		}
		typedecl.flags |= decl_visited
		// NOTE(review): defer inside a loop — every clear_visited runs
		// only when this call returns, so the visited flag stays set for
		// the remainder of the traversal. Presumably intentional (cycle
		// guard across sibling embeddings too); confirm before changing.
		defer typedecl.clear_visited()

		for _, c := range typedecl.children {
			if _, has := b.tmpns[c.name]; has {
				continue
			}
			b.append_decl(p, c.name, c, class)
			b.tmpns[c.name] = true
		}
		b.append_embedded(p, typedecl, class)
	}

	if first_level {
		// remove tmp namespace
		b.tmpns = nil
	}
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// auto_complete_context
|
||||
//
|
||||
// Context that holds cache structures for autocompletion needs. It
|
||||
// includes cache for packages and for main package files.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type auto_complete_context struct {
|
||||
current *auto_complete_file // currently edited file
|
||||
others []*decl_file_cache // other files of the current package
|
||||
pkg *scope
|
||||
|
||||
pcache package_cache // packages cache
|
||||
declcache *decl_cache // top-level declarations cache
|
||||
}
|
||||
|
||||
func new_auto_complete_context(pcache package_cache, declcache *decl_cache) *auto_complete_context {
|
||||
c := new(auto_complete_context)
|
||||
c.current = new_auto_complete_file("", declcache.context)
|
||||
c.pcache = pcache
|
||||
c.declcache = declcache
|
||||
return c
|
||||
}
|
||||
|
||||
// update_caches brings the package cache and the decl caches of the
// current package's sibling files up to date for one completion
// request, then rebuilds the merged package scope. Step order matters:
// collect imports, refresh packages, re-bind imports, then merge.
func (c *auto_complete_context) update_caches() {
	// temporary map for packages that we need to check for a cache expiration
	// map is used as a set of unique items to prevent double checks
	ps := make(map[string]*package_file_cache)

	// collect import information from all of the files
	c.pcache.append_packages(ps, c.current.packages)
	c.others = get_other_package_files(c.current.name, c.current.package_name, c.declcache)
	for _, other := range c.others {
		c.pcache.append_packages(ps, other.packages)
	}

	// refresh every collected package concurrently (panics on failure)
	update_packages(ps)

	// fix imports for all files
	fixup_packages(c.current.filescope, c.current.packages, c.pcache)
	for _, f := range c.others {
		fixup_packages(f.filescope, f.packages, c.pcache)
	}

	// At this point we have collected all top level declarations, now we need to
	// merge them in the common package block.
	c.merge_decls()
}
|
||||
|
||||
// merge_decls rebuilds the shared package scope from scratch: a fresh
// scope rooted at the universe scope receives the top-level decls of
// the current file, of every other file in the package, and the
// exported decls of any dot-imported packages.
func (c *auto_complete_context) merge_decls() {
	c.pkg = new_scope(g_universe_scope)
	merge_decls(c.current.filescope, c.pkg, c.current.decls)
	merge_decls_from_packages(c.pkg, c.current.packages, c.pcache)
	for _, f := range c.others {
		merge_decls(f.filescope, c.pkg, f.decls)
		merge_decls_from_packages(c.pkg, f.packages, c.pcache)
	}
}
|
||||
|
||||
func (c *auto_complete_context) make_decl_set(scope *scope) map[string]*decl {
|
||||
set := make(map[string]*decl, len(c.pkg.entities)*2)
|
||||
make_decl_set_recursive(set, scope)
|
||||
return set
|
||||
}
|
||||
|
||||
func (c *auto_complete_context) get_candidates_from_set(set map[string]*decl, partial string, class decl_class, b *out_buffers) {
|
||||
for key, value := range set {
|
||||
if value == nil {
|
||||
continue
|
||||
}
|
||||
value.infer_type()
|
||||
b.append_decl(partial, key, value, class)
|
||||
}
|
||||
}
|
||||
|
||||
// get_candidates_from_decl proposes completions reachable from a
// resolved declaration: its direct children, the fields of the
// underlying struct/interface type (if different), and the members of
// embedded types.
func (c *auto_complete_context) get_candidates_from_decl(cc cursor_context, class decl_class, b *out_buffers) {
	// propose all children of a subject declaration and
	for _, decl := range cc.decl.children {
		// only exported names are visible through a package
		if cc.decl.class == decl_package && !ast.IsExported(decl.name) {
			continue
		}
		if cc.struct_field {
			// if we're autocompleting struct field init, skip all methods
			if _, ok := decl.typ.(*ast.FuncType); ok {
				continue
			}
		}
		b.append_decl(cc.partial, decl.name, decl, class)
	}
	// propose all children of an underlying struct/interface type
	adecl := advance_to_struct_or_interface(cc.decl)
	if adecl != nil && adecl != cc.decl {
		for _, decl := range adecl.children {
			// only decl_var children here — presumably to avoid
			// re-proposing methods already handled above; verify.
			if decl.class == decl_var {
				b.append_decl(cc.partial, decl.name, decl, class)
			}
		}
	}
	// propose all children of its embedded types
	b.append_embedded(cc.partial, cc.decl, class)
}
|
||||
|
||||
// get_import_candidates collects import-path completions matching the
// partial path from every known package directory, de-duplicating
// across directories before appending them as decl_import candidates.
func (c *auto_complete_context) get_import_candidates(partial string, b *out_buffers) {
	pkgdirs := g_daemon.context.pkg_dirs()
	resultSet := map[string]struct{}{}
	for _, pkgdir := range pkgdirs {
		// convert srcpath to pkgpath and get candidates
		get_import_candidates_dir(pkgdir, filepath.FromSlash(partial), b.ignorecase, resultSet)
	}
	for k := range resultSet {
		b.candidates = append(b.candidates, candidate{Name: k, Class: decl_import})
	}
}
|
||||
|
||||
func get_import_candidates_dir(root, partial string, ignorecase bool, r map[string]struct{}) {
|
||||
var fpath string
|
||||
var match bool
|
||||
if strings.HasSuffix(partial, "/") {
|
||||
fpath = filepath.Join(root, partial)
|
||||
} else {
|
||||
fpath = filepath.Join(root, filepath.Dir(partial))
|
||||
match = true
|
||||
}
|
||||
fi := readdir(fpath)
|
||||
for i := range fi {
|
||||
name := fi[i].Name()
|
||||
rel, err := filepath.Rel(root, filepath.Join(fpath, name))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if match && !has_prefix(rel, partial, ignorecase) {
|
||||
continue
|
||||
} else if fi[i].IsDir() {
|
||||
get_import_candidates_dir(root, rel+string(filepath.Separator), ignorecase, r)
|
||||
} else {
|
||||
ext := filepath.Ext(name)
|
||||
if ext != ".a" {
|
||||
continue
|
||||
} else {
|
||||
rel = rel[0 : len(rel)-2]
|
||||
}
|
||||
r[vendorlessImportPath(filepath.ToSlash(rel))] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// returns three slices of the same length containing:
|
||||
// 1. apropos names
|
||||
// 2. apropos types (pretty-printed)
|
||||
// 3. apropos classes
|
||||
// and length of the part that should be replaced (if any)
|
||||
func (c *auto_complete_context) apropos(file []byte, filename string, cursor int) ([]candidate, int) {
|
||||
c.current.cursor = cursor
|
||||
c.current.name = filename
|
||||
|
||||
// Update caches and parse the current file.
|
||||
// This process is quite complicated, because I was trying to design it in a
|
||||
// concurrent fashion. Apparently I'm not really good at that. Hopefully
|
||||
// will be better in future.
|
||||
|
||||
// Ugly hack, but it actually may help in some cases. Insert a
|
||||
// semicolon right at the cursor location.
|
||||
filesemi := make([]byte, len(file)+1)
|
||||
copy(filesemi, file[:cursor])
|
||||
filesemi[cursor] = ';'
|
||||
copy(filesemi[cursor+1:], file[cursor:])
|
||||
|
||||
// Does full processing of the currently edited file (top-level declarations plus
|
||||
// active function).
|
||||
c.current.process_data(filesemi)
|
||||
|
||||
// Updates cache of other files and packages. See the function for details of
|
||||
// the process. At the end merges all the top-level declarations into the package
|
||||
// block.
|
||||
c.update_caches()
|
||||
|
||||
// And we're ready to Go. ;)
|
||||
|
||||
b := new_out_buffers(c)
|
||||
|
||||
partial := 0
|
||||
cc, ok := c.deduce_cursor_context(file, cursor)
|
||||
if !ok {
|
||||
var d *decl
|
||||
if ident, ok := cc.expr.(*ast.Ident); ok && g_config.UnimportedPackages {
|
||||
d = resolveKnownPackageIdent(ident.Name, c.current.name, c.current.context)
|
||||
}
|
||||
if d == nil {
|
||||
return nil, 0
|
||||
}
|
||||
cc.decl = d
|
||||
}
|
||||
|
||||
class := decl_invalid
|
||||
switch cc.partial {
|
||||
case "const":
|
||||
class = decl_const
|
||||
case "var":
|
||||
class = decl_var
|
||||
case "type":
|
||||
class = decl_type
|
||||
case "func":
|
||||
class = decl_func
|
||||
case "package":
|
||||
class = decl_package
|
||||
}
|
||||
|
||||
if cc.decl_import {
|
||||
c.get_import_candidates(cc.partial, b)
|
||||
if cc.partial != "" && len(b.candidates) == 0 {
|
||||
// as a fallback, try case insensitive approach
|
||||
b.ignorecase = true
|
||||
c.get_import_candidates(cc.partial, b)
|
||||
}
|
||||
} else if cc.decl == nil {
|
||||
// In case if no declaraion is a subject of completion, propose all:
|
||||
set := c.make_decl_set(c.current.scope)
|
||||
c.get_candidates_from_set(set, cc.partial, class, b)
|
||||
if cc.partial != "" && len(b.candidates) == 0 {
|
||||
// as a fallback, try case insensitive approach
|
||||
b.ignorecase = true
|
||||
c.get_candidates_from_set(set, cc.partial, class, b)
|
||||
}
|
||||
} else {
|
||||
c.get_candidates_from_decl(cc, class, b)
|
||||
if cc.partial != "" && len(b.candidates) == 0 {
|
||||
// as a fallback, try case insensitive approach
|
||||
b.ignorecase = true
|
||||
c.get_candidates_from_decl(cc, class, b)
|
||||
}
|
||||
}
|
||||
partial = len(cc.partial)
|
||||
|
||||
if len(b.candidates) == 0 {
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
sort.Sort(b)
|
||||
return b.candidates, partial
|
||||
}
|
||||
|
||||
func update_packages(ps map[string]*package_file_cache) {
|
||||
// initiate package cache update
|
||||
done := make(chan bool)
|
||||
for _, p := range ps {
|
||||
go func(p *package_file_cache) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
print_backtrace(err)
|
||||
done <- false
|
||||
}
|
||||
}()
|
||||
p.update_cache()
|
||||
done <- true
|
||||
}(p)
|
||||
}
|
||||
|
||||
// wait for its completion
|
||||
for _ = range ps {
|
||||
if !<-done {
|
||||
panic("One of the package cache updaters panicked")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func merge_decls(filescope *scope, pkg *scope, decls map[string]*decl) {
|
||||
for _, d := range decls {
|
||||
pkg.merge_decl(d)
|
||||
}
|
||||
filescope.parent = pkg
|
||||
}
|
||||
|
||||
// merge_decls_from_packages merges the exported declarations of every
// dot-imported package (alias ".") into pkgscope. Packages imported
// under any other alias are ignored here; fixup_packages binds those
// into the file scope instead.
func merge_decls_from_packages(pkgscope *scope, pkgs []package_import, pcache package_cache) {
	for _, p := range pkgs {
		path, alias := p.path, p.alias
		if alias != "." {
			continue
		}
		// deliberately shadows the loop variable: p is now the cached
		// package's main declaration tree
		p := pcache[path].main
		if p == nil {
			continue
		}
		for _, d := range p.children {
			if ast.IsExported(d.name) {
				pkgscope.merge_decl(d)
			}
		}
	}
}
|
||||
|
||||
// fixup_packages binds each imported package's declaration tree into
// the file scope under its effective alias. An empty alias falls back
// to the package's default name; dot-imports are skipped because
// merge_decls_from_packages puts their contents into the package scope.
func fixup_packages(filescope *scope, pkgs []package_import, pcache package_cache) {
	for _, p := range pkgs {
		path, alias := p.path, p.alias
		if alias == "" {
			// NOTE(review): assumes pcache[path] is present — presumably
			// guaranteed by the preceding update_packages pass; verify
			// whether a missing path can reach here.
			alias = pcache[path].defalias
		}
		// skip packages that will be merged to the package scope
		if alias == "." {
			continue
		}
		filescope.replace_decl(alias, pcache[path].main)
	}
}
|
||||
|
||||
// get_other_package_files loads (and refreshes) the decl caches of all
// other files belonging to the same package, one goroutine per file.
// A panicking updater is reported via print_backtrace and surfaces here
// as a panic.
func get_other_package_files(filename, packageName string, declcache *decl_cache) []*decl_file_cache {
	others := find_other_package_files(filename, packageName)

	ret := make([]*decl_file_cache, len(others))
	done := make(chan *decl_file_cache)

	for _, nm := range others {
		go func(name string) {
			// convert a panic into a nil result so the collection loop
			// below does not block forever
			defer func() {
				if err := recover(); err != nil {
					print_backtrace(err)
					done <- nil
				}
			}()
			done <- declcache.get_and_update(name)
		}(nm)
	}

	// NOTE(review): results are stored in completion order, so ret's
	// order need not match others' order — looks intentional since
	// callers iterate the slice without caring about order; confirm.
	for i := range others {
		ret[i] = <-done
		if ret[i] == nil {
			panic("One of the decl cache updaters panicked")
		}
	}

	return ret
}
|
||||
|
||||
func find_other_package_files(filename, package_name string) []string {
|
||||
if filename == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
dir, file := filepath.Split(filename)
|
||||
files_in_dir, err := readdir_lstat(dir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
count := 0
|
||||
for _, stat := range files_in_dir {
|
||||
ok, _ := filepath.Match("*.go", stat.Name())
|
||||
if !ok || stat.Name() == file {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
}
|
||||
|
||||
out := make([]string, 0, count)
|
||||
for _, stat := range files_in_dir {
|
||||
const non_regular = os.ModeDir | os.ModeSymlink |
|
||||
os.ModeDevice | os.ModeNamedPipe | os.ModeSocket
|
||||
|
||||
ok, _ := filepath.Match("*.go", stat.Name())
|
||||
if !ok || stat.Name() == file || stat.Mode()&non_regular != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
abspath := filepath.Join(dir, stat.Name())
|
||||
if file_package_name(abspath) == package_name {
|
||||
n := len(out)
|
||||
out = out[:n+1]
|
||||
out[n] = abspath
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// file_package_name returns the package name declared in the given Go
// source file, or the empty string if the package clause cannot be
// read or parsed.
//
// The previous version ignored the parse error and dereferenced
// file.Name unconditionally, panicking with a nil-pointer dereference
// on any unreadable or unparsable file.
func file_package_name(filename string) string {
	file, err := parser.ParseFile(token.NewFileSet(), filename, nil, parser.PackageClauseOnly)
	if err != nil || file == nil || file.Name == nil {
		return ""
	}
	return file.Name.Name
}
|
||||
|
||||
func make_decl_set_recursive(set map[string]*decl, scope *scope) {
|
||||
for name, ent := range scope.entities {
|
||||
if _, ok := set[name]; !ok {
|
||||
set[name] = ent
|
||||
}
|
||||
}
|
||||
if scope.parent != nil {
|
||||
make_decl_set_recursive(set, scope.parent)
|
||||
}
|
||||
}
|
||||
|
||||
func check_func_field_list(f *ast.FieldList) bool {
|
||||
if f == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, field := range f.List {
|
||||
if !check_type_expr(field.Type) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// checks for a type expression correctness, it the type expression has
|
||||
// ast.BadExpr somewhere, returns false, otherwise true
|
||||
func check_type_expr(e ast.Expr) bool {
|
||||
switch t := e.(type) {
|
||||
case *ast.StarExpr:
|
||||
return check_type_expr(t.X)
|
||||
case *ast.ArrayType:
|
||||
return check_type_expr(t.Elt)
|
||||
case *ast.SelectorExpr:
|
||||
return check_type_expr(t.X)
|
||||
case *ast.FuncType:
|
||||
a := check_func_field_list(t.Params)
|
||||
b := check_func_field_list(t.Results)
|
||||
return a && b
|
||||
case *ast.MapType:
|
||||
a := check_type_expr(t.Key)
|
||||
b := check_type_expr(t.Value)
|
||||
return a && b
|
||||
case *ast.Ellipsis:
|
||||
return check_type_expr(t.Elt)
|
||||
case *ast.ChanType:
|
||||
return check_type_expr(t.Value)
|
||||
case *ast.BadExpr:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// Status output
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type decl_slice []*decl
|
||||
|
||||
func (s decl_slice) Less(i, j int) bool {
|
||||
if s[i].class != s[j].class {
|
||||
return s[i].name < s[j].name
|
||||
}
|
||||
return s[i].class < s[j].class
|
||||
}
|
||||
func (s decl_slice) Len() int { return len(s) }
|
||||
func (s decl_slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
const (
|
||||
color_red = "\033[0;31m"
|
||||
color_red_bold = "\033[1;31m"
|
||||
color_green = "\033[0;32m"
|
||||
color_green_bold = "\033[1;32m"
|
||||
color_yellow = "\033[0;33m"
|
||||
color_yellow_bold = "\033[1;33m"
|
||||
color_blue = "\033[0;34m"
|
||||
color_blue_bold = "\033[1;34m"
|
||||
color_magenta = "\033[0;35m"
|
||||
color_magenta_bold = "\033[1;35m"
|
||||
color_cyan = "\033[0;36m"
|
||||
color_cyan_bold = "\033[1;36m"
|
||||
color_white = "\033[0;37m"
|
||||
color_white_bold = "\033[1;37m"
|
||||
color_none = "\033[0m"
|
||||
)
|
||||
|
||||
var g_decl_class_to_color = [...]string{
|
||||
decl_const: color_white_bold,
|
||||
decl_var: color_magenta,
|
||||
decl_type: color_cyan,
|
||||
decl_func: color_green,
|
||||
decl_package: color_red,
|
||||
decl_methods_stub: color_red,
|
||||
}
|
||||
|
||||
var g_decl_class_to_string_status = [...]string{
|
||||
decl_const: " const",
|
||||
decl_var: " var",
|
||||
decl_type: " type",
|
||||
decl_func: " func",
|
||||
decl_package: "package",
|
||||
decl_methods_stub: " stub",
|
||||
}
|
||||
|
||||
func (c *auto_complete_context) status() string {
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 4096))
|
||||
fmt.Fprintf(buf, "Server's GOMAXPROCS == %d\n", runtime.GOMAXPROCS(0))
|
||||
fmt.Fprintf(buf, "\nPackage cache contains %d entries\n", len(c.pcache))
|
||||
fmt.Fprintf(buf, "\nListing these entries:\n")
|
||||
for _, mod := range c.pcache {
|
||||
fmt.Fprintf(buf, "\tname: %s (default alias: %s)\n", mod.name, mod.defalias)
|
||||
fmt.Fprintf(buf, "\timports %d declarations and %d packages\n", len(mod.main.children), len(mod.others))
|
||||
if mod.mtime == -1 {
|
||||
fmt.Fprintf(buf, "\tthis package stays in cache forever (built-in package)\n")
|
||||
} else {
|
||||
mtime := time.Unix(0, mod.mtime)
|
||||
fmt.Fprintf(buf, "\tlast modification time: %s\n", mtime)
|
||||
}
|
||||
fmt.Fprintf(buf, "\n")
|
||||
}
|
||||
if c.current.name != "" {
|
||||
fmt.Fprintf(buf, "Last edited file: %s (package: %s)\n", c.current.name, c.current.package_name)
|
||||
if len(c.others) > 0 {
|
||||
fmt.Fprintf(buf, "\nOther files from the current package:\n")
|
||||
}
|
||||
for _, f := range c.others {
|
||||
fmt.Fprintf(buf, "\t%s\n", f.name)
|
||||
}
|
||||
fmt.Fprintf(buf, "\nListing declarations from files:\n")
|
||||
|
||||
const status_decls = "\t%s%s" + color_none + " " + color_yellow + "%s" + color_none + "\n"
|
||||
const status_decls_children = "\t%s%s" + color_none + " " + color_yellow + "%s" + color_none + " (%d)\n"
|
||||
|
||||
fmt.Fprintf(buf, "\n%s:\n", c.current.name)
|
||||
ds := make(decl_slice, len(c.current.decls))
|
||||
i := 0
|
||||
for _, d := range c.current.decls {
|
||||
ds[i] = d
|
||||
i++
|
||||
}
|
||||
sort.Sort(ds)
|
||||
for _, d := range ds {
|
||||
if len(d.children) > 0 {
|
||||
fmt.Fprintf(buf, status_decls_children,
|
||||
g_decl_class_to_color[d.class],
|
||||
g_decl_class_to_string_status[d.class],
|
||||
d.name, len(d.children))
|
||||
} else {
|
||||
fmt.Fprintf(buf, status_decls,
|
||||
g_decl_class_to_color[d.class],
|
||||
g_decl_class_to_string_status[d.class],
|
||||
d.name)
|
||||
}
|
||||
}
|
||||
|
||||
for _, f := range c.others {
|
||||
fmt.Fprintf(buf, "\n%s:\n", f.name)
|
||||
ds = make(decl_slice, len(f.decls))
|
||||
i = 0
|
||||
for _, d := range f.decls {
|
||||
ds[i] = d
|
||||
i++
|
||||
}
|
||||
sort.Sort(ds)
|
||||
for _, d := range ds {
|
||||
if len(d.children) > 0 {
|
||||
fmt.Fprintf(buf, status_decls_children,
|
||||
g_decl_class_to_color[d.class],
|
||||
g_decl_class_to_string_status[d.class],
|
||||
d.name, len(d.children))
|
||||
} else {
|
||||
fmt.Fprintf(buf, status_decls,
|
||||
g_decl_class_to_color[d.class],
|
||||
g_decl_class_to_string_status[d.class],
|
||||
d.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@ -1,418 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"log"
|
||||
)
|
||||
|
||||
func parse_decl_list(fset *token.FileSet, data []byte) ([]ast.Decl, error) {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("package p;")
|
||||
buf.Write(data)
|
||||
file, err := parser.ParseFile(fset, "", buf.Bytes(), parser.AllErrors)
|
||||
if err != nil {
|
||||
return file.Decls, err
|
||||
}
|
||||
return file.Decls, nil
|
||||
}
|
||||
|
||||
// log_parse_error logs a parse failure prefixed with intro. A
// scanner.ErrorList is expanded to one log line per contained error;
// any other error is logged on a single line.
func log_parse_error(intro string, err error) {
	el, ok := err.(scanner.ErrorList)
	if !ok {
		log.Printf("%s: %s", intro, err)
		return
	}
	log.Printf("%s:", intro)
	for _, e := range el {
		log.Printf(" %s", e)
	}
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// auto_complete_file
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type auto_complete_file struct {
|
||||
name string
|
||||
package_name string
|
||||
|
||||
decls map[string]*decl
|
||||
packages []package_import
|
||||
filescope *scope
|
||||
scope *scope
|
||||
|
||||
cursor int // for current file buffer only
|
||||
fset *token.FileSet
|
||||
context *package_lookup_context
|
||||
}
|
||||
|
||||
func new_auto_complete_file(name string, context *package_lookup_context) *auto_complete_file {
|
||||
p := new(auto_complete_file)
|
||||
p.name = name
|
||||
p.cursor = -1
|
||||
p.fset = token.NewFileSet()
|
||||
p.context = context
|
||||
return p
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) offset(p token.Pos) int {
|
||||
const fixlen = len("package p;")
|
||||
return f.fset.Position(p).Offset - fixlen
|
||||
}
|
||||
|
||||
// this one is used for current file buffer exclusively
|
||||
func (f *auto_complete_file) process_data(data []byte) {
|
||||
cur, filedata, block := rip_off_decl(data, f.cursor)
|
||||
file, err := parser.ParseFile(f.fset, "", filedata, parser.AllErrors)
|
||||
if err != nil && *g_debug {
|
||||
log_parse_error("Error parsing input file (outer block)", err)
|
||||
}
|
||||
f.package_name = package_name(file)
|
||||
|
||||
f.decls = make(map[string]*decl)
|
||||
f.packages = collect_package_imports(f.name, file.Decls, f.context)
|
||||
f.filescope = new_scope(nil)
|
||||
f.scope = f.filescope
|
||||
|
||||
for _, d := range file.Decls {
|
||||
anonymify_ast(d, 0, f.filescope)
|
||||
}
|
||||
|
||||
// process all top-level declarations
|
||||
for _, decl := range file.Decls {
|
||||
append_to_top_decls(f.decls, decl, f.scope)
|
||||
}
|
||||
if block != nil {
|
||||
// process local function as top-level declaration
|
||||
decls, err := parse_decl_list(f.fset, block)
|
||||
if err != nil && *g_debug {
|
||||
log_parse_error("Error parsing input file (inner block)", err)
|
||||
}
|
||||
|
||||
for _, d := range decls {
|
||||
anonymify_ast(d, 0, f.filescope)
|
||||
}
|
||||
|
||||
for _, decl := range decls {
|
||||
append_to_top_decls(f.decls, decl, f.scope)
|
||||
}
|
||||
|
||||
// process function internals
|
||||
f.cursor = cur
|
||||
for _, decl := range decls {
|
||||
f.process_decl_locals(decl)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// process_decl_locals scans one top-level declaration for local scopes
// containing the cursor. A *ast.FuncDecl whose body holds the cursor
// gets its receiver/params/results declared in a new scope and its
// body processed; any other declaration is only walked for function
// literals.
func (f *auto_complete_file) process_decl_locals(decl ast.Decl) {
	switch t := decl.(type) {
	case *ast.FuncDecl:
		if f.cursor_in(t.Body) {
			s := f.scope
			f.scope = new_scope(f.scope)

			f.process_field_list(t.Recv, s)
			f.process_field_list(t.Type.Params, s)
			f.process_field_list(t.Type.Results, s)
			f.process_block_stmt(t.Body)
		}
	default:
		// not a function declaration: still walk it so func literals
		// that may contain the cursor are processed
		v := new(func_lit_visitor)
		v.ctx = f
		ast.Walk(v, decl)
	}
}
|
||||
|
||||
func (f *auto_complete_file) process_decl(decl ast.Decl) {
|
||||
if t, ok := decl.(*ast.GenDecl); ok && f.offset(t.TokPos) > f.cursor {
|
||||
return
|
||||
}
|
||||
prevscope := f.scope
|
||||
foreach_decl(decl, func(data *foreach_decl_struct) {
|
||||
class := ast_decl_class(data.decl)
|
||||
if class != decl_type {
|
||||
f.scope, prevscope = advance_scope(f.scope)
|
||||
}
|
||||
for i, name := range data.names {
|
||||
typ, v, vi := data.type_value_index(i)
|
||||
|
||||
d := new_decl_full(name.Name, class, 0, typ, v, vi, prevscope)
|
||||
if d == nil {
|
||||
return
|
||||
}
|
||||
|
||||
f.scope.add_named_decl(d)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// process_block_stmt declares the locals of a block that contains the
// cursor: a nested scope is entered, every statement is processed, and
// function literals inside the block are handled by func_lit_visitor.
// Blocks not containing the cursor are skipped entirely.
func (f *auto_complete_file) process_block_stmt(block *ast.BlockStmt) {
	if block != nil && f.cursor_in(block) {
		f.scope, _ = advance_scope(f.scope)

		for _, stmt := range block.List {
			f.process_stmt(stmt)
		}

		// hack to process all func literals
		v := new(func_lit_visitor)
		v.ctx = f
		ast.Walk(v, block)
	}
}
|
||||
|
||||
type func_lit_visitor struct {
|
||||
ctx *auto_complete_file
|
||||
}
|
||||
|
||||
// Visit implements ast.Visitor. On finding a function literal whose
// body contains the cursor, it opens a new scope, declares the
// literal's params/results there, processes the body, and stops the
// walk (returns nil); otherwise the walk continues with v.
func (v *func_lit_visitor) Visit(node ast.Node) ast.Visitor {
	if t, ok := node.(*ast.FuncLit); ok && v.ctx.cursor_in(t.Body) {
		s := v.ctx.scope
		v.ctx.scope = new_scope(v.ctx.scope)

		v.ctx.process_field_list(t.Type.Params, s)
		v.ctx.process_field_list(t.Type.Results, s)
		v.ctx.process_block_stmt(t.Body)

		return nil
	}
	return v
}
|
||||
|
||||
func (f *auto_complete_file) process_stmt(stmt ast.Stmt) {
|
||||
switch t := stmt.(type) {
|
||||
case *ast.DeclStmt:
|
||||
f.process_decl(t.Decl)
|
||||
case *ast.AssignStmt:
|
||||
f.process_assign_stmt(t)
|
||||
case *ast.IfStmt:
|
||||
if f.cursor_in_if_head(t) {
|
||||
f.process_stmt(t.Init)
|
||||
} else if f.cursor_in_if_stmt(t) {
|
||||
f.scope, _ = advance_scope(f.scope)
|
||||
f.process_stmt(t.Init)
|
||||
f.process_block_stmt(t.Body)
|
||||
f.process_stmt(t.Else)
|
||||
}
|
||||
case *ast.BlockStmt:
|
||||
f.process_block_stmt(t)
|
||||
case *ast.RangeStmt:
|
||||
f.process_range_stmt(t)
|
||||
case *ast.ForStmt:
|
||||
if f.cursor_in_for_head(t) {
|
||||
f.process_stmt(t.Init)
|
||||
} else if f.cursor_in(t.Body) {
|
||||
f.scope, _ = advance_scope(f.scope)
|
||||
|
||||
f.process_stmt(t.Init)
|
||||
f.process_block_stmt(t.Body)
|
||||
}
|
||||
case *ast.SwitchStmt:
|
||||
f.process_switch_stmt(t)
|
||||
case *ast.TypeSwitchStmt:
|
||||
f.process_type_switch_stmt(t)
|
||||
case *ast.SelectStmt:
|
||||
f.process_select_stmt(t)
|
||||
case *ast.LabeledStmt:
|
||||
f.process_stmt(t.Stmt)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) process_select_stmt(a *ast.SelectStmt) {
|
||||
if !f.cursor_in(a.Body) {
|
||||
return
|
||||
}
|
||||
var prevscope *scope
|
||||
f.scope, prevscope = advance_scope(f.scope)
|
||||
|
||||
var last_cursor_after *ast.CommClause
|
||||
for _, s := range a.Body.List {
|
||||
if cc := s.(*ast.CommClause); f.cursor > f.offset(cc.Colon) {
|
||||
last_cursor_after = cc
|
||||
}
|
||||
}
|
||||
|
||||
if last_cursor_after != nil {
|
||||
if last_cursor_after.Comm != nil {
|
||||
//if lastCursorAfter.Lhs != nil && lastCursorAfter.Tok == token.DEFINE {
|
||||
if astmt, ok := last_cursor_after.Comm.(*ast.AssignStmt); ok && astmt.Tok == token.DEFINE {
|
||||
vname := astmt.Lhs[0].(*ast.Ident).Name
|
||||
v := new_decl_var(vname, nil, astmt.Rhs[0], -1, prevscope)
|
||||
f.scope.add_named_decl(v)
|
||||
}
|
||||
}
|
||||
for _, s := range last_cursor_after.Body {
|
||||
f.process_stmt(s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) process_type_switch_stmt(a *ast.TypeSwitchStmt) {
|
||||
if !f.cursor_in(a.Body) {
|
||||
return
|
||||
}
|
||||
var prevscope *scope
|
||||
f.scope, prevscope = advance_scope(f.scope)
|
||||
|
||||
f.process_stmt(a.Init)
|
||||
// type var
|
||||
var tv *decl
|
||||
if a, ok := a.Assign.(*ast.AssignStmt); ok {
|
||||
lhs := a.Lhs
|
||||
rhs := a.Rhs
|
||||
if lhs != nil && len(lhs) == 1 {
|
||||
tvname := lhs[0].(*ast.Ident).Name
|
||||
tv = new_decl_var(tvname, nil, rhs[0], -1, prevscope)
|
||||
}
|
||||
}
|
||||
|
||||
var last_cursor_after *ast.CaseClause
|
||||
for _, s := range a.Body.List {
|
||||
if cc := s.(*ast.CaseClause); f.cursor > f.offset(cc.Colon) {
|
||||
last_cursor_after = cc
|
||||
}
|
||||
}
|
||||
|
||||
if last_cursor_after != nil {
|
||||
if tv != nil {
|
||||
if last_cursor_after.List != nil && len(last_cursor_after.List) == 1 {
|
||||
tv.typ = last_cursor_after.List[0]
|
||||
tv.value = nil
|
||||
}
|
||||
f.scope.add_named_decl(tv)
|
||||
}
|
||||
for _, s := range last_cursor_after.Body {
|
||||
f.process_stmt(s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) process_switch_stmt(a *ast.SwitchStmt) {
|
||||
if !f.cursor_in(a.Body) {
|
||||
return
|
||||
}
|
||||
f.scope, _ = advance_scope(f.scope)
|
||||
|
||||
f.process_stmt(a.Init)
|
||||
var last_cursor_after *ast.CaseClause
|
||||
for _, s := range a.Body.List {
|
||||
if cc := s.(*ast.CaseClause); f.cursor > f.offset(cc.Colon) {
|
||||
last_cursor_after = cc
|
||||
}
|
||||
}
|
||||
if last_cursor_after != nil {
|
||||
for _, s := range last_cursor_after.Body {
|
||||
f.process_stmt(s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) process_range_stmt(a *ast.RangeStmt) {
|
||||
if !f.cursor_in(a.Body) {
|
||||
return
|
||||
}
|
||||
var prevscope *scope
|
||||
f.scope, prevscope = advance_scope(f.scope)
|
||||
|
||||
if a.Tok == token.DEFINE {
|
||||
if t, ok := a.Key.(*ast.Ident); ok {
|
||||
d := new_decl_var(t.Name, nil, a.X, 0, prevscope)
|
||||
if d != nil {
|
||||
d.flags |= decl_rangevar
|
||||
f.scope.add_named_decl(d)
|
||||
}
|
||||
}
|
||||
|
||||
if a.Value != nil {
|
||||
if t, ok := a.Value.(*ast.Ident); ok {
|
||||
d := new_decl_var(t.Name, nil, a.X, 1, prevscope)
|
||||
if d != nil {
|
||||
d.flags |= decl_rangevar
|
||||
f.scope.add_named_decl(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f.process_block_stmt(a.Body)
|
||||
}
|
||||
|
||||
// process_assign_stmt registers variables introduced by a short variable
// declaration (`a, b := ...`). Plain assignments (`=`) and statements after
// the cursor are ignored. If any LHS entry is not a simple identifier the
// whole statement is skipped.
func (f *auto_complete_file) process_assign_stmt(a *ast.AssignStmt) {
	if a.Tok != token.DEFINE || f.offset(a.TokPos) > f.cursor {
		return
	}

	names := make([]*ast.Ident, len(a.Lhs))
	for i, name := range a.Lhs {
		id, ok := name.(*ast.Ident)
		if !ok {
			// something is wrong, just ignore the whole stmt
			return
		}
		names[i] = id
	}

	var prevscope *scope
	f.scope, prevscope = advance_scope(f.scope)

	// Pair each LHS name with its type/value from the RHS; RHS expressions
	// are evaluated in the scope that existed before this statement.
	pack := decl_pack{names, nil, a.Rhs}
	for i, name := range pack.names {
		typ, v, vi := pack.type_value_index(i)
		d := new_decl_var(name.Name, typ, v, vi, prevscope)
		if d == nil {
			continue
		}

		f.scope.add_named_decl(d)
	}
}
|
||||
|
||||
// process_field_list adds the fields of a field list (e.g. function
// parameters or results) as variable declarations to the current scope.
// Declarations themselves are resolved against scope s.
func (f *auto_complete_file) process_field_list(field_list *ast.FieldList, s *scope) {
	if field_list != nil {
		decls := ast_field_list_to_decls(field_list, decl_var, 0, s, false)
		for _, d := range decls {
			f.scope.add_named_decl(d)
		}
	}
}
|
||||
|
||||
func (f *auto_complete_file) cursor_in_if_head(s *ast.IfStmt) bool {
|
||||
if f.cursor > f.offset(s.If) && f.cursor <= f.offset(s.Body.Lbrace) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// cursor_in_if_stmt reports whether the cursor lies anywhere inside the if
// statement (header or body), up to but not including its end position.
func (f *auto_complete_file) cursor_in_if_stmt(s *ast.IfStmt) bool {
	if f.cursor > f.offset(s.If) {
		// magic -10 comes from auto_complete_file.offset method, see
		// len() expr in there
		// (-10 signals an invalid/unknown end offset — treat as "inside")
		if f.offset(s.End()) == -10 || f.cursor < f.offset(s.End()) {
			return true
		}
	}
	return false
}
|
||||
|
||||
func (f *auto_complete_file) cursor_in_for_head(s *ast.ForStmt) bool {
|
||||
if f.cursor > f.offset(s.For) && f.cursor <= f.offset(s.Body.Lbrace) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) cursor_in(block *ast.BlockStmt) bool {
|
||||
if f.cursor == -1 || block == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if f.cursor > f.offset(block.Lbrace) && f.cursor <= f.offset(block.Rbrace) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -1,182 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"net/rpc"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// do_client connects to the gocode daemon (spawning it first when the dial
// fails), then dispatches the command named by the first CLI argument.
// Returns the process exit code (0 on success, 1 on error).
func do_client() int {
	addr := *g_addr
	if *g_sock == "unix" {
		addr = get_socket_filename()
	}

	// client
	client, err := rpc.Dial(*g_sock, addr)
	if err != nil {
		// Dial failed: remove a possibly stale unix socket, start the
		// server ourselves and retry the connection.
		if *g_sock == "unix" && file_exists(addr) {
			os.Remove(addr)
		}

		err = try_run_server()
		if err != nil {
			fmt.Printf("%s\n", err.Error())
			return 1
		}
		client, err = try_to_connect(*g_sock, addr)
		if err != nil {
			fmt.Printf("%s\n", err.Error())
			return 1
		}
	}
	defer client.Close()

	if flag.NArg() > 0 {
		switch flag.Arg(0) {
		case "autocomplete":
			cmd_auto_complete(client)
		case "close":
			cmd_close(client)
		case "status":
			cmd_status(client)
		case "drop-cache":
			cmd_drop_cache(client)
		case "set":
			cmd_set(client)
		default:
			fmt.Printf("unknown argument: %q, try running \"gocode -h\"\n", flag.Arg(0))
			return 1
		}
	}
	return 0
}
|
||||
|
||||
// try_run_server starts a detached gocode server process (this same binary
// run with -s) with all standard streams redirected to the null device, then
// releases the process handle so the child keeps running on its own.
func try_run_server() error {
	path := get_executable_filename()
	args := []string{os.Args[0], "-s", "-sock", *g_sock, "-addr", *g_addr}
	cwd, _ := os.Getwd()

	var err error
	stdin, err := os.Open(os.DevNull)
	if err != nil {
		return err
	}
	stdout, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	stderr, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
	if err != nil {
		return err
	}

	procattr := os.ProcAttr{Dir: cwd, Env: os.Environ(), Files: []*os.File{stdin, stdout, stderr}}
	p, err := os.StartProcess(path, args, &procattr)
	if err != nil {
		return err
	}

	// Detach: we never wait for the server, so drop the handle.
	return p.Release()
}
|
||||
|
||||
// try_to_connect repeatedly dials the RPC server while it is starting up,
// retrying every 10ms for roughly one second. It returns the first
// successful client, or the last dial error once the budget is exhausted.
func try_to_connect(network, address string) (client *rpc.Client, err error) {
	const retryInterval = 10 * time.Millisecond
	for waited := 0; ; waited += 10 {
		client, err = rpc.Dial(network, address)
		if err == nil || waited >= 1000 {
			return
		}
		time.Sleep(retryInterval)
	}
}
|
||||
|
||||
// prepare_file_filename_cursor reads the source to complete (from -in or
// stdin), strips a leading shebang line, and parses the cursor position from
// the CLI arguments. A "c"/"C" prefix on the offset marks a character
// (rune) offset rather than a byte offset. Returns (contents, absolute
// filename, byte cursor); cursor is -1 when no offset was given.
func prepare_file_filename_cursor() ([]byte, string, int) {
	var file []byte
	var err error

	if *g_input != "" {
		file, err = ioutil.ReadFile(*g_input)
	} else {
		file, err = ioutil.ReadAll(os.Stdin)
	}

	if err != nil {
		panic(err.Error())
	}

	var skipped int
	file, skipped = filter_out_shebang(file)

	filename := *g_input
	cursor := -1

	offset := ""
	switch flag.NArg() {
	case 2:
		offset = flag.Arg(1)
	case 3:
		filename = flag.Arg(1) // Override default filename
		offset = flag.Arg(2)
	}

	if offset != "" {
		if offset[0] == 'c' || offset[0] == 'C' {
			// character offset: convert to a byte offset first
			cursor, _ = strconv.Atoi(offset[1:])
			cursor = char_to_byte_offset(file, cursor)
		} else {
			cursor, _ = strconv.Atoi(offset)
		}
	}

	// The shebang line was removed from the buffer, shift the cursor back
	// by the number of bytes dropped.
	cursor -= skipped
	if filename != "" && !filepath.IsAbs(filename) {
		cwd, _ := os.Getwd()
		filename = filepath.Join(cwd, filename)
	}
	return file, filename, cursor
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// commands
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// cmd_status prints the daemon's status report to stdout.
func cmd_status(c *rpc.Client) {
	fmt.Printf("%s\n", client_status(c, 0))
}
|
||||
|
||||
// cmd_auto_complete sends the current file and cursor to the daemon and
// writes the returned completion candidates using the configured formatter.
func cmd_auto_complete(c *rpc.Client) {
	context := pack_build_context(&build.Default)
	file, filename, cursor := prepare_file_filename_cursor()
	f := get_formatter(*g_format)
	f.write_candidates(client_auto_complete(c, file, filename, cursor, context))
}
|
||||
|
||||
// cmd_close asks the daemon to shut down.
func cmd_close(c *rpc.Client) {
	client_close(c, 0)
}
|
||||
|
||||
// cmd_drop_cache asks the daemon to discard its package caches.
func cmd_drop_cache(c *rpc.Client) {
	client_drop_cache(c, 0)
}
|
||||
|
||||
// cmd_set lists or changes daemon options:
//   gocode set             — list all options
//   gocode set <name>      — show one option
//   gocode set <name> <v>  — change an option
// NOTE(review): "\x00" appears to be the wire sentinel for "argument not
// given" — confirm against the server-side set handler.
func cmd_set(c *rpc.Client) {
	switch flag.NArg() {
	case 1:
		fmt.Print(client_set(c, "\x00", "\x00"))
	case 2:
		fmt.Print(client_set(c, flag.Arg(1), "\x00"))
	case 3:
		fmt.Print(client_set(c, flag.Arg(1), flag.Arg(2)))
	}
}
|
|
@ -1,177 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
// config
//
// Structure represents persistent config storage of the gocode daemon. Usually
// the config is located somewhere in ~/.config/gocode directory.
//-------------------------------------------------------------------------

// config holds the daemon's user-tunable options; it is serialized to JSON
// using the field tags below, which are also the option names accepted by
// `gocode set`.
type config struct {
	ProposeBuiltins    bool   `json:"propose-builtins"`
	LibPath            string `json:"lib-path"`
	CustomPkgPrefix    string `json:"custom-pkg-prefix"`
	CustomVendorDir    string `json:"custom-vendor-dir"`
	Autobuild          bool   `json:"autobuild"`
	ForceDebugOutput   string `json:"force-debug-output"`
	PackageLookupMode  string `json:"package-lookup-mode"`
	CloseTimeout       int    `json:"close-timeout"`
	UnimportedPackages bool   `json:"unimported-packages"`
}
|
||||
|
||||
// g_config is the process-wide configuration with its default values.
// CustomVendorDir and UnimportedPackages rely on their zero values.
var g_config = config{
	ProposeBuiltins:    false,
	LibPath:            "",
	CustomPkgPrefix:    "",
	Autobuild:          false,
	ForceDebugOutput:   "",
	PackageLookupMode:  "go",
	CloseTimeout:       1800,
	UnimportedPackages: false,
}
|
||||
|
||||
// g_string_to_bool maps the spellings accepted for boolean option values
// (via `gocode set`) to their boolean meaning.
var g_string_to_bool = map[string]bool{
	"t":     true,
	"true":  true,
	"y":     true,
	"yes":   true,
	"on":    true,
	"1":     true,
	"f":     false,
	"false": false,
	"n":     false,
	"no":    false,
	"off":   false,
	"0":     false,
}
|
||||
|
||||
func set_value(v reflect.Value, value string) {
|
||||
switch t := v; t.Kind() {
|
||||
case reflect.Bool:
|
||||
v, ok := g_string_to_bool[value]
|
||||
if ok {
|
||||
t.SetBool(v)
|
||||
}
|
||||
case reflect.String:
|
||||
t.SetString(value)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
v, err := strconv.ParseInt(value, 10, 64)
|
||||
if err == nil {
|
||||
t.SetInt(v)
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v, err := strconv.ParseFloat(value, 64)
|
||||
if err == nil {
|
||||
t.SetFloat(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func list_value(v reflect.Value, name string, w io.Writer) {
|
||||
switch t := v; t.Kind() {
|
||||
case reflect.Bool:
|
||||
fmt.Fprintf(w, "%s %v\n", name, t.Bool())
|
||||
case reflect.String:
|
||||
fmt.Fprintf(w, "%s \"%v\"\n", name, t.String())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
fmt.Fprintf(w, "%s %v\n", name, t.Int())
|
||||
case reflect.Float32, reflect.Float64:
|
||||
fmt.Fprintf(w, "%s %v\n", name, t.Float())
|
||||
}
|
||||
}
|
||||
|
||||
func (this *config) list() string {
|
||||
str, typ := this.value_and_type()
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
for i := 0; i < str.NumField(); i++ {
|
||||
v := str.Field(i)
|
||||
name := typ.Field(i).Tag.Get("json")
|
||||
list_value(v, name, buf)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (this *config) list_option(name string) string {
|
||||
str, typ := this.value_and_type()
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
for i := 0; i < str.NumField(); i++ {
|
||||
v := str.Field(i)
|
||||
nm := typ.Field(i).Tag.Get("json")
|
||||
if nm == name {
|
||||
list_value(v, name, buf)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (this *config) set_option(name, value string) string {
|
||||
str, typ := this.value_and_type()
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
for i := 0; i < str.NumField(); i++ {
|
||||
v := str.Field(i)
|
||||
nm := typ.Field(i).Tag.Get("json")
|
||||
if nm == name {
|
||||
set_value(v, value)
|
||||
list_value(v, name, buf)
|
||||
}
|
||||
}
|
||||
this.write()
|
||||
return buf.String()
|
||||
|
||||
}
|
||||
|
||||
func (this *config) value_and_type() (reflect.Value, reflect.Type) {
|
||||
v := reflect.ValueOf(this).Elem()
|
||||
return v, v.Type()
|
||||
}
|
||||
|
||||
func (this *config) write() error {
|
||||
data, err := json.Marshal(this)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// make sure config dir exists
|
||||
dir := config_dir()
|
||||
if !file_exists(dir) {
|
||||
os.MkdirAll(dir, 0755)
|
||||
}
|
||||
|
||||
f, err := os.Create(config_file())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = f.Write(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *config) read() error {
|
||||
data, err := ioutil.ReadFile(config_file())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(data, this)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,557 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"log"
|
||||
)
|
||||
|
||||
// cursor_context describes what sits immediately before the cursor and is
// used to filter completion candidates.
type cursor_context struct {
	decl         *decl  // declaration the completion is relative to (may be nil)
	partial      string // identifier prefix already typed by the user
	struct_field bool   // completing field names inside a struct literal
	decl_import  bool   // completing an import path string

	// store expression that was supposed to be deduced to "decl", however
	// if decl is nil, then deduction failed, we could try to resolve it to
	// unimported package instead
	expr ast.Expr
}
|
||||
|
||||
// token_iterator walks backwards over the tokens that precede the cursor.
type token_iterator struct {
	tokens      []token_item // tokens before the cursor, in source order
	token_index int          // current position; starts at the last token
}
|
||||
|
||||
// token_item is one scanned token with its byte offset in the source.
type token_item struct {
	off int         // byte offset of the token in the scanned file
	tok token.Token // token kind
	lit string      // literal text (set only for literal tokens)
}
|
||||
|
||||
func (i token_item) literal() string {
|
||||
if i.tok.IsLiteral() {
|
||||
return i.lit
|
||||
} else {
|
||||
return i.tok.String()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// new_token_iterator scans src and collects every token that starts before
// the cursor, returning an iterator positioned on the last such token
// (token_index is -1 when nothing precedes the cursor).
func new_token_iterator(src []byte, cursor int) token_iterator {
	tokens := make([]token_item, 0, 1000)
	var s scanner.Scanner
	fset := token.NewFileSet()
	file := fset.AddFile("", fset.Base(), len(src))
	s.Init(file, src, nil, 0)
	for {
		pos, tok, lit := s.Scan()
		off := fset.Position(pos).Offset
		// stop at EOF or at the first token at/after the cursor
		if tok == token.EOF || cursor <= off {
			break
		}
		tokens = append(tokens, token_item{
			off: off,
			tok: tok,
			lit: lit,
		})
	}
	return token_iterator{
		tokens:      tokens,
		token_index: len(tokens) - 1,
	}
}
|
||||
|
||||
// token returns the token under the iterator's current position.
func (this *token_iterator) token() token_item {
	return this.tokens[this.token_index]
}
|
||||
|
||||
func (this *token_iterator) go_back() bool {
|
||||
if this.token_index <= 0 {
|
||||
return false
|
||||
}
|
||||
this.token_index--
|
||||
return true
|
||||
}
|
||||
|
||||
// bracket_pairs_map maps each closing bracket token to its opening
// counterpart, for balanced-pair skipping.
var bracket_pairs_map = map[token.Token]token.Token{
	token.RPAREN: token.LPAREN,
	token.RBRACK: token.LBRACK,
	token.RBRACE: token.LBRACE,
}
|
||||
|
||||
// skip_to_left moves the iterator backwards until it reaches the `left`
// token that balances the `right` token the iterator currently stands past,
// counting nested pairs. Returns false when the start of the token stream
// is reached before the pair balances.
func (ti *token_iterator) skip_to_left(left, right token.Token) bool {
	if ti.token().tok == left {
		return true
	}
	// we start one level deep (inside an unmatched `right`)
	balance := 1
	for balance != 0 {
		if !ti.go_back() {
			return false
		}
		switch ti.token().tok {
		case right:
			balance++
		case left:
			balance--
		}
	}
	return true
}
|
||||
|
||||
// when the cursor is at the ')' or ']' or '}', move the cursor to an opposite
// bracket pair, this functions takes nested bracket pairs into account
func (this *token_iterator) skip_to_balanced_pair() bool {
	right := this.token().tok
	// a non-closing token maps to the zero Token, in which case
	// skip_to_left scans until the stream is exhausted
	left := bracket_pairs_map[right]
	return this.skip_to_left(left, right)
}
|
||||
|
||||
// Move the cursor to the open brace of the current block, taking nested blocks
// into account.
func (this *token_iterator) skip_to_left_curly() bool {
	return this.skip_to_left(token.LBRACE, token.RBRACE)
}
|
||||
|
||||
// Extract the type expression right before the enclosing curly bracket block.
// Examples (# - the cursor):
//   &lib.Struct{Whatever: 1, Hel#} // returns "lib.Struct"
//   X{#}                           // returns X
// The idea is that we check if this type expression is a type and it is, we
// can apply special filtering for autocompletion results.
// Sadly, this doesn't cover anonymous structs.
//
// The go_back chain below is order-sensitive: each step bails out with the
// best answer found so far ("" or the bare identifier).
func (ti *token_iterator) extract_struct_type() string {
	if !ti.skip_to_left_curly() {
		return ""
	}
	if !ti.go_back() {
		return ""
	}
	if ti.token().tok != token.IDENT {
		return ""
	}
	// bare type name; check whether it is package-qualified
	b := ti.token().literal()
	if !ti.go_back() {
		return b
	}
	if ti.token().tok != token.PERIOD {
		return b
	}
	if !ti.go_back() {
		return b
	}
	if ti.token().tok != token.IDENT {
		return b
	}
	return ti.token().literal() + "." + b
}
|
||||
|
||||
// Starting from the token under the cursor move back and extract something
// that resembles a valid Go primary expression. Examples of primary expressions
// from Go spec:
// x
// 2
// (s + ".txt")
// f(3.1415, true)
// Point{1, 2}
// m["foo"]
// s[i : j + 1]
// obj.color
// f.p[i].x()
//
// As you can see we can move through all of them using balanced bracket
// matching and applying simple rules
// E.g.
// Point{1, 2}.m["foo"].s[i : j + 1].MethodCall(a, func(a, b int) int { return a + b }).
// Can be seen as:
// Point{    }.m[    ].s[        ].MethodCall(                                      ).
// Which boils the rules down to these connected via dots:
// ident
// ident[]
// ident{}
// ident()
// Of course there are also slightly more complicated rules for brackets:
// ident{}.ident()[5][4](), etc.
func (this *token_iterator) extract_go_expr() string {
	orig := this.token_index

	// Contains the type of the previously scanned token (initialized with
	// the token right under the cursor). This is the token to the *right* of
	// the current one.
	prev := this.token().tok
loop:
	for {
		if !this.go_back() {
			// ran off the start of the stream: the whole prefix is the expr
			return token_items_to_string(this.tokens[:orig])
		}
		switch this.token().tok {
		case token.PERIOD:
			// If the '.' is not followed by IDENT, it's invalid.
			if prev != token.IDENT {
				break loop
			}
		case token.IDENT:
			// Valid tokens after IDENT are '.', '[', '{' and '('.
			switch prev {
			case token.PERIOD, token.LBRACK, token.LBRACE, token.LPAREN:
				// all ok
			default:
				break loop
			}
		case token.RBRACE:
			// This one can only be a part of type initialization, like:
			// Dummy{}.Hello()
			// It is valid Go if Hello method is defined on a non-pointer receiver.
			if prev != token.PERIOD {
				break loop
			}
			this.skip_to_balanced_pair()
		case token.RPAREN, token.RBRACK:
			// After ']' and ')' their opening counterparts are valid '[', '(',
			// as well as the dot.
			switch prev {
			case token.PERIOD, token.LBRACK, token.LPAREN:
				// all ok
			default:
				break loop
			}
			this.skip_to_balanced_pair()
		default:
			break loop
		}
		prev = this.token().tok
	}
	// the expression starts right after the token we stopped on
	expr := token_items_to_string(this.tokens[this.token_index+1 : orig])
	if *g_debug {
		log.Printf("extracted expression tokens: %s", expr)
	}
	return expr
}
|
||||
|
||||
// Given a slice of token_item, reassembles them into the original literal
|
||||
// expression.
|
||||
func token_items_to_string(tokens []token_item) string {
|
||||
var buf bytes.Buffer
|
||||
for _, t := range tokens {
|
||||
buf.WriteString(t.literal())
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// this function is called when the cursor is at the '.' and you need to get the
// declaration before that dot
// Returns (nil, nil) when the extracted text does not parse as an expression.
func (c *auto_complete_context) deduce_cursor_decl(iter *token_iterator) (*decl, ast.Expr) {
	expr, err := parser.ParseExpr(iter.extract_go_expr())
	if err != nil {
		return nil, nil
	}
	// decl may still be nil here; the caller keeps expr around to retry
	// resolution against unimported packages.
	return expr_to_decl(expr, c.current.scope), expr
}
|
||||
|
||||
// try to find and extract the surrounding struct literal type
// Returns nil unless the extracted expression resolves to a declaration
// whose underlying type is a struct.
func (c *auto_complete_context) deduce_struct_type_decl(iter *token_iterator) *decl {
	typ := iter.extract_struct_type()
	if typ == "" {
		return nil
	}

	expr, err := parser.ParseExpr(typ)
	if err != nil {
		return nil
	}
	decl := type_to_decl(expr, c.current.scope)
	if decl == nil {
		return nil
	}
	if _, ok := decl.typ.(*ast.StructType); !ok {
		return nil
	}
	return decl
}
|
||||
|
||||
// Entry point from autocompletion, the function looks at text before the cursor
// and figures out the declaration the cursor is on. This declaration is
// used in filtering the resulting set of autocompletion suggestions.
// The second result is false only when context deduction failed outright
// (dot expression that resolves to nothing); true with an empty
// cursor_context means "no special context, complete everything".
func (c *auto_complete_context) deduce_cursor_context(file []byte, cursor int) (cursor_context, bool) {
	if cursor <= 0 {
		return cursor_context{}, true
	}

	iter := new_token_iterator(file, cursor)
	if len(iter.tokens) == 0 {
		return cursor_context{}, false
	}

	// figure out what is just before the cursor
	switch tok := iter.token(); tok.tok {
	case token.STRING:
		// make sure cursor is inside the string
		s := tok.literal()
		if len(s) > 1 && s[len(s)-1] == '"' && tok.off+len(s) <= cursor {
			return cursor_context{}, true
		}
		// now figure out if inside an import declaration
		// (walk backwards validating the token sequence of an import block;
		// ptok is the token to the right of the one being inspected)
		var ptok = token.STRING
		for iter.go_back() {
			itok := iter.token().tok
			switch itok {
			case token.STRING:
				switch ptok {
				case token.SEMICOLON, token.IDENT, token.PERIOD:
				default:
					return cursor_context{}, true
				}
			case token.LPAREN, token.SEMICOLON:
				switch ptok {
				case token.STRING, token.IDENT, token.PERIOD:
				default:
					return cursor_context{}, true
				}
			case token.IDENT, token.PERIOD:
				switch ptok {
				case token.STRING:
				default:
					return cursor_context{}, true
				}
			case token.IMPORT:
				switch ptok {
				case token.STRING, token.IDENT, token.PERIOD, token.LPAREN:
					// inside an import string: complete the path typed
					// so far (skip the opening quote)
					path_len := cursor - tok.off
					path := s[1:path_len]
					return cursor_context{decl_import: true, partial: path}, true
				default:
					return cursor_context{}, true
				}
			default:
				return cursor_context{}, true
			}
			ptok = itok
		}
	case token.PERIOD:
		// we're '<whatever>.'
		// figure out decl, Partial is ""
		decl, expr := c.deduce_cursor_decl(&iter)
		return cursor_context{decl: decl, expr: expr}, decl != nil
	case token.IDENT, token.TYPE, token.CONST, token.VAR, token.FUNC, token.PACKAGE:
		// we're '<whatever>.<ident>'
		// parse <ident> as Partial and figure out decl
		var partial string
		if tok.tok == token.IDENT {
			// Calculate the offset of the cursor position within the identifier.
			// For instance, if we are 'ab#c', we want partial_len = 2 and partial = ab.
			partial_len := cursor - tok.off

			// If it happens that the cursor is past the end of the literal,
			// means there is a space between the literal and the cursor, think
			// of it as no context, because that's what it really is.
			if partial_len > len(tok.literal()) {
				return cursor_context{}, true
			}
			partial = tok.literal()[0:partial_len]
		} else {
			// Do not try to truncate if it is not an identifier.
			partial = tok.literal()
		}

		iter.go_back()
		switch iter.token().tok {
		case token.PERIOD:
			decl, expr := c.deduce_cursor_decl(&iter)
			return cursor_context{decl: decl, partial: partial, expr: expr}, decl != nil
		case token.COMMA, token.LBRACE:
			// This can happen for struct fields:
			// &Struct{Hello: 1, Wor#} // (# - the cursor)
			// Let's try to find the struct type
			decl := c.deduce_struct_type_decl(&iter)
			return cursor_context{
				decl:         decl,
				partial:      partial,
				struct_field: decl != nil,
			}, true
		default:
			return cursor_context{partial: partial}, true
		}
	case token.COMMA, token.LBRACE:
		// Try to parse the current expression as a structure initialization.
		decl := c.deduce_struct_type_decl(&iter)
		return cursor_context{
			decl:         decl,
			partial:      "",
			struct_field: decl != nil,
		}, true
	}

	return cursor_context{}, true
}
|
||||
|
||||
// Decl deduction failed, but we're on "<ident>.", this ident can be an
// unexported package, let's try to match the ident against a set of known
// packages and if it matches try to import it.
// TODO: Right now I've made a static list of built-in packages, but in theory
// we could scan all GOPATH packages as well. Now, don't forget that default
// package name has nothing to do with package file name, that's why we need to
// scan the packages. And many of them will have conflicts. Can we make a smart
// prediction algorithm which will prefer certain packages over another ones?
func resolveKnownPackageIdent(ident string, filename string, context *package_lookup_context) *decl {
	importPath, ok := knownPackageIdents[ident]
	if !ok {
		return nil
	}

	path, ok := abs_path_for_package(filename, importPath, context)
	if !ok {
		return nil
	}

	// load (or refresh) the package's exported declarations
	p := new_package_file_cache(path)
	p.update_cache()
	return p.main
}
|
||||
|
||||
// knownPackageIdents maps a standard-library package's default identifier to
// its import path, used to suggest unimported packages. Ambiguous names are
// resolved in favor of one package (see the DUP comments at the bottom).
var knownPackageIdents = map[string]string{
	"adler32":         "hash/adler32",
	"aes":             "crypto/aes",
	"ascii85":         "encoding/ascii85",
	"asn1":            "encoding/asn1",
	"ast":             "go/ast",
	"atomic":          "sync/atomic",
	"base32":          "encoding/base32",
	"base64":          "encoding/base64",
	"big":             "math/big",
	"binary":          "encoding/binary",
	"bufio":           "bufio",
	"build":           "go/build",
	"bytes":           "bytes",
	"bzip2":           "compress/bzip2",
	"cgi":             "net/http/cgi",
	"cgo":             "runtime/cgo",
	"cipher":          "crypto/cipher",
	"cmplx":           "math/cmplx",
	"color":           "image/color",
	"constant":        "go/constant",
	"context":         "context",
	"cookiejar":       "net/http/cookiejar",
	"crc32":           "hash/crc32",
	"crc64":           "hash/crc64",
	"crypto":          "crypto",
	"csv":             "encoding/csv",
	"debug":           "runtime/debug",
	"des":             "crypto/des",
	"doc":             "go/doc",
	"draw":            "image/draw",
	"driver":          "database/sql/driver",
	"dsa":             "crypto/dsa",
	"dwarf":           "debug/dwarf",
	"ecdsa":           "crypto/ecdsa",
	"elf":             "debug/elf",
	"elliptic":        "crypto/elliptic",
	"encoding":        "encoding",
	"errors":          "errors",
	"exec":            "os/exec",
	"expvar":          "expvar",
	"fcgi":            "net/http/fcgi",
	"filepath":        "path/filepath",
	"flag":            "flag",
	"flate":           "compress/flate",
	"fmt":             "fmt",
	"fnv":             "hash/fnv",
	"format":          "go/format",
	"gif":             "image/gif",
	"gob":             "encoding/gob",
	"gosym":           "debug/gosym",
	"gzip":            "compress/gzip",
	"hash":            "hash",
	"heap":            "container/heap",
	"hex":             "encoding/hex",
	"hmac":            "crypto/hmac",
	"hpack":           "vendor/golang_org/x/net/http2/hpack",
	"html":            "html",
	"http":            "net/http",
	"httplex":         "vendor/golang_org/x/net/lex/httplex",
	"httptest":        "net/http/httptest",
	"httptrace":       "net/http/httptrace",
	"httputil":        "net/http/httputil",
	"image":           "image",
	"importer":        "go/importer",
	"io":              "io",
	"iotest":          "testing/iotest",
	"ioutil":          "io/ioutil",
	"jpeg":            "image/jpeg",
	"json":            "encoding/json",
	"jsonrpc":         "net/rpc/jsonrpc",
	"list":            "container/list",
	"log":             "log",
	"lzw":             "compress/lzw",
	"macho":           "debug/macho",
	"mail":            "net/mail",
	"math":            "math",
	"md5":             "crypto/md5",
	"mime":            "mime",
	"multipart":       "mime/multipart",
	"net":             "net",
	"os":              "os",
	"palette":         "image/color/palette",
	"parse":           "text/template/parse",
	"parser":          "go/parser",
	"path":            "path",
	"pe":              "debug/pe",
	"pem":             "encoding/pem",
	"pkix":            "crypto/x509/pkix",
	"plan9obj":        "debug/plan9obj",
	"png":             "image/png",
	"pprof":           "net/http/pprof",
	"printer":         "go/printer",
	"quick":           "testing/quick",
	"quotedprintable": "mime/quotedprintable",
	"race":            "runtime/race",
	"rand":            "math/rand",
	"rc4":             "crypto/rc4",
	"reflect":         "reflect",
	"regexp":          "regexp",
	"ring":            "container/ring",
	"rpc":             "net/rpc",
	"rsa":             "crypto/rsa",
	"runtime":         "runtime",
	"scanner":         "text/scanner",
	"sha1":            "crypto/sha1",
	"sha256":          "crypto/sha256",
	"sha512":          "crypto/sha512",
	"signal":          "os/signal",
	"smtp":            "net/smtp",
	"sort":            "sort",
	"sql":             "database/sql",
	"strconv":         "strconv",
	"strings":         "strings",
	"subtle":          "crypto/subtle",
	"suffixarray":     "index/suffixarray",
	"sync":            "sync",
	"syntax":          "regexp/syntax",
	"syscall":         "syscall",
	"syslog":          "log/syslog",
	"tabwriter":       "text/tabwriter",
	"tar":             "archive/tar",
	"template":        "html/template",
	"testing":         "testing",
	"textproto":       "net/textproto",
	"time":            "time",
	"tls":             "crypto/tls",
	"token":           "go/token",
	"trace":           "runtime/trace",
	"types":           "go/types",
	"unicode":         "unicode",
	"url":             "net/url",
	"user":            "os/user",
	"utf16":           "unicode/utf16",
	"utf8":            "unicode/utf8",
	"x509":            "crypto/x509",
	"xml":             "encoding/xml",
	"zip":             "archive/zip",
	"zlib":            "compress/zlib",
	//"scanner": "go/scanner", // DUP: prefer text/scanner
	//"template": "text/template", // DUP: prefer html/template
	//"pprof": "runtime/pprof", // DUP: prefer net/http/pprof
	//"rand": "crypto/rand", // DUP: prefer math/rand
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,518 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
// []package_import
//-------------------------------------------------------------------------

// package_import is one resolved import of a file: the alias it is
// referenced by and the path it resolved to.
type package_import struct {
	alias string
	path  string
}
|
||||
|
||||
// Parses import declarations until the first non-import declaration and fills
// `packages` array with import information. Blank imports ("_") and imports
// that cannot be resolved to a path are skipped.
func collect_package_imports(filename string, decls []ast.Decl, context *package_lookup_context) []package_import {
	pi := make([]package_import, 0, 16)
	for _, decl := range decls {
		if gd, ok := decl.(*ast.GenDecl); ok && gd.Tok == token.IMPORT {
			for _, spec := range gd.Specs {
				imp := spec.(*ast.ImportSpec)
				path, alias := path_and_alias(imp)
				path, ok := abs_path_for_package(filename, path, context)
				if ok && alias != "_" {
					pi = append(pi, package_import{alias, path})
				}
			}
		} else {
			// imports always precede other declarations; stop at the first
			// non-import decl
			break
		}
	}
	return pi
}
|
||||
|
||||
//-------------------------------------------------------------------------
// decl_file_cache
//
// Contains cache for top-level declarations of a file as well as its
// contents, AST and import information.
//-------------------------------------------------------------------------

type decl_file_cache struct {
	name  string // file name
	mtime int64  // last modification time

	decls     map[string]*decl // top-level declarations
	error     error            // last error
	packages  []package_import // import information
	filescope *scope           // scope for file-local (anonymified) names

	fset    *token.FileSet
	context *package_lookup_context
}
|
||||
|
||||
// new_decl_file_cache creates an empty cache for the given file; contents
// are loaded lazily by update().
func new_decl_file_cache(name string, context *package_lookup_context) *decl_file_cache {
	return &decl_file_cache{
		name:    name,
		context: context,
	}
}
|
||||
|
||||
// update refreshes the cache when the file's modification time changed;
// a stat failure clears the cached declarations and records the error.
func (f *decl_file_cache) update() {
	stat, err := os.Stat(f.name)
	if err != nil {
		f.decls = nil
		f.error = err
		f.fset = nil
		return
	}

	statmtime := stat.ModTime().UnixNano()
	if f.mtime == statmtime {
		// unchanged on disk, keep the cached declarations
		return
	}

	f.mtime = statmtime
	f.read_file()
}
|
||||
|
||||
// read_file loads the file's contents (recording a read error in f.error),
// strips a leading shebang line and reparses the declarations.
func (f *decl_file_cache) read_file() {
	var data []byte
	data, f.error = file_reader.read_file(f.name)
	if f.error != nil {
		return
	}
	data, _ = filter_out_shebang(data)

	f.process_data(data)
}
|
||||
|
||||
// process_data parses the file contents and rebuilds the cached state:
// the file scope, the import list and the map of top-level declarations.
func (f *decl_file_cache) process_data(data []byte) {
	var file *ast.File
	f.fset = token.NewFileSet()
	// NOTE(review): the parse error is recorded but processing continues
	// with whatever AST was produced; `file` is assumed non-nil below —
	// confirm ParseFile cannot return a nil file for the inputs we feed it.
	file, f.error = parser.ParseFile(f.fset, "", data, 0)
	f.filescope = new_scope(nil)
	// anonymify declarations into the file scope (see anonymify_ast)
	for _, d := range file.Decls {
		anonymify_ast(d, 0, f.filescope)
	}
	f.packages = collect_package_imports(f.name, file.Decls, f.context)
	f.decls = make(map[string]*decl, len(file.Decls))
	for _, decl := range file.Decls {
		append_to_top_decls(f.decls, decl, f.filescope)
	}
}
|
||||
|
||||
// append_to_top_decls folds a single top-level ast.Decl into the decls
// map. Methods are attached as children of their receiver type's entry
// (creating a methods-stub entry if the type itself was not seen yet);
// plain declarations are merged into any existing entry of the same name.
func append_to_top_decls(decls map[string]*decl, decl ast.Decl, scope *scope) {
	foreach_decl(decl, func(data *foreach_decl_struct) {
		class := ast_decl_class(data.decl)
		for i, name := range data.names {
			typ, v, vi := data.type_value_index(i)

			d := new_decl_full(name.Name, class, 0, typ, v, vi, scope)
			if d == nil {
				return
			}

			methodof := method_of(decl)
			if methodof != "" {
				// method: attach to the receiver type's entry (note the
				// local `decl` here shadows the parameter)
				decl, ok := decls[methodof]
				if ok {
					decl.add_child(d)
				} else {
					decl = new_decl(methodof, decl_methods_stub, scope)
					decls[methodof] = decl
					decl.add_child(d)
				}
			} else {
				// ordinary declaration: merge with an existing entry of
				// the same name, or insert a fresh one
				decl, ok := decls[d.name]
				if ok {
					decl.expand_or_replace(d)
				} else {
					decls[d.name] = d
				}
			}
		}
	})
}
|
||||
|
||||
func abs_path_for_package(filename, p string, context *package_lookup_context) (string, bool) {
|
||||
dir, _ := filepath.Split(filename)
|
||||
if len(p) == 0 {
|
||||
return "", false
|
||||
}
|
||||
if p[0] == '.' {
|
||||
return fmt.Sprintf("%s.a", filepath.Join(dir, p)), true
|
||||
}
|
||||
pkg, ok := find_go_dag_package(p, dir)
|
||||
if ok {
|
||||
return pkg, true
|
||||
}
|
||||
return find_global_file(p, context)
|
||||
}
|
||||
|
||||
func path_and_alias(imp *ast.ImportSpec) (string, string) {
|
||||
path := ""
|
||||
if imp.Path != nil && len(imp.Path.Value) > 0 {
|
||||
path = string(imp.Path.Value)
|
||||
path = path[1 : len(path)-1]
|
||||
}
|
||||
alias := ""
|
||||
if imp.Name != nil {
|
||||
alias = imp.Name.Name
|
||||
}
|
||||
return path, alias
|
||||
}
|
||||
|
||||
func find_go_dag_package(imp, filedir string) (string, bool) {
|
||||
// Support godag directory structure
|
||||
dir, pkg := filepath.Split(imp)
|
||||
godag_pkg := filepath.Join(filedir, "..", dir, "_obj", pkg+".a")
|
||||
if file_exists(godag_pkg) {
|
||||
return godag_pkg, true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// autobuild compares the mod time of the source files of the package, and if any of them is newer
// than the package object file will rebuild it.
func autobuild(p *build.Package) error {
	if p.Dir == "" {
		return fmt.Errorf("no files to build")
	}
	ps, err := os.Stat(p.PkgObj)
	if err != nil {
		// Assume package file does not exist and build for the first time.
		return build_package(p)
	}
	pt := ps.ModTime()
	fs, err := readdir_lstat(p.Dir)
	if err != nil {
		return err
	}
	for _, f := range fs {
		if f.IsDir() {
			// NOTE(review): subdirectories are skipped entirely, so
			// changes inside them never trigger a rebuild of this package.
			continue
		}
		if f.ModTime().After(pt) {
			// Source file is newer than package file; rebuild.
			return build_package(p)
		}
	}
	return nil
}
|
||||
|
||||
// build_package builds the package by calling `go install package/import`. If everything compiles
|
||||
// correctly, the newly compiled package should then be in the usual place in the `$GOPATH/pkg`
|
||||
// directory, and gocode will pick it up from there.
|
||||
func build_package(p *build.Package) error {
|
||||
if *g_debug {
|
||||
log.Printf("-------------------")
|
||||
log.Printf("rebuilding package %s", p.Name)
|
||||
log.Printf("package import: %s", p.ImportPath)
|
||||
log.Printf("package object: %s", p.PkgObj)
|
||||
log.Printf("package source dir: %s", p.Dir)
|
||||
log.Printf("package source files: %v", p.GoFiles)
|
||||
log.Printf("GOPATH: %v", g_daemon.context.GOPATH)
|
||||
log.Printf("GOROOT: %v", g_daemon.context.GOROOT)
|
||||
}
|
||||
env := os.Environ()
|
||||
for i, v := range env {
|
||||
if strings.HasPrefix(v, "GOPATH=") {
|
||||
env[i] = "GOPATH=" + g_daemon.context.GOPATH
|
||||
} else if strings.HasPrefix(v, "GOROOT=") {
|
||||
env[i] = "GOROOT=" + g_daemon.context.GOROOT
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "install", p.ImportPath)
|
||||
cmd.Env = env
|
||||
|
||||
// TODO: Should read STDERR rather than STDOUT.
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *g_debug {
|
||||
log.Printf("build out: %s\n", string(out))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// executes autobuild function if autobuild option is enabled, logs error and
|
||||
// ignores it
|
||||
func try_autobuild(p *build.Package) {
|
||||
if g_config.Autobuild {
|
||||
err := autobuild(p)
|
||||
if err != nil && *g_debug {
|
||||
log.Printf("Autobuild error: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func log_found_package_maybe(imp, pkgpath string) {
|
||||
if *g_debug {
|
||||
log.Printf("Found %q at %q\n", imp, pkgpath)
|
||||
}
|
||||
}
|
||||
|
||||
// log_build_context dumps the package lookup context; called from
// find_global_file when an import fails to resolve in debug mode.
func log_build_context(context *package_lookup_context) {
	log.Printf(" GOROOT: %s\n", context.GOROOT)
	log.Printf(" GOPATH: %s\n", context.GOPATH)
	log.Printf(" GOOS: %s\n", context.GOOS)
	log.Printf(" GOARCH: %s\n", context.GOARCH)
	log.Printf(" BzlProjectRoot: %q\n", context.BzlProjectRoot)
	log.Printf(" GBProjectRoot: %q\n", context.GBProjectRoot)
	log.Printf(" lib-path: %q\n", g_config.LibPath)
}
|
||||
|
||||
// find_global_file returns the file path of the compiled package corresponding to the specified
// import, and a boolean stating whether such path is valid.
// TODO: Return only one value, possibly empty string if not found.
//
// Lookup order: the synthetic "unsafe" package, the configured
// lib-path, the gb/bzl project roots (when the respective lookup mode
// is active), vendor directories walked upward from the current
// package, and finally the regular build context.
func find_global_file(imp string, context *package_lookup_context) (string, bool) {
	// gocode synthetically generates the builtin package
	// "unsafe", since the "unsafe.a" package doesn't really exist.
	// Thus, when the user request for the package "unsafe" we
	// would return synthetic global file that would be used
	// just as a key name to find this synthetic package
	if imp == "unsafe" {
		return "unsafe", true
	}

	pkgfile := fmt.Sprintf("%s.a", imp)

	// if lib-path is defined, use it
	if g_config.LibPath != "" {
		for _, p := range filepath.SplitList(g_config.LibPath) {
			pkg_path := filepath.Join(p, pkgfile)
			if file_exists(pkg_path) {
				log_found_package_maybe(imp, pkg_path)
				return pkg_path, true
			}
			// Also check the relevant pkg/OS_ARCH dir for the libpath, if provided.
			pkgdir := fmt.Sprintf("%s_%s", context.GOOS, context.GOARCH)
			pkg_path = filepath.Join(p, "pkg", pkgdir, pkgfile)
			if file_exists(pkg_path) {
				log_found_package_maybe(imp, pkg_path)
				return pkg_path, true
			}
		}
	}

	// gb-specific lookup mode, only if the root dir was found
	if g_config.PackageLookupMode == "gb" && context.GBProjectRoot != "" {
		root := context.GBProjectRoot
		pkg_path := filepath.Join(root, "pkg", context.GOOS+"-"+context.GOARCH, pkgfile)
		if file_exists(pkg_path) {
			log_found_package_maybe(imp, pkg_path)
			return pkg_path, true
		}
	}

	// bzl-specific lookup mode, only if the root dir was found
	if g_config.PackageLookupMode == "bzl" && context.BzlProjectRoot != "" {
		var root, impath string
		if strings.HasPrefix(imp, g_config.CustomPkgPrefix+"/") {
			root = filepath.Join(context.BzlProjectRoot, "bazel-bin")
			impath = imp[len(g_config.CustomPkgPrefix)+1:]
		} else if g_config.CustomVendorDir != "" {
			// Try custom vendor dir.
			root = filepath.Join(context.BzlProjectRoot, "bazel-bin", g_config.CustomVendorDir)
			impath = imp
		}

		if root != "" && impath != "" {
			// There might be more than one ".a" files in the pkg path with bazel.
			// But the best practice is to keep one go_library build target in each
			// package directory so that it follows the standard Go package
			// structure. Thus here we assume there is at most one ".a" file existing
			// in the pkg path.
			if d, err := os.Open(filepath.Join(root, impath)); err == nil {
				defer d.Close()

				if fis, err := d.Readdir(-1); err == nil {
					for _, fi := range fis {
						if !fi.IsDir() && filepath.Ext(fi.Name()) == ".a" {
							pkg_path := filepath.Join(root, impath, fi.Name())
							log_found_package_maybe(imp, pkg_path)
							return pkg_path, true
						}
					}
				}
			}
		}
	}

	if context.CurrentPackagePath != "" {
		// Try vendor path first, see GO15VENDOREXPERIMENT.
		// We don't check this environment variable however, seems like there is
		// almost no harm in doing so (well.. if you experiment with vendoring,
		// gocode will fail after enabling/disabling the flag, and you'll be
		// forced to get rid of vendor binaries). But asking users to set this
		// env var is up will bring more trouble. Because we also need to pass
		// it from client to server, make sure their editors set it, etc.
		// So, whatever, let's just pretend it's always on.
		package_path := context.CurrentPackagePath
		for {
			limp := filepath.Join(package_path, "vendor", imp)
			if p, err := context.Import(limp, "", build.AllowBinary|build.FindOnly); err == nil {
				try_autobuild(p)
				if file_exists(p.PkgObj) {
					log_found_package_maybe(imp, p.PkgObj)
					return p.PkgObj, true
				}
			}
			if package_path == "" {
				break
			}
			next_path := filepath.Dir(package_path)
			// let's protect ourselves from inf recursion here
			if next_path == package_path {
				break
			}
			package_path = next_path
		}
	}

	// regular lookup through the build context
	if p, err := context.Import(imp, "", build.AllowBinary|build.FindOnly); err == nil {
		try_autobuild(p)
		if file_exists(p.PkgObj) {
			log_found_package_maybe(imp, p.PkgObj)
			return p.PkgObj, true
		}
	}

	if *g_debug {
		log.Printf("Import path %q was not resolved\n", imp)
		log.Println("Gocode's build context is:")
		log_build_context(context)
	}
	return "", false
}
|
||||
|
||||
func package_name(file *ast.File) string {
|
||||
if file.Name != nil {
|
||||
return file.Name.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// decl_cache
|
||||
//
|
||||
// Thread-safe collection of DeclFileCache entities.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// package_lookup_context extends the standard build.Context with the
// extra roots used by the non-standard ("bzl", "gb") lookup modes and
// with the import path of the package being completed (used for vendor
// directory resolution in find_global_file).
type package_lookup_context struct {
	build.Context
	BzlProjectRoot     string // bazel workspace root (empty when unset)
	GBProjectRoot      string // gb project root (empty when unset)
	CurrentPackagePath string
}
|
||||
|
||||
// gopath returns the list of Go path directories.
|
||||
func (ctxt *package_lookup_context) gopath() []string {
|
||||
var all []string
|
||||
for _, p := range filepath.SplitList(ctxt.GOPATH) {
|
||||
if p == "" || p == ctxt.GOROOT {
|
||||
// Empty paths are uninteresting.
|
||||
// If the path is the GOROOT, ignore it.
|
||||
// People sometimes set GOPATH=$GOROOT.
|
||||
// Do not get confused by this common mistake.
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(p, "~") {
|
||||
// Path segments starting with ~ on Unix are almost always
|
||||
// users who have incorrectly quoted ~ while setting GOPATH,
|
||||
// preventing it from expanding to $HOME.
|
||||
// The situation is made more confusing by the fact that
|
||||
// bash allows quoted ~ in $PATH (most shells do not).
|
||||
// Do not get confused by this, and do not try to use the path.
|
||||
// It does not exist, and printing errors about it confuses
|
||||
// those users even more, because they think "sure ~ exists!".
|
||||
// The go command diagnoses this situation and prints a
|
||||
// useful error.
|
||||
// On Windows, ~ is used in short names, such as c:\progra~1
|
||||
// for c:\program files.
|
||||
continue
|
||||
}
|
||||
all = append(all, p)
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
// pkg_dirs returns the directories that may contain compiled package
// files according to the configured lookup mode ("go", "gb" or "bzl").
// The GOROOT pkg dir is always included when it exists.
func (ctxt *package_lookup_context) pkg_dirs() []string {
	pkgdir := fmt.Sprintf("%s_%s", ctxt.GOOS, ctxt.GOARCH)

	var all []string
	if ctxt.GOROOT != "" {
		dir := filepath.Join(ctxt.GOROOT, "pkg", pkgdir)
		if is_dir(dir) {
			all = append(all, dir)
		}
	}

	switch g_config.PackageLookupMode {
	case "go":
		for _, p := range ctxt.gopath() {
			dir := filepath.Join(p, "pkg", pkgdir)
			if is_dir(dir) {
				all = append(all, dir)
			}
		}
	case "gb":
		if ctxt.GBProjectRoot != "" {
			// gb names its pkg dir "OS-ARCH" (dash), not "OS_ARCH"
			pkgdir := fmt.Sprintf("%s-%s", ctxt.GOOS, ctxt.GOARCH)
			dir := filepath.Join(ctxt.GBProjectRoot, "pkg", pkgdir)
			if is_dir(dir) {
				all = append(all, dir)
			}
		}
	case "bzl":
		// TODO: Support bazel mode
	}
	return all
}
|
||||
|
||||
// decl_cache is a thread-safe collection of decl_file_cache entities,
// keyed by file name.
type decl_cache struct {
	cache   map[string]*decl_file_cache
	context *package_lookup_context
	sync.Mutex // guards cache
}
|
||||
|
||||
func new_decl_cache(context *package_lookup_context) *decl_cache {
|
||||
return &decl_cache{
|
||||
cache: make(map[string]*decl_file_cache),
|
||||
context: context,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *decl_cache) get(filename string) *decl_file_cache {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
f, ok := c.cache[filename]
|
||||
if !ok {
|
||||
f = new_decl_file_cache(filename, c.context)
|
||||
c.cache[filename] = f
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (c *decl_cache) get_and_update(filename string) *decl_file_cache {
|
||||
f := c.get(filename)
|
||||
f.update()
|
||||
return f
|
||||
}
|
|
@ -1,172 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// formatter interfaces
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// formatter writes a set of completion candidates to stdout in a
// client-specific format. num is forwarded from the completion
// request; its interpretation is format-specific — NOTE(review):
// confirm its exact meaning against the callers.
type formatter interface {
	write_candidates(candidates []candidate, num int)
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// nice_formatter (just for testing, simple textual output)
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// nice_formatter produces simple human-readable output; mainly useful
// for testing from the command line.
type nice_formatter struct{}

// write_candidates prints one "class name type" line per candidate,
// folding the "func" keyword into the signature for functions.
func (*nice_formatter) write_candidates(candidates []candidate, num int) {
	if candidates == nil {
		fmt.Printf("Nothing to complete.\n")
		return
	}

	fmt.Printf("Found %d candidates:\n", len(candidates))
	for _, c := range candidates {
		abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
		if c.Class == decl_func {
			abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
		}
		fmt.Printf(" %s\n", abbr)
	}
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// vim_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// vim_formatter emits a VimL expression of the form
// [num, [{'word': …, 'abbr': …, 'info': …}, …]].
type vim_formatter struct{}

func (*vim_formatter) write_candidates(candidates []candidate, num int) {
	if candidates == nil {
		fmt.Print("[0, []]")
		return
	}

	fmt.Printf("[%d, [", num)
	for i, c := range candidates {
		if i != 0 {
			fmt.Printf(", ")
		}

		// 'word' is the text to insert; functions get an opening paren,
		// closed immediately when the signature starts with "func()"
		word := c.Name
		if c.Class == decl_func {
			word += "("
			if strings.HasPrefix(c.Type, "func()") {
				word += ")"
			}
		}

		abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
		if c.Class == decl_func {
			abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
		}
		// NOTE(review): word/abbr are interpolated without escaping; a
		// candidate containing ' would break the VimL literal.
		fmt.Printf("{'word': '%s', 'abbr': '%s', 'info': '%s'}", word, abbr, abbr)
	}
	fmt.Printf("]]")
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// godit_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// godit_formatter emits the "display,,contents" pair format used by
// the godit editor, preceded by a "num,,count" header line.
type godit_formatter struct{}

func (*godit_formatter) write_candidates(candidates []candidate, num int) {
	fmt.Printf("%d,,%d\n", num, len(candidates))
	for _, c := range candidates {
		// contents is the text to insert; functions get an opening
		// paren, closed immediately when the signature starts "func()"
		contents := c.Name
		if c.Class == decl_func {
			contents += "("
			if strings.HasPrefix(c.Type, "func()") {
				contents += ")"
			}
		}

		display := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
		if c.Class == decl_func {
			display = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
		}
		fmt.Printf("%s,,%s\n", display, contents)
	}
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// emacs_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type emacs_formatter struct{}
|
||||
|
||||
func (*emacs_formatter) write_candidates(candidates []candidate, num int) {
|
||||
for _, c := range candidates {
|
||||
var hint string
|
||||
switch {
|
||||
case c.Class == decl_func:
|
||||
hint = c.Type
|
||||
case c.Type == "":
|
||||
hint = c.Class.String()
|
||||
default:
|
||||
hint = c.Class.String() + " " + c.Type
|
||||
}
|
||||
fmt.Printf("%s,,%s\n", c.Name, hint)
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// csv_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type csv_formatter struct{}
|
||||
|
||||
func (*csv_formatter) write_candidates(candidates []candidate, num int) {
|
||||
for _, c := range candidates {
|
||||
fmt.Printf("%s,,%s,,%s\n", c.Class, c.Name, c.Type)
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// json_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// json_formatter emits [num, [{"class": …, "name": …, "type": …}, …]].
type json_formatter struct{}

func (*json_formatter) write_candidates(candidates []candidate, num int) {
	if candidates == nil {
		fmt.Print("[]")
		return
	}

	fmt.Printf(`[%d, [`, num)
	for i, c := range candidates {
		if i != 0 {
			fmt.Printf(", ")
		}
		// NOTE(review): fields are interpolated without JSON escaping; a
		// quote or backslash in a name/type would produce invalid JSON.
		// Consider encoding/json if such values can occur in practice.
		fmt.Printf(`{"class": "%s", "name": "%s", "type": "%s"}`,
			c.Class, c.Name, c.Type)
	}
	fmt.Print("]]")
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
func get_formatter(name string) formatter {
|
||||
switch name {
|
||||
case "vim":
|
||||
return new(vim_formatter)
|
||||
case "emacs":
|
||||
return new(emacs_formatter)
|
||||
case "nice":
|
||||
return new(nice_formatter)
|
||||
case "csv":
|
||||
return new(csv_formatter)
|
||||
case "json":
|
||||
return new(json_formatter)
|
||||
case "godit":
|
||||
return new(godit_formatter)
|
||||
}
|
||||
return new(nice_formatter)
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var (
|
||||
g_is_server = flag.Bool("s", false, "run a server instead of a client")
|
||||
g_format = flag.String("f", "nice", "output format (vim | emacs | nice | csv | json)")
|
||||
g_input = flag.String("in", "", "use this file instead of stdin input")
|
||||
g_sock = create_sock_flag("sock", "socket type (unix | tcp)")
|
||||
g_addr = flag.String("addr", "127.0.0.1:37373", "address for tcp socket")
|
||||
g_debug = flag.Bool("debug", false, "enable server-side debug mode")
|
||||
g_profile = flag.Int("profile", 0, "port on which to expose profiling information for pprof; 0 to disable profiling")
|
||||
)
|
||||
|
||||
func get_socket_filename() string {
|
||||
user := os.Getenv("USER")
|
||||
if user == "" {
|
||||
user = "all"
|
||||
}
|
||||
return filepath.Join(os.TempDir(), fmt.Sprintf("gocode-daemon.%s", user))
|
||||
}
|
||||
|
||||
// show_usage prints the command-line help (flags plus subcommands) to
// stderr; installed as flag.Usage in main.
func show_usage() {
	fmt.Fprintf(os.Stderr,
		"Usage: %s [-s] [-f=<format>] [-in=<path>] [-sock=<type>] [-addr=<addr>]\n"+
			" <command> [<args>]\n\n",
		os.Args[0])
	fmt.Fprintf(os.Stderr,
		"Flags:\n")
	flag.PrintDefaults()
	fmt.Fprintf(os.Stderr,
		"\nCommands:\n"+
			" autocomplete [<path>] <offset> main autocompletion command\n"+
			" close close the gocode daemon\n"+
			" status gocode daemon status report\n"+
			" drop-cache drop gocode daemon's cache\n"+
			" set [<name> [<value>]] list or set config options\n")
}
|
||||
|
||||
// main dispatches between server mode (-s) and client mode, optionally
// exposing pprof endpoints when -profile is set, and exits with the
// status code returned by the chosen mode.
func main() {
	flag.Usage = show_usage
	flag.Parse()

	var retval int
	if *g_is_server {
		// the profiling listener runs alongside the server; it exits
		// immediately when -profile is 0 (disabled)
		go func() {
			if *g_profile <= 0 {
				return
			}
			addr := fmt.Sprintf("localhost:%d", *g_profile)
			// Use the following commands to profile the binary:
			// go tool pprof http://localhost:6060/debug/pprof/profile # 30-second CPU profile
			// go tool pprof http://localhost:6060/debug/pprof/heap # heap profile
			// go tool pprof http://localhost:6060/debug/pprof/block # goroutine blocking profile
			// See http://blog.golang.org/profiling-go-programs for more info.
			log.Printf("enabling profiler on %s", addr)
			log.Print(http.ListenAndServe(addr, nil))
		}()
		retval = do_server()
	} else {
		retval = do_client()
	}
	os.Exit(retval)
}
|
|
@ -1,48 +0,0 @@
|
|||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// create_sock_flag registers the socket-type flag; on unix-like
// systems the default transport is a unix domain socket.
func create_sock_flag(name, desc string) *string {
	const default_sock = "unix"
	return flag.String(name, default_sock, desc)
}
|
||||
|
||||
// Full path of the current executable. Resolution order: the
// /proc/self/exe symlink (Linux), then argv[0] made absolute against
// the cwd, then a PATH lookup of "gocode"; returns "" when all fail.
func get_executable_filename() string {
	// try readlink first
	path, err := os.Readlink("/proc/self/exe")
	if err == nil {
		return path
	}
	// use argv[0]
	path = os.Args[0]
	if !filepath.IsAbs(path) {
		cwd, _ := os.Getwd()
		path = filepath.Join(cwd, path)
	}
	if file_exists(path) {
		return path
	}
	// Fallback : use "gocode" and assume we are in the PATH...
	path, err = exec.LookPath("gocode")
	if err == nil {
		return path
	}
	return ""
}
|
||||
|
||||
// config location
|
||||
|
||||
// config_dir returns the directory holding gocode's configuration —
// presumably under the XDG config home (confirm against xdg_home_dir).
func config_dir() string {
	return filepath.Join(xdg_home_dir(), "gocode")
}
|
||||
|
||||
func config_file() string {
|
||||
return filepath.Join(xdg_home_dir(), "gocode", "config.json")
|
||||
}
|
|
@ -1,56 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Lazily-loaded Windows system DLLs.
var (
	shell32  = syscall.NewLazyDLL("shell32.dll")
	kernel32 = syscall.NewLazyDLL("kernel32.dll")
)

// Win32 entry points resolved from the DLLs above.
var (
	proc_sh_get_folder_path   = shell32.NewProc("SHGetFolderPathW")
	proc_get_module_file_name = kernel32.NewProc("GetModuleFileNameW")
)
|
||||
|
||||
// create_sock_flag registers the socket-type flag; on Windows the
// default transport is TCP (unix domain sockets are unavailable).
func create_sock_flag(name, desc string) *string {
	return flag.String(name, "tcp", desc)
}
|
||||
|
||||
// Full path of the current executable, via GetModuleFileNameW on the
// current module (hModule 0). Panics when the Win32 call fails.
func get_executable_filename() string {
	b := make([]uint16, syscall.MAX_PATH)
	ret, _, err := syscall.Syscall(proc_get_module_file_name.Addr(), 3,
		0, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
	// GetModuleFileNameW returns the copied length; 0 signals failure
	if int(ret) == 0 {
		panic(fmt.Sprintf("GetModuleFileNameW : err %d", int(err)))
	}
	return syscall.UTF16ToString(b)
}
|
||||
|
||||
const (
	// CSIDL_APPDATA: the per-user roaming Application Data folder
	csidl_appdata = 0x1a
)

// get_appdata_folder_path resolves the user's application-data
// directory via SHGetFolderPathW. Panics when the Win32 call fails.
func get_appdata_folder_path() string {
	b := make([]uint16, syscall.MAX_PATH)
	ret, _, err := syscall.Syscall6(proc_sh_get_folder_path.Addr(), 5,
		0, csidl_appdata, 0, 0, uintptr(unsafe.Pointer(&b[0])), 0)
	// SHGetFolderPathW returns an HRESULT; non-zero signals failure
	if int(ret) != 0 {
		panic(fmt.Sprintf("SHGetFolderPathW : err %d", int(err)))
	}
	return syscall.UTF16ToString(b)
}
|
||||
|
||||
// config_dir returns the directory holding gocode's configuration,
// inside the user's application-data folder.
func config_dir() string {
	return filepath.Join(get_appdata_folder_path(), "gocode")
}
|
||||
|
||||
// config_file returns the path of gocode's JSON configuration file.
func config_file() string {
	return filepath.Join(get_appdata_folder_path(), "gocode", "config.json")
}
|
|
@ -1,256 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// package_parser abstracts over the textual and binary gc export-data
// formats; parse_export invokes the callback once per exported
// declaration together with the (sub)package it belongs to.
type package_parser interface {
	parse_export(callback func(pkg string, decl ast.Decl))
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// package_file_cache
|
||||
//
|
||||
// Structure that represents a cache for an imported pacakge. In other words
|
||||
// these are the contents of an archive (*.a) file.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// package_file_cache represents a cache for an imported package: the
// parsed contents of an archive (*.a) file.
type package_file_cache struct {
	name     string // file name
	mtime    int64  // archive mod time; -1 marks an entry cached forever
	defalias string // default package alias taken from the export data

	scope  *scope
	main   *decl // package declaration
	others map[string]*decl // declarations of packages referenced by the export data
}
|
||||
|
||||
func new_package_file_cache(name string) *package_file_cache {
|
||||
m := new(package_file_cache)
|
||||
m.name = name
|
||||
m.mtime = 0
|
||||
m.defalias = ""
|
||||
return m
|
||||
}
|
||||
|
||||
// Creates a cache that stays in cache forever. Useful for built-in packages.
|
||||
func new_package_file_cache_forever(name, defalias string) *package_file_cache {
|
||||
m := new(package_file_cache)
|
||||
m.name = name
|
||||
m.mtime = -1
|
||||
m.defalias = defalias
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *package_file_cache) find_file() string {
|
||||
if file_exists(m.name) {
|
||||
return m.name
|
||||
}
|
||||
|
||||
n := len(m.name)
|
||||
filename := m.name[:n-1] + "6"
|
||||
if file_exists(filename) {
|
||||
return filename
|
||||
}
|
||||
|
||||
filename = m.name[:n-1] + "8"
|
||||
if file_exists(filename) {
|
||||
return filename
|
||||
}
|
||||
|
||||
filename = m.name[:n-1] + "5"
|
||||
if file_exists(filename) {
|
||||
return filename
|
||||
}
|
||||
return m.name
|
||||
}
|
||||
|
||||
// update_cache re-parses the archive when its modification time differs
// from the cached one. Forever entries (mtime == -1) and stat/read
// failures leave the existing state untouched.
func (m *package_file_cache) update_cache() {
	if m.mtime == -1 {
		return
	}
	fname := m.find_file()
	stat, err := os.Stat(fname)
	if err != nil {
		return
	}

	statmtime := stat.ModTime().UnixNano()
	if m.mtime != statmtime {
		m.mtime = statmtime

		data, err := file_reader.read_file(fname)
		if err != nil {
			return
		}
		m.process_package_data(data)
	}
}
|
||||
|
||||
// process_package_data parses gc export data of a compiled package and
// rebuilds m.scope, m.main and m.others. The data may be in the binary
// ('B'-prefixed) or the textual export format. Panics when the import
// section cannot be located.
func (m *package_file_cache) process_package_data(data []byte) {
	m.scope = new_scope(g_universe_scope)

	// find import section
	i := bytes.Index(data, []byte{'\n', '$', '$'})
	if i == -1 {
		panic(fmt.Sprintf("Can't find the import section in the package file %s", m.name))
	}
	data = data[i+len("\n$$"):]

	// main package
	m.main = new_decl(m.name, decl_package, nil)
	// create map for other packages
	m.others = make(map[string]*decl)

	var pp package_parser
	if data[0] == 'B' {
		// binary format, skip 'B\n'
		data = data[2:]
		var p gc_bin_parser
		p.init(data, m)
		pp = &p
	} else {
		// textual format, find the beginning of the package clause
		i = bytes.Index(data, []byte{'p', 'a', 'c', 'k', 'a', 'g', 'e'})
		if i == -1 {
			panic("Can't find the package clause")
		}
		data = data[i:]

		var p gc_parser
		p.init(data, m)
		pp = &p
	}

	// route each exported declaration either into the main package decl
	// or into the decl of the sub-package it belongs to
	pp.parse_export(func(pkg string, decl ast.Decl) {
		anonymify_ast(decl, decl_foreign, m.scope)
		if pkg == "" || strings.HasPrefix(pkg, "#") {
			// main package
			add_ast_decl_to_package(m.main, decl, m.scope)
		} else {
			// others
			if _, ok := m.others[pkg]; !ok {
				m.others[pkg] = new_decl(pkg, decl_package, nil)
			}
			add_ast_decl_to_package(m.others[pkg], decl, m.scope)
		}
	})

	// hack, add ourselves to the package scope
	mainName := "#" + m.defalias
	m.add_package_to_scope(mainName, m.name)

	// replace dummy package decls in package scope to actual packages
	for key := range m.scope.entities {
		if !strings.HasPrefix(key, "#") && !strings.HasPrefix(key, "!") {
			continue
		}
		pkg, ok := m.others[key]
		if !ok && key == mainName {
			pkg = m.main
		}
		m.scope.replace_decl(key, pkg)
	}
}
|
||||
|
||||
func (m *package_file_cache) add_package_to_scope(alias, realname string) {
|
||||
d := new_decl(realname, decl_package, nil)
|
||||
m.scope.add_decl(alias, d)
|
||||
}
|
||||
|
||||
// add_ast_decl_to_package merges one exported declaration into the
// package decl tree: methods become children of their receiver type's
// entry (creating a methods-stub if needed), everything else becomes
// or extends a direct child of pkg. Unexported non-type declarations
// are dropped.
func add_ast_decl_to_package(pkg *decl, decl ast.Decl, scope *scope) {
	foreach_decl(decl, func(data *foreach_decl_struct) {
		class := ast_decl_class(data.decl)
		for i, name := range data.names {
			typ, v, vi := data.type_value_index(i)

			d := new_decl_full(name.Name, class, decl_foreign, typ, v, vi, scope)
			if d == nil {
				return
			}

			// unexported types are kept (they may appear in exported
			// signatures); other unexported declarations are skipped
			if !name.IsExported() && d.class != decl_type {
				return
			}

			methodof := method_of(data.decl)
			if methodof != "" {
				decl := pkg.find_child(methodof)
				if decl != nil {
					decl.add_child(d)
				} else {
					decl = new_decl(methodof, decl_methods_stub, scope)
					decl.add_child(d)
					pkg.add_child(decl)
				}
			} else {
				decl := pkg.find_child(d.name)
				if decl != nil {
					decl.expand_or_replace(d)
				} else {
					pkg.add_child(d)
				}
			}
		}
	})
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// package_cache
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// package_cache maps archive paths to their cached contents.
type package_cache map[string]*package_file_cache

// new_package_cache creates a cache pre-populated with the synthetic
// built-in "unsafe" package.
func new_package_cache() package_cache {
	m := make(package_cache)

	// add built-in "unsafe" package
	m.add_builtin_unsafe_package()

	return m
}
|
||||
|
||||
// Function fills 'ps' set with packages from 'pkgs' import information.
// In case if package is not in the cache, it creates one and adds one to the cache.
func (c package_cache) append_packages(ps map[string]*package_file_cache, pkgs []package_import) {
	for _, m := range pkgs {
		if _, ok := ps[m.path]; ok {
			continue
		}

		if mod, ok := c[m.path]; ok {
			ps[m.path] = mod
		} else {
			// not cached yet: create an empty entry and register it in
			// both the result set and the cache
			mod = new_package_file_cache(m.path)
			ps[m.path] = mod
			c[m.path] = mod
		}
	}
}
|
||||
|
||||
// g_builtin_unsafe_package is hand-written textual export data for the
// built-in "unsafe" package, which has no real archive file on disk.
var g_builtin_unsafe_package = []byte(`
import
$$
package unsafe
type @"".Pointer uintptr
func @"".Offsetof (? any) uintptr
func @"".Sizeof (? any) uintptr
func @"".Alignof (? any) uintptr
func @"".Typeof (i interface { }) interface { }
func @"".Reflect (i interface { }) (typ interface { }, addr @"".Pointer)
func @"".Unreflect (typ interface { }, addr @"".Pointer) interface { }
func @"".New (typ interface { }) @"".Pointer
func @"".NewArray (typ interface { }, n int) @"".Pointer

$$
`)
|
||||
|
||||
func (c package_cache) add_builtin_unsafe_package() {
|
||||
pkg := new_package_file_cache_forever("unsafe", "unsafe")
|
||||
pkg.process_package_data(g_builtin_unsafe_package)
|
||||
c["unsafe"] = pkg
|
||||
}
|
|
@ -1,762 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// gc_bin_parser
|
||||
//
|
||||
// The following part of the code may contain portions of the code from the Go
|
||||
// standard library, which tells me to retain their copyright notice:
|
||||
//
|
||||
// Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type gc_bin_parser struct {
|
||||
data []byte
|
||||
buf []byte // for reading strings
|
||||
version int
|
||||
|
||||
// object lists
|
||||
strList []string // in order of appearance
|
||||
pkgList []string // in order of appearance
|
||||
typList []ast.Expr // in order of appearance
|
||||
callback func(pkg string, decl ast.Decl)
|
||||
pfc *package_file_cache
|
||||
trackAllTypes bool
|
||||
|
||||
// position encoding
|
||||
posInfoFormat bool
|
||||
prevFile string
|
||||
prevLine int
|
||||
|
||||
// debugging support
|
||||
debugFormat bool
|
||||
read int // bytes read
|
||||
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) init(data []byte, pfc *package_file_cache) {
|
||||
p.data = data
|
||||
p.version = -1 // unknown version
|
||||
p.strList = []string{""} // empty string is mapped to 0
|
||||
p.pfc = pfc
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) parse_export(callback func(string, ast.Decl)) {
|
||||
p.callback = callback
|
||||
|
||||
// read version info
|
||||
var versionstr string
|
||||
if b := p.rawByte(); b == 'c' || b == 'd' {
|
||||
// Go1.7 encoding; first byte encodes low-level
|
||||
// encoding format (compact vs debug).
|
||||
// For backward-compatibility only (avoid problems with
|
||||
// old installed packages). Newly compiled packages use
|
||||
// the extensible format string.
|
||||
// TODO(gri) Remove this support eventually; after Go1.8.
|
||||
if b == 'd' {
|
||||
p.debugFormat = true
|
||||
}
|
||||
p.trackAllTypes = p.rawByte() == 'a'
|
||||
p.posInfoFormat = p.int() != 0
|
||||
versionstr = p.string()
|
||||
if versionstr == "v1" {
|
||||
p.version = 0
|
||||
}
|
||||
} else {
|
||||
// Go1.8 extensible encoding
|
||||
// read version string and extract version number (ignore anything after the version number)
|
||||
versionstr = p.rawStringln(b)
|
||||
if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
|
||||
if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
|
||||
p.version = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// read version specific flags - extend as necessary
|
||||
switch p.version {
|
||||
// case 4:
|
||||
// ...
|
||||
// fallthrough
|
||||
case 3, 2, 1:
|
||||
// Support for Go 1.8 type aliases will be added very
|
||||
// soon (Oct 2016). In the meantime, we make a
|
||||
// best-effort attempt to read v3 export data, failing
|
||||
// if we encounter a type alias. This allows the
|
||||
// automated builders to make progress since
|
||||
// type aliases are not yet used in practice.
|
||||
// TODO(gri): add support for type aliases.
|
||||
p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
|
||||
p.trackAllTypes = p.int() != 0
|
||||
p.posInfoFormat = p.int() != 0
|
||||
case 0:
|
||||
// Go1.7 encoding format - nothing to do here
|
||||
default:
|
||||
panic(fmt.Errorf("unknown export format version %d (%q)", p.version, versionstr))
|
||||
}
|
||||
|
||||
// --- generic export data ---
|
||||
|
||||
// populate typList with predeclared "known" types
|
||||
p.typList = append(p.typList, predeclared...)
|
||||
|
||||
// read package data
|
||||
p.pfc.defalias = p.pkg()[1:]
|
||||
|
||||
// read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go)
|
||||
objcount := 0
|
||||
for {
|
||||
tag := p.tagOrIndex()
|
||||
if tag == endTag {
|
||||
break
|
||||
}
|
||||
p.obj(tag)
|
||||
objcount++
|
||||
}
|
||||
|
||||
// self-verification
|
||||
if count := p.int(); count != objcount {
|
||||
panic(fmt.Sprintf("got %d objects; want %d", objcount, count))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) pkg() string {
|
||||
// if the package was seen before, i is its index (>= 0)
|
||||
i := p.tagOrIndex()
|
||||
if i >= 0 {
|
||||
return p.pkgList[i]
|
||||
}
|
||||
|
||||
// otherwise, i is the package tag (< 0)
|
||||
if i != packageTag {
|
||||
panic(fmt.Sprintf("unexpected package tag %d", i))
|
||||
}
|
||||
|
||||
// read package data
|
||||
name := p.string()
|
||||
path := p.string()
|
||||
|
||||
// we should never see an empty package name
|
||||
if name == "" {
|
||||
panic("empty package name in import")
|
||||
}
|
||||
|
||||
// an empty path denotes the package we are currently importing;
|
||||
// it must be the first package we see
|
||||
if (path == "") != (len(p.pkgList) == 0) {
|
||||
panic(fmt.Sprintf("package path %q for pkg index %d", path, len(p.pkgList)))
|
||||
}
|
||||
|
||||
var fullName string
|
||||
if path != "" {
|
||||
fullName = "!" + path + "!" + name
|
||||
p.pfc.add_package_to_scope(fullName, path)
|
||||
} else {
|
||||
fullName = "#" + name
|
||||
}
|
||||
|
||||
// if the package was imported before, use that one; otherwise create a new one
|
||||
p.pkgList = append(p.pkgList, fullName)
|
||||
return p.pkgList[len(p.pkgList)-1]
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) obj(tag int) {
|
||||
switch tag {
|
||||
case constTag:
|
||||
p.pos()
|
||||
pkg, name := p.qualifiedName()
|
||||
typ := p.typ("")
|
||||
p.skipValue() // ignore const value, gocode's not interested
|
||||
p.callback(pkg, &ast.GenDecl{
|
||||
Tok: token.CONST,
|
||||
Specs: []ast.Spec{
|
||||
&ast.ValueSpec{
|
||||
Names: []*ast.Ident{ast.NewIdent(name)},
|
||||
Type: typ,
|
||||
Values: []ast.Expr{&ast.BasicLit{Kind: token.INT, Value: "0"}},
|
||||
},
|
||||
},
|
||||
})
|
||||
case typeTag:
|
||||
_ = p.typ("")
|
||||
|
||||
case varTag:
|
||||
p.pos()
|
||||
pkg, name := p.qualifiedName()
|
||||
typ := p.typ("")
|
||||
p.callback(pkg, &ast.GenDecl{
|
||||
Tok: token.VAR,
|
||||
Specs: []ast.Spec{
|
||||
&ast.ValueSpec{
|
||||
Names: []*ast.Ident{ast.NewIdent(name)},
|
||||
Type: typ,
|
||||
},
|
||||
},
|
||||
})
|
||||
case funcTag:
|
||||
p.pos()
|
||||
pkg, name := p.qualifiedName()
|
||||
params := p.paramList()
|
||||
results := p.paramList()
|
||||
p.callback(pkg, &ast.FuncDecl{
|
||||
Name: ast.NewIdent(name),
|
||||
Type: &ast.FuncType{Params: params, Results: results},
|
||||
})
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected object tag %d", tag))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) pos() {
|
||||
if !p.posInfoFormat {
|
||||
return
|
||||
}
|
||||
|
||||
file := p.prevFile
|
||||
line := p.prevLine
|
||||
if delta := p.int(); delta != 0 {
|
||||
// line changed
|
||||
line += delta
|
||||
} else if n := p.int(); n >= 0 {
|
||||
// file changed
|
||||
file = p.prevFile[:n] + p.string()
|
||||
p.prevFile = file
|
||||
line = p.int()
|
||||
}
|
||||
p.prevLine = line
|
||||
|
||||
// TODO(gri) register new position
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) qualifiedName() (pkg string, name string) {
|
||||
name = p.string()
|
||||
pkg = p.pkg()
|
||||
return pkg, name
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) reserveMaybe() int {
|
||||
if p.trackAllTypes {
|
||||
p.typList = append(p.typList, nil)
|
||||
return len(p.typList) - 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) recordMaybe(idx int, t ast.Expr) ast.Expr {
|
||||
if idx == -1 {
|
||||
return t
|
||||
}
|
||||
p.typList[idx] = t
|
||||
return t
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) record(t ast.Expr) {
|
||||
p.typList = append(p.typList, t)
|
||||
}
|
||||
|
||||
// parent is the package which declared the type; parent == nil means
|
||||
// the package currently imported. The parent package is needed for
|
||||
// exported struct fields and interface methods which don't contain
|
||||
// explicit package information in the export data.
|
||||
func (p *gc_bin_parser) typ(parent string) ast.Expr {
|
||||
// if the type was seen before, i is its index (>= 0)
|
||||
i := p.tagOrIndex()
|
||||
if i >= 0 {
|
||||
return p.typList[i]
|
||||
}
|
||||
|
||||
// otherwise, i is the type tag (< 0)
|
||||
switch i {
|
||||
case namedTag:
|
||||
// read type object
|
||||
p.pos()
|
||||
parent, name := p.qualifiedName()
|
||||
tdecl := &ast.GenDecl{
|
||||
Tok: token.TYPE,
|
||||
Specs: []ast.Spec{
|
||||
&ast.TypeSpec{
|
||||
Name: ast.NewIdent(name),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// record it right away (underlying type can contain refs to t)
|
||||
t := &ast.SelectorExpr{X: ast.NewIdent(parent), Sel: ast.NewIdent(name)}
|
||||
p.record(t)
|
||||
|
||||
// parse underlying type
|
||||
t0 := p.typ(parent)
|
||||
tdecl.Specs[0].(*ast.TypeSpec).Type = t0
|
||||
|
||||
p.callback(parent, tdecl)
|
||||
|
||||
// interfaces have no methods
|
||||
if _, ok := t0.(*ast.InterfaceType); ok {
|
||||
return t
|
||||
}
|
||||
|
||||
// read associated methods
|
||||
for i := p.int(); i > 0; i-- {
|
||||
// TODO(gri) replace this with something closer to fieldName
|
||||
p.pos()
|
||||
name := p.string()
|
||||
if !exported(name) {
|
||||
p.pkg()
|
||||
}
|
||||
|
||||
recv := p.paramList()
|
||||
params := p.paramList()
|
||||
results := p.paramList()
|
||||
p.int() // go:nointerface pragma - discarded
|
||||
|
||||
strip_method_receiver(recv)
|
||||
p.callback(parent, &ast.FuncDecl{
|
||||
Recv: recv,
|
||||
Name: ast.NewIdent(name),
|
||||
Type: &ast.FuncType{Params: params, Results: results},
|
||||
})
|
||||
}
|
||||
return t
|
||||
case arrayTag:
|
||||
i := p.reserveMaybe()
|
||||
n := p.int64()
|
||||
elt := p.typ(parent)
|
||||
return p.recordMaybe(i, &ast.ArrayType{
|
||||
Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprint(n)},
|
||||
Elt: elt,
|
||||
})
|
||||
|
||||
case sliceTag:
|
||||
i := p.reserveMaybe()
|
||||
elt := p.typ(parent)
|
||||
return p.recordMaybe(i, &ast.ArrayType{Len: nil, Elt: elt})
|
||||
|
||||
case dddTag:
|
||||
i := p.reserveMaybe()
|
||||
elt := p.typ(parent)
|
||||
return p.recordMaybe(i, &ast.Ellipsis{Elt: elt})
|
||||
|
||||
case structTag:
|
||||
i := p.reserveMaybe()
|
||||
return p.recordMaybe(i, p.structType(parent))
|
||||
|
||||
case pointerTag:
|
||||
i := p.reserveMaybe()
|
||||
elt := p.typ(parent)
|
||||
return p.recordMaybe(i, &ast.StarExpr{X: elt})
|
||||
|
||||
case signatureTag:
|
||||
i := p.reserveMaybe()
|
||||
params := p.paramList()
|
||||
results := p.paramList()
|
||||
return p.recordMaybe(i, &ast.FuncType{Params: params, Results: results})
|
||||
|
||||
case interfaceTag:
|
||||
i := p.reserveMaybe()
|
||||
if p.int() != 0 {
|
||||
panic("unexpected embedded interface")
|
||||
}
|
||||
methods := p.methodList(parent)
|
||||
return p.recordMaybe(i, &ast.InterfaceType{Methods: &ast.FieldList{List: methods}})
|
||||
|
||||
case mapTag:
|
||||
i := p.reserveMaybe()
|
||||
key := p.typ(parent)
|
||||
val := p.typ(parent)
|
||||
return p.recordMaybe(i, &ast.MapType{Key: key, Value: val})
|
||||
|
||||
case chanTag:
|
||||
i := p.reserveMaybe()
|
||||
dir := ast.SEND | ast.RECV
|
||||
switch d := p.int(); d {
|
||||
case 1:
|
||||
dir = ast.RECV
|
||||
case 2:
|
||||
dir = ast.SEND
|
||||
case 3:
|
||||
// already set
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected channel dir %d", d))
|
||||
}
|
||||
elt := p.typ(parent)
|
||||
return p.recordMaybe(i, &ast.ChanType{Dir: dir, Value: elt})
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected type tag %d", i))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) structType(parent string) *ast.StructType {
|
||||
var fields []*ast.Field
|
||||
if n := p.int(); n > 0 {
|
||||
fields = make([]*ast.Field, n)
|
||||
for i := range fields {
|
||||
fields[i] = p.field(parent)
|
||||
p.string() // tag, not interested in tags
|
||||
}
|
||||
}
|
||||
return &ast.StructType{Fields: &ast.FieldList{List: fields}}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) field(parent string) *ast.Field {
|
||||
p.pos()
|
||||
_, name := p.fieldName(parent)
|
||||
typ := p.typ(parent)
|
||||
|
||||
var names []*ast.Ident
|
||||
if name != "" {
|
||||
names = []*ast.Ident{ast.NewIdent(name)}
|
||||
}
|
||||
return &ast.Field{
|
||||
Names: names,
|
||||
Type: typ,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) methodList(parent string) (methods []*ast.Field) {
|
||||
if n := p.int(); n > 0 {
|
||||
methods = make([]*ast.Field, n)
|
||||
for i := range methods {
|
||||
methods[i] = p.method(parent)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) method(parent string) *ast.Field {
|
||||
p.pos()
|
||||
_, name := p.fieldName(parent)
|
||||
params := p.paramList()
|
||||
results := p.paramList()
|
||||
return &ast.Field{
|
||||
Names: []*ast.Ident{ast.NewIdent(name)},
|
||||
Type: &ast.FuncType{Params: params, Results: results},
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) fieldName(parent string) (string, string) {
|
||||
name := p.string()
|
||||
pkg := parent
|
||||
if p.version == 0 && name == "_" {
|
||||
// versions < 1 don't export a package for _ fields
|
||||
// TODO: remove once versions are not supported anymore
|
||||
return pkg, name
|
||||
}
|
||||
if name != "" && !exported(name) {
|
||||
// explicitly qualified field
|
||||
if name == "?" {
|
||||
name = ""
|
||||
}
|
||||
pkg = p.pkg()
|
||||
}
|
||||
return pkg, name
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) paramList() *ast.FieldList {
|
||||
n := p.int()
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
// negative length indicates unnamed parameters
|
||||
named := true
|
||||
if n < 0 {
|
||||
n = -n
|
||||
named = false
|
||||
}
|
||||
// n > 0
|
||||
flds := make([]*ast.Field, n)
|
||||
for i := range flds {
|
||||
flds[i] = p.param(named)
|
||||
}
|
||||
return &ast.FieldList{List: flds}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) param(named bool) *ast.Field {
|
||||
t := p.typ("")
|
||||
|
||||
name := "?"
|
||||
if named {
|
||||
name = p.string()
|
||||
if name == "" {
|
||||
panic("expected named parameter")
|
||||
}
|
||||
if name != "_" {
|
||||
p.pkg()
|
||||
}
|
||||
if i := strings.Index(name, "·"); i > 0 {
|
||||
name = name[:i] // cut off gc-specific parameter numbering
|
||||
}
|
||||
}
|
||||
|
||||
// read and discard compiler-specific info
|
||||
p.string()
|
||||
|
||||
return &ast.Field{
|
||||
Names: []*ast.Ident{ast.NewIdent(name)},
|
||||
Type: t,
|
||||
}
|
||||
}
|
||||
|
||||
func exported(name string) bool {
|
||||
ch, _ := utf8.DecodeRuneInString(name)
|
||||
return unicode.IsUpper(ch)
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) skipValue() {
|
||||
switch tag := p.tagOrIndex(); tag {
|
||||
case falseTag, trueTag:
|
||||
case int64Tag:
|
||||
p.int64()
|
||||
case floatTag:
|
||||
p.float()
|
||||
case complexTag:
|
||||
p.float()
|
||||
p.float()
|
||||
case stringTag:
|
||||
p.string()
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected value tag %d", tag))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) float() {
|
||||
sign := p.int()
|
||||
if sign == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
p.int() // exp
|
||||
p.string() // mant
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Low-level decoders
|
||||
|
||||
func (p *gc_bin_parser) tagOrIndex() int {
|
||||
if p.debugFormat {
|
||||
p.marker('t')
|
||||
}
|
||||
|
||||
return int(p.rawInt64())
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) int() int {
|
||||
x := p.int64()
|
||||
if int64(int(x)) != x {
|
||||
panic("exported integer too large")
|
||||
}
|
||||
return int(x)
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) int64() int64 {
|
||||
if p.debugFormat {
|
||||
p.marker('i')
|
||||
}
|
||||
|
||||
return p.rawInt64()
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) string() string {
|
||||
if p.debugFormat {
|
||||
p.marker('s')
|
||||
}
|
||||
// if the string was seen before, i is its index (>= 0)
|
||||
// (the empty string is at index 0)
|
||||
i := p.rawInt64()
|
||||
if i >= 0 {
|
||||
return p.strList[i]
|
||||
}
|
||||
// otherwise, i is the negative string length (< 0)
|
||||
if n := int(-i); n <= cap(p.buf) {
|
||||
p.buf = p.buf[:n]
|
||||
} else {
|
||||
p.buf = make([]byte, n)
|
||||
}
|
||||
for i := range p.buf {
|
||||
p.buf[i] = p.rawByte()
|
||||
}
|
||||
s := string(p.buf)
|
||||
p.strList = append(p.strList, s)
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) marker(want byte) {
|
||||
if got := p.rawByte(); got != want {
|
||||
panic(fmt.Sprintf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read))
|
||||
}
|
||||
|
||||
pos := p.read
|
||||
if n := int(p.rawInt64()); n != pos {
|
||||
panic(fmt.Sprintf("incorrect position: got %d; want %d", n, pos))
|
||||
}
|
||||
}
|
||||
|
||||
// rawInt64 should only be used by low-level decoders.
|
||||
func (p *gc_bin_parser) rawInt64() int64 {
|
||||
i, err := binary.ReadVarint(p)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("read error: %v", err))
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// rawStringln should only be used to read the initial version string.
|
||||
func (p *gc_bin_parser) rawStringln(b byte) string {
|
||||
p.buf = p.buf[:0]
|
||||
for b != '\n' {
|
||||
p.buf = append(p.buf, b)
|
||||
b = p.rawByte()
|
||||
}
|
||||
return string(p.buf)
|
||||
}
|
||||
|
||||
// needed for binary.ReadVarint in rawInt64
|
||||
func (p *gc_bin_parser) ReadByte() (byte, error) {
|
||||
return p.rawByte(), nil
|
||||
}
|
||||
|
||||
// byte is the bottleneck interface for reading p.data.
|
||||
// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
|
||||
// rawByte should only be used by low-level decoders.
|
||||
func (p *gc_bin_parser) rawByte() byte {
|
||||
b := p.data[0]
|
||||
r := 1
|
||||
if b == '|' {
|
||||
b = p.data[1]
|
||||
r = 2
|
||||
switch b {
|
||||
case 'S':
|
||||
b = '$'
|
||||
case '|':
|
||||
// nothing to do
|
||||
default:
|
||||
panic("unexpected escape sequence in export data")
|
||||
}
|
||||
}
|
||||
p.data = p.data[r:]
|
||||
p.read += r
|
||||
return b
|
||||
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Export format
|
||||
|
||||
// Tags. Must be < 0.
|
||||
const (
|
||||
// Objects
|
||||
packageTag = -(iota + 1)
|
||||
constTag
|
||||
typeTag
|
||||
varTag
|
||||
funcTag
|
||||
endTag
|
||||
|
||||
// Types
|
||||
namedTag
|
||||
arrayTag
|
||||
sliceTag
|
||||
dddTag
|
||||
structTag
|
||||
pointerTag
|
||||
signatureTag
|
||||
interfaceTag
|
||||
mapTag
|
||||
chanTag
|
||||
|
||||
// Values
|
||||
falseTag
|
||||
trueTag
|
||||
int64Tag
|
||||
floatTag
|
||||
fractionTag // not used by gc
|
||||
complexTag
|
||||
stringTag
|
||||
unknownTag // not used by gc (only appears in packages with errors)
|
||||
)
|
||||
|
||||
var predeclared = []ast.Expr{
|
||||
// basic types
|
||||
ast.NewIdent("bool"),
|
||||
ast.NewIdent("int"),
|
||||
ast.NewIdent("int8"),
|
||||
ast.NewIdent("int16"),
|
||||
ast.NewIdent("int32"),
|
||||
ast.NewIdent("int64"),
|
||||
ast.NewIdent("uint"),
|
||||
ast.NewIdent("uint8"),
|
||||
ast.NewIdent("uint16"),
|
||||
ast.NewIdent("uint32"),
|
||||
ast.NewIdent("uint64"),
|
||||
ast.NewIdent("uintptr"),
|
||||
ast.NewIdent("float32"),
|
||||
ast.NewIdent("float64"),
|
||||
ast.NewIdent("complex64"),
|
||||
ast.NewIdent("complex128"),
|
||||
ast.NewIdent("string"),
|
||||
|
||||
// aliases
|
||||
ast.NewIdent("byte"),
|
||||
ast.NewIdent("rune"),
|
||||
|
||||
// error
|
||||
ast.NewIdent("error"),
|
||||
|
||||
// TODO(nsf): don't think those are used in just package type info,
|
||||
// maybe for consts, but we are not interested in that
|
||||
// untyped types
|
||||
ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedBool],
|
||||
ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedInt],
|
||||
ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedRune],
|
||||
ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedFloat],
|
||||
ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedComplex],
|
||||
ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedString],
|
||||
ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedNil],
|
||||
|
||||
// package unsafe
|
||||
&ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")},
|
||||
|
||||
// invalid type
|
||||
ast.NewIdent(">_<"), // TODO: types.Typ[types.Invalid], // only appears in packages with errors
|
||||
|
||||
// used internally by gc; never used by this package or in .a files
|
||||
ast.NewIdent("any"),
|
||||
}
|
|
@ -1,678 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"text/scanner"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// gc_parser
|
||||
//
|
||||
// The following part of the code may contain portions of the code from the Go
|
||||
// standard library, which tells me to retain their copyright notice:
|
||||
//
|
||||
// Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type gc_parser struct {
|
||||
scanner scanner.Scanner
|
||||
tok rune
|
||||
lit string
|
||||
path_to_name map[string]string
|
||||
beautify bool
|
||||
pfc *package_file_cache
|
||||
}
|
||||
|
||||
func (p *gc_parser) init(data []byte, pfc *package_file_cache) {
|
||||
p.scanner.Init(bytes.NewReader(data))
|
||||
p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
|
||||
p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanStrings |
|
||||
scanner.ScanComments | scanner.ScanChars | scanner.SkipComments
|
||||
p.scanner.Whitespace = 1<<'\t' | 1<<' ' | 1<<'\r' | 1<<'\v' | 1<<'\f'
|
||||
p.scanner.Filename = "package.go"
|
||||
p.next()
|
||||
// and the built-in "unsafe" package to the path_to_name map
|
||||
p.path_to_name = map[string]string{"unsafe": "unsafe"}
|
||||
p.pfc = pfc
|
||||
}
|
||||
|
||||
func (p *gc_parser) next() {
|
||||
p.tok = p.scanner.Scan()
|
||||
switch p.tok {
|
||||
case scanner.Ident, scanner.Int, scanner.String:
|
||||
p.lit = p.scanner.TokenText()
|
||||
default:
|
||||
p.lit = ""
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_parser) error(msg string) {
|
||||
panic(errors.New(msg))
|
||||
}
|
||||
|
||||
func (p *gc_parser) errorf(format string, args ...interface{}) {
|
||||
p.error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (p *gc_parser) expect(tok rune) string {
|
||||
lit := p.lit
|
||||
if p.tok != tok {
|
||||
p.errorf("expected %s, got %s (%q)", scanner.TokenString(tok),
|
||||
scanner.TokenString(p.tok), lit)
|
||||
}
|
||||
p.next()
|
||||
return lit
|
||||
}
|
||||
|
||||
func (p *gc_parser) expect_keyword(keyword string) {
|
||||
lit := p.expect(scanner.Ident)
|
||||
if lit != keyword {
|
||||
p.errorf("expected keyword: %s, got: %q", keyword, lit)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_parser) expect_special(what string) {
|
||||
i := 0
|
||||
for i < len(what) {
|
||||
if p.tok != rune(what[i]) {
|
||||
break
|
||||
}
|
||||
|
||||
nc := p.scanner.Peek()
|
||||
if i != len(what)-1 && nc <= ' ' {
|
||||
break
|
||||
}
|
||||
|
||||
p.next()
|
||||
i++
|
||||
}
|
||||
|
||||
if i < len(what) {
|
||||
p.errorf("expected: %q, got: %q", what, what[0:i])
|
||||
}
|
||||
}
|
||||
|
||||
// dotIdentifier = "?" | ( ident | '·' ) { ident | int | '·' } .
|
||||
// we're doing lexer job here, kind of
|
||||
func (p *gc_parser) parse_dot_ident() string {
|
||||
if p.tok == '?' {
|
||||
p.next()
|
||||
return "?"
|
||||
}
|
||||
|
||||
ident := ""
|
||||
sep := 'x'
|
||||
i, j := 0, -1
|
||||
for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
|
||||
ident += p.lit
|
||||
if p.tok == '·' {
|
||||
ident += "·"
|
||||
j = i
|
||||
i++
|
||||
}
|
||||
i += len(p.lit)
|
||||
sep = p.scanner.Peek()
|
||||
p.next()
|
||||
}
|
||||
// middot = \xc2\xb7
|
||||
if j != -1 && i > j+1 {
|
||||
c := ident[j+2]
|
||||
if c >= '0' && c <= '9' {
|
||||
ident = ident[0:j]
|
||||
}
|
||||
}
|
||||
return ident
|
||||
}
|
||||
|
||||
// ImportPath = string_lit .
|
||||
// quoted name of the path, but we return it as an identifier, taking an alias
|
||||
// from 'pathToAlias' map, it is filled by import statements
|
||||
func (p *gc_parser) parse_package() *ast.Ident {
|
||||
path, err := strconv.Unquote(p.expect(scanner.String))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return ast.NewIdent(path)
|
||||
}
|
||||
|
||||
// ExportedName = "@" ImportPath "." dotIdentifier .
|
||||
func (p *gc_parser) parse_exported_name() *ast.SelectorExpr {
|
||||
p.expect('@')
|
||||
pkg := p.parse_package()
|
||||
if pkg.Name == "" {
|
||||
pkg.Name = "#" + p.pfc.defalias
|
||||
} else {
|
||||
pkg.Name = p.path_to_name[pkg.Name]
|
||||
}
|
||||
p.expect('.')
|
||||
name := ast.NewIdent(p.parse_dot_ident())
|
||||
return &ast.SelectorExpr{X: pkg, Sel: name}
|
||||
}
|
||||
|
||||
// Name = identifier | "?" | ExportedName .
|
||||
func (p *gc_parser) parse_name() (string, ast.Expr) {
|
||||
switch p.tok {
|
||||
case scanner.Ident:
|
||||
name := p.lit
|
||||
p.next()
|
||||
return name, ast.NewIdent(name)
|
||||
case '?':
|
||||
p.next()
|
||||
return "?", ast.NewIdent("?")
|
||||
case '@':
|
||||
en := p.parse_exported_name()
|
||||
return en.Sel.Name, en
|
||||
}
|
||||
p.error("name expected")
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Field = Name Type [ string_lit ] .
|
||||
func (p *gc_parser) parse_field() *ast.Field {
|
||||
var tag string
|
||||
name, _ := p.parse_name()
|
||||
typ := p.parse_type()
|
||||
if p.tok == scanner.String {
|
||||
tag = p.expect(scanner.String)
|
||||
}
|
||||
|
||||
var names []*ast.Ident
|
||||
if name != "?" {
|
||||
names = []*ast.Ident{ast.NewIdent(name)}
|
||||
}
|
||||
|
||||
return &ast.Field{
|
||||
Names: names,
|
||||
Type: typ,
|
||||
Tag: &ast.BasicLit{Kind: token.STRING, Value: tag},
|
||||
}
|
||||
}
|
||||
|
||||
// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
|
||||
func (p *gc_parser) parse_parameter() *ast.Field {
|
||||
// name
|
||||
name, _ := p.parse_name()
|
||||
|
||||
// type
|
||||
var typ ast.Expr
|
||||
if p.tok == '.' {
|
||||
p.expect_special("...")
|
||||
typ = &ast.Ellipsis{Elt: p.parse_type()}
|
||||
} else {
|
||||
typ = p.parse_type()
|
||||
}
|
||||
|
||||
var tag string
|
||||
if p.tok == scanner.String {
|
||||
tag = p.expect(scanner.String)
|
||||
}
|
||||
|
||||
return &ast.Field{
|
||||
Names: []*ast.Ident{ast.NewIdent(name)},
|
||||
Type: typ,
|
||||
Tag: &ast.BasicLit{Kind: token.STRING, Value: tag},
|
||||
}
|
||||
}
|
||||
|
||||
// Parameters = "(" [ ParameterList ] ")" .
|
||||
// ParameterList = { Parameter "," } Parameter .
|
||||
func (p *gc_parser) parse_parameters() *ast.FieldList {
|
||||
flds := []*ast.Field{}
|
||||
parse_parameter := func() {
|
||||
par := p.parse_parameter()
|
||||
flds = append(flds, par)
|
||||
}
|
||||
|
||||
p.expect('(')
|
||||
if p.tok != ')' {
|
||||
parse_parameter()
|
||||
for p.tok == ',' {
|
||||
p.next()
|
||||
parse_parameter()
|
||||
}
|
||||
}
|
||||
p.expect(')')
|
||||
return &ast.FieldList{List: flds}
|
||||
}
|
||||
|
||||
// Signature = Parameters [ Result ] .
|
||||
// Result = Type | Parameters .
|
||||
func (p *gc_parser) parse_signature() *ast.FuncType {
|
||||
var params *ast.FieldList
|
||||
var results *ast.FieldList
|
||||
|
||||
params = p.parse_parameters()
|
||||
switch p.tok {
|
||||
case scanner.Ident, '[', '*', '<', '@':
|
||||
fld := &ast.Field{Type: p.parse_type()}
|
||||
results = &ast.FieldList{List: []*ast.Field{fld}}
|
||||
case '(':
|
||||
results = p.parse_parameters()
|
||||
}
|
||||
return &ast.FuncType{Params: params, Results: results}
|
||||
}
|
||||
|
||||
// MethodOrEmbedSpec = Name [ Signature ] .
|
||||
func (p *gc_parser) parse_method_or_embed_spec() *ast.Field {
|
||||
name, nameexpr := p.parse_name()
|
||||
if p.tok == '(' {
|
||||
typ := p.parse_signature()
|
||||
return &ast.Field{
|
||||
Names: []*ast.Ident{ast.NewIdent(name)},
|
||||
Type: typ,
|
||||
}
|
||||
}
|
||||
|
||||
return &ast.Field{
|
||||
Type: nameexpr,
|
||||
}
|
||||
}
|
||||
|
||||
// int_lit = [ "-" | "+" ] { "0" ... "9" } .
|
||||
func (p *gc_parser) parse_int() {
|
||||
switch p.tok {
|
||||
case '-', '+':
|
||||
p.next()
|
||||
}
|
||||
p.expect(scanner.Int)
|
||||
}
|
||||
|
||||
// number = int_lit [ "p" int_lit ] .
|
||||
func (p *gc_parser) parse_number() {
|
||||
p.parse_int()
|
||||
if p.lit == "p" {
|
||||
p.next()
|
||||
p.parse_int()
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------------
|
||||
// gc_parser.types
|
||||
//-------------------------------------------------------------------------------
|
||||
|
||||
// InterfaceType = "interface" "{" [ MethodOrEmbedList ] "}" .
|
||||
// MethodOrEmbedList = MethodOrEmbedSpec { ";" MethodOrEmbedSpec } .
|
||||
func (p *gc_parser) parse_interface_type() ast.Expr {
|
||||
var methods []*ast.Field
|
||||
parse_method := func() {
|
||||
meth := p.parse_method_or_embed_spec()
|
||||
methods = append(methods, meth)
|
||||
}
|
||||
|
||||
p.expect_keyword("interface")
|
||||
p.expect('{')
|
||||
if p.tok != '}' {
|
||||
parse_method()
|
||||
for p.tok == ';' {
|
||||
p.next()
|
||||
parse_method()
|
||||
}
|
||||
}
|
||||
p.expect('}')
|
||||
return &ast.InterfaceType{Methods: &ast.FieldList{List: methods}}
|
||||
}
|
||||
|
||||
// StructType = "struct" "{" [ FieldList ] "}" .
|
||||
// FieldList = Field { ";" Field } .
|
||||
func (p *gc_parser) parse_struct_type() ast.Expr {
|
||||
var fields []*ast.Field
|
||||
parse_field := func() {
|
||||
fld := p.parse_field()
|
||||
fields = append(fields, fld)
|
||||
}
|
||||
|
||||
p.expect_keyword("struct")
|
||||
p.expect('{')
|
||||
if p.tok != '}' {
|
||||
parse_field()
|
||||
for p.tok == ';' {
|
||||
p.next()
|
||||
parse_field()
|
||||
}
|
||||
}
|
||||
p.expect('}')
|
||||
return &ast.StructType{Fields: &ast.FieldList{List: fields}}
|
||||
}
|
||||
|
||||
// MapType = "map" "[" Type "]" Type .
|
||||
func (p *gc_parser) parse_map_type() ast.Expr {
|
||||
p.expect_keyword("map")
|
||||
p.expect('[')
|
||||
key := p.parse_type()
|
||||
p.expect(']')
|
||||
elt := p.parse_type()
|
||||
return &ast.MapType{Key: key, Value: elt}
|
||||
}
|
||||
|
||||
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
|
||||
func (p *gc_parser) parse_chan_type() ast.Expr {
|
||||
dir := ast.SEND | ast.RECV
|
||||
if p.tok == scanner.Ident {
|
||||
p.expect_keyword("chan")
|
||||
if p.tok == '<' {
|
||||
p.expect_special("<-")
|
||||
dir = ast.SEND
|
||||
}
|
||||
} else {
|
||||
p.expect_special("<-")
|
||||
p.expect_keyword("chan")
|
||||
dir = ast.RECV
|
||||
}
|
||||
|
||||
elt := p.parse_type()
|
||||
return &ast.ChanType{Dir: dir, Value: elt}
|
||||
}
|
||||
|
||||
// ArrayOrSliceType = ArrayType | SliceType .
|
||||
// ArrayType = "[" int_lit "]" Type .
|
||||
// SliceType = "[" "]" Type .
|
||||
func (p *gc_parser) parse_array_or_slice_type() ast.Expr {
|
||||
p.expect('[')
|
||||
if p.tok == ']' {
|
||||
// SliceType
|
||||
p.next() // skip ']'
|
||||
return &ast.ArrayType{Len: nil, Elt: p.parse_type()}
|
||||
}
|
||||
|
||||
// ArrayType
|
||||
lit := p.expect(scanner.Int)
|
||||
p.expect(']')
|
||||
return &ast.ArrayType{
|
||||
Len: &ast.BasicLit{Kind: token.INT, Value: lit},
|
||||
Elt: p.parse_type(),
|
||||
}
|
||||
}
|
||||
|
||||
// Type =
|
||||
// BasicType | TypeName | ArrayType | SliceType | StructType |
|
||||
// PointerType | FuncType | InterfaceType | MapType | ChanType |
|
||||
// "(" Type ")" .
|
||||
// BasicType = ident .
|
||||
// TypeName = ExportedName .
|
||||
// SliceType = "[" "]" Type .
|
||||
// PointerType = "*" Type .
|
||||
// FuncType = "func" Signature .
|
||||
func (p *gc_parser) parse_type() ast.Expr {
|
||||
switch p.tok {
|
||||
case scanner.Ident:
|
||||
switch p.lit {
|
||||
case "struct":
|
||||
return p.parse_struct_type()
|
||||
case "func":
|
||||
p.next()
|
||||
return p.parse_signature()
|
||||
case "interface":
|
||||
return p.parse_interface_type()
|
||||
case "map":
|
||||
return p.parse_map_type()
|
||||
case "chan":
|
||||
return p.parse_chan_type()
|
||||
default:
|
||||
lit := p.lit
|
||||
p.next()
|
||||
return ast.NewIdent(lit)
|
||||
}
|
||||
case '@':
|
||||
return p.parse_exported_name()
|
||||
case '[':
|
||||
return p.parse_array_or_slice_type()
|
||||
case '*':
|
||||
p.next()
|
||||
return &ast.StarExpr{X: p.parse_type()}
|
||||
case '<':
|
||||
return p.parse_chan_type()
|
||||
case '(':
|
||||
p.next()
|
||||
typ := p.parse_type()
|
||||
p.expect(')')
|
||||
return typ
|
||||
}
|
||||
p.errorf("unexpected token: %s", scanner.TokenString(p.tok))
|
||||
return nil
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------------
|
||||
// gc_parser.declarations
|
||||
//-------------------------------------------------------------------------------
|
||||
|
||||
// ImportDecl = "import" identifier string_lit .
|
||||
func (p *gc_parser) parse_import_decl() {
|
||||
p.expect_keyword("import")
|
||||
alias := p.expect(scanner.Ident)
|
||||
path := p.parse_package()
|
||||
fullName := "!" + path.Name + "!" + alias
|
||||
p.path_to_name[path.Name] = fullName
|
||||
p.pfc.add_package_to_scope(fullName, path.Name)
|
||||
}
|
||||
|
||||
// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
|
||||
// Literal = bool_lit | int_lit | float_lit | complex_lit | string_lit .
|
||||
// bool_lit = "true" | "false" .
|
||||
// complex_lit = "(" float_lit "+" float_lit ")" .
|
||||
// rune_lit = "(" int_lit "+" int_lit ")" .
|
||||
// string_lit = `"` { unicode_char } `"` .
|
||||
func (p *gc_parser) parse_const_decl() (string, *ast.GenDecl) {
|
||||
// TODO: do we really need actual const value? gocode doesn't use this
|
||||
p.expect_keyword("const")
|
||||
name := p.parse_exported_name()
|
||||
|
||||
var typ ast.Expr
|
||||
if p.tok != '=' {
|
||||
typ = p.parse_type()
|
||||
}
|
||||
|
||||
p.expect('=')
|
||||
|
||||
// skip the value
|
||||
switch p.tok {
|
||||
case scanner.Ident:
|
||||
// must be bool, true or false
|
||||
p.next()
|
||||
case '-', '+', scanner.Int:
|
||||
// number
|
||||
p.parse_number()
|
||||
case '(':
|
||||
// complex_lit or rune_lit
|
||||
p.next() // skip '('
|
||||
if p.tok == scanner.Char {
|
||||
p.next()
|
||||
} else {
|
||||
p.parse_number()
|
||||
}
|
||||
p.expect('+')
|
||||
p.parse_number()
|
||||
p.expect(')')
|
||||
case scanner.Char:
|
||||
p.next()
|
||||
case scanner.String:
|
||||
p.next()
|
||||
default:
|
||||
p.error("expected literal")
|
||||
}
|
||||
|
||||
return name.X.(*ast.Ident).Name, &ast.GenDecl{
|
||||
Tok: token.CONST,
|
||||
Specs: []ast.Spec{
|
||||
&ast.ValueSpec{
|
||||
Names: []*ast.Ident{name.Sel},
|
||||
Type: typ,
|
||||
Values: []ast.Expr{&ast.BasicLit{Kind: token.INT, Value: "0"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TypeDecl = "type" ExportedName Type .
|
||||
func (p *gc_parser) parse_type_decl() (string, *ast.GenDecl) {
|
||||
p.expect_keyword("type")
|
||||
name := p.parse_exported_name()
|
||||
typ := p.parse_type()
|
||||
return name.X.(*ast.Ident).Name, &ast.GenDecl{
|
||||
Tok: token.TYPE,
|
||||
Specs: []ast.Spec{
|
||||
&ast.TypeSpec{
|
||||
Name: name.Sel,
|
||||
Type: typ,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// VarDecl = "var" ExportedName Type .
|
||||
func (p *gc_parser) parse_var_decl() (string, *ast.GenDecl) {
|
||||
p.expect_keyword("var")
|
||||
name := p.parse_exported_name()
|
||||
typ := p.parse_type()
|
||||
return name.X.(*ast.Ident).Name, &ast.GenDecl{
|
||||
Tok: token.VAR,
|
||||
Specs: []ast.Spec{
|
||||
&ast.ValueSpec{
|
||||
Names: []*ast.Ident{name.Sel},
|
||||
Type: typ,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// FuncBody = "{" ... "}" .
|
||||
func (p *gc_parser) parse_func_body() {
|
||||
p.expect('{')
|
||||
for i := 1; i > 0; p.next() {
|
||||
switch p.tok {
|
||||
case '{':
|
||||
i++
|
||||
case '}':
|
||||
i--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FuncDecl = "func" ExportedName Signature [ FuncBody ] .
|
||||
func (p *gc_parser) parse_func_decl() (string, *ast.FuncDecl) {
|
||||
// "func" was already consumed by lookahead
|
||||
name := p.parse_exported_name()
|
||||
typ := p.parse_signature()
|
||||
if p.tok == '{' {
|
||||
p.parse_func_body()
|
||||
}
|
||||
return name.X.(*ast.Ident).Name, &ast.FuncDecl{
|
||||
Name: name.Sel,
|
||||
Type: typ,
|
||||
}
|
||||
}
|
||||
|
||||
func strip_method_receiver(recv *ast.FieldList) string {
|
||||
var sel *ast.SelectorExpr
|
||||
|
||||
// find selector expression
|
||||
typ := recv.List[0].Type
|
||||
switch t := typ.(type) {
|
||||
case *ast.StarExpr:
|
||||
sel = t.X.(*ast.SelectorExpr)
|
||||
case *ast.SelectorExpr:
|
||||
sel = t
|
||||
}
|
||||
|
||||
// extract package path
|
||||
pkg := sel.X.(*ast.Ident).Name
|
||||
|
||||
// write back stripped type
|
||||
switch t := typ.(type) {
|
||||
case *ast.StarExpr:
|
||||
t.X = sel.Sel
|
||||
case *ast.SelectorExpr:
|
||||
recv.List[0].Type = sel.Sel
|
||||
}
|
||||
|
||||
return pkg
|
||||
}
|
||||
|
||||
// MethodDecl = "func" Receiver Name Signature .
|
||||
// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" [ FuncBody ] .
|
||||
func (p *gc_parser) parse_method_decl() (string, *ast.FuncDecl) {
|
||||
recv := p.parse_parameters()
|
||||
pkg := strip_method_receiver(recv)
|
||||
name, _ := p.parse_name()
|
||||
typ := p.parse_signature()
|
||||
if p.tok == '{' {
|
||||
p.parse_func_body()
|
||||
}
|
||||
return pkg, &ast.FuncDecl{
|
||||
Recv: recv,
|
||||
Name: ast.NewIdent(name),
|
||||
Type: typ,
|
||||
}
|
||||
}
|
||||
|
||||
// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
|
||||
func (p *gc_parser) parse_decl() (pkg string, decl ast.Decl) {
|
||||
switch p.lit {
|
||||
case "import":
|
||||
p.parse_import_decl()
|
||||
case "const":
|
||||
pkg, decl = p.parse_const_decl()
|
||||
case "type":
|
||||
pkg, decl = p.parse_type_decl()
|
||||
case "var":
|
||||
pkg, decl = p.parse_var_decl()
|
||||
case "func":
|
||||
p.next()
|
||||
if p.tok == '(' {
|
||||
pkg, decl = p.parse_method_decl()
|
||||
} else {
|
||||
pkg, decl = p.parse_func_decl()
|
||||
}
|
||||
}
|
||||
p.expect('\n')
|
||||
return
|
||||
}
|
||||
|
||||
// Export = PackageClause { Decl } "$$" .
|
||||
// PackageClause = "package" identifier [ "safe" ] "\n" .
|
||||
func (p *gc_parser) parse_export(callback func(string, ast.Decl)) {
|
||||
p.expect_keyword("package")
|
||||
p.pfc.defalias = p.expect(scanner.Ident)
|
||||
if p.tok != '\n' {
|
||||
p.expect_keyword("safe")
|
||||
}
|
||||
p.expect('\n')
|
||||
|
||||
for p.tok != '$' && p.tok != scanner.EOF {
|
||||
pkg, decl := p.parse_decl()
|
||||
if decl != nil {
|
||||
callback(pkg, decl)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
// +build !go1.7
|
||||
|
||||
package main
|
||||
|
||||
func init() {
|
||||
knownPackageIdents["context"] = "golang.org/x/net/context"
|
||||
}
|
|
@ -1,141 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
)
|
||||
|
||||
// All the code in this file serves single purpose:
|
||||
// It separates a function with the cursor inside and the rest of the code. I'm
|
||||
// doing that, because sometimes parser is not able to recover itself from an
|
||||
// error and the autocompletion results become less complete.
|
||||
|
||||
type tok_pos_pair struct {
|
||||
tok token.Token
|
||||
pos token.Pos
|
||||
}
|
||||
|
||||
type tok_collection struct {
|
||||
tokens []tok_pos_pair
|
||||
fset *token.FileSet
|
||||
}
|
||||
|
||||
func (this *tok_collection) next(s *scanner.Scanner) bool {
|
||||
pos, tok, _ := s.Scan()
|
||||
if tok == token.EOF {
|
||||
return false
|
||||
}
|
||||
|
||||
this.tokens = append(this.tokens, tok_pos_pair{tok, pos})
|
||||
return true
|
||||
}
|
||||
|
||||
func (this *tok_collection) find_decl_beg(pos int) int {
|
||||
lowest := 0
|
||||
lowpos := -1
|
||||
lowi := -1
|
||||
cur := 0
|
||||
for i := pos; i >= 0; i-- {
|
||||
t := this.tokens[i]
|
||||
switch t.tok {
|
||||
case token.RBRACE:
|
||||
cur++
|
||||
case token.LBRACE:
|
||||
cur--
|
||||
}
|
||||
|
||||
if cur < lowest {
|
||||
lowest = cur
|
||||
lowpos = this.fset.Position(t.pos).Offset
|
||||
lowi = i
|
||||
}
|
||||
}
|
||||
|
||||
cur = lowest
|
||||
for i := lowi - 1; i >= 0; i-- {
|
||||
t := this.tokens[i]
|
||||
switch t.tok {
|
||||
case token.RBRACE:
|
||||
cur++
|
||||
case token.LBRACE:
|
||||
cur--
|
||||
}
|
||||
if t.tok == token.SEMICOLON && cur == lowest {
|
||||
lowpos = this.fset.Position(t.pos).Offset
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return lowpos
|
||||
}
|
||||
|
||||
func (this *tok_collection) find_decl_end(pos int) int {
|
||||
highest := 0
|
||||
highpos := -1
|
||||
cur := 0
|
||||
|
||||
if this.tokens[pos].tok == token.LBRACE {
|
||||
pos++
|
||||
}
|
||||
|
||||
for i := pos; i < len(this.tokens); i++ {
|
||||
t := this.tokens[i]
|
||||
switch t.tok {
|
||||
case token.RBRACE:
|
||||
cur++
|
||||
case token.LBRACE:
|
||||
cur--
|
||||
}
|
||||
|
||||
if cur > highest {
|
||||
highest = cur
|
||||
highpos = this.fset.Position(t.pos).Offset
|
||||
}
|
||||
}
|
||||
|
||||
return highpos
|
||||
}
|
||||
|
||||
func (this *tok_collection) find_outermost_scope(cursor int) (int, int) {
|
||||
pos := 0
|
||||
|
||||
for i, t := range this.tokens {
|
||||
if cursor <= this.fset.Position(t.pos).Offset {
|
||||
break
|
||||
}
|
||||
pos = i
|
||||
}
|
||||
|
||||
return this.find_decl_beg(pos), this.find_decl_end(pos)
|
||||
}
|
||||
|
||||
// return new cursor position, file without ripped part and the ripped part itself
|
||||
// variants:
|
||||
// new-cursor, file-without-ripped-part, ripped-part
|
||||
// old-cursor, file, nil
|
||||
func (this *tok_collection) rip_off_decl(file []byte, cursor int) (int, []byte, []byte) {
|
||||
this.fset = token.NewFileSet()
|
||||
var s scanner.Scanner
|
||||
s.Init(this.fset.AddFile("", this.fset.Base(), len(file)), file, nil, scanner.ScanComments)
|
||||
for this.next(&s) {
|
||||
}
|
||||
|
||||
beg, end := this.find_outermost_scope(cursor)
|
||||
if beg == -1 || end == -1 {
|
||||
return cursor, file, nil
|
||||
}
|
||||
|
||||
ripped := make([]byte, end+1-beg)
|
||||
copy(ripped, file[beg:end+1])
|
||||
|
||||
newfile := make([]byte, len(file)-len(ripped))
|
||||
copy(newfile, file[:beg])
|
||||
copy(newfile[beg:], file[end+1:])
|
||||
|
||||
return cursor - beg, newfile, ripped
|
||||
}
|
||||
|
||||
func rip_off_decl(file []byte, cursor int) (int, []byte, []byte) {
|
||||
var tc tok_collection
|
||||
return tc.rip_off_decl(file, cursor)
|
||||
}
|
|
@ -1,138 +0,0 @@
|
|||
// WARNING! Autogenerated by goremote, don't touch.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
)
|
||||
|
||||
type RPC struct {
|
||||
}
|
||||
|
||||
// wrapper for: server_auto_complete
|
||||
|
||||
type Args_auto_complete struct {
|
||||
Arg0 []byte
|
||||
Arg1 string
|
||||
Arg2 int
|
||||
Arg3 go_build_context
|
||||
}
|
||||
type Reply_auto_complete struct {
|
||||
Arg0 []candidate
|
||||
Arg1 int
|
||||
}
|
||||
|
||||
func (r *RPC) RPC_auto_complete(args *Args_auto_complete, reply *Reply_auto_complete) error {
|
||||
reply.Arg0, reply.Arg1 = server_auto_complete(args.Arg0, args.Arg1, args.Arg2, args.Arg3)
|
||||
return nil
|
||||
}
|
||||
func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 go_build_context) (c []candidate, d int) {
|
||||
var args Args_auto_complete
|
||||
var reply Reply_auto_complete
|
||||
args.Arg0 = Arg0
|
||||
args.Arg1 = Arg1
|
||||
args.Arg2 = Arg2
|
||||
args.Arg3 = Arg3
|
||||
err := cli.Call("RPC.RPC_auto_complete", &args, &reply)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return reply.Arg0, reply.Arg1
|
||||
}
|
||||
|
||||
// wrapper for: server_close
|
||||
|
||||
type Args_close struct {
|
||||
Arg0 int
|
||||
}
|
||||
type Reply_close struct {
|
||||
Arg0 int
|
||||
}
|
||||
|
||||
func (r *RPC) RPC_close(args *Args_close, reply *Reply_close) error {
|
||||
reply.Arg0 = server_close(args.Arg0)
|
||||
return nil
|
||||
}
|
||||
func client_close(cli *rpc.Client, Arg0 int) int {
|
||||
var args Args_close
|
||||
var reply Reply_close
|
||||
args.Arg0 = Arg0
|
||||
err := cli.Call("RPC.RPC_close", &args, &reply)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return reply.Arg0
|
||||
}
|
||||
|
||||
// wrapper for: server_status
|
||||
|
||||
type Args_status struct {
|
||||
Arg0 int
|
||||
}
|
||||
type Reply_status struct {
|
||||
Arg0 string
|
||||
}
|
||||
|
||||
func (r *RPC) RPC_status(args *Args_status, reply *Reply_status) error {
|
||||
reply.Arg0 = server_status(args.Arg0)
|
||||
return nil
|
||||
}
|
||||
func client_status(cli *rpc.Client, Arg0 int) string {
|
||||
var args Args_status
|
||||
var reply Reply_status
|
||||
args.Arg0 = Arg0
|
||||
err := cli.Call("RPC.RPC_status", &args, &reply)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return reply.Arg0
|
||||
}
|
||||
|
||||
// wrapper for: server_drop_cache
|
||||
|
||||
type Args_drop_cache struct {
|
||||
Arg0 int
|
||||
}
|
||||
type Reply_drop_cache struct {
|
||||
Arg0 int
|
||||
}
|
||||
|
||||
func (r *RPC) RPC_drop_cache(args *Args_drop_cache, reply *Reply_drop_cache) error {
|
||||
reply.Arg0 = server_drop_cache(args.Arg0)
|
||||
return nil
|
||||
}
|
||||
func client_drop_cache(cli *rpc.Client, Arg0 int) int {
|
||||
var args Args_drop_cache
|
||||
var reply Reply_drop_cache
|
||||
args.Arg0 = Arg0
|
||||
err := cli.Call("RPC.RPC_drop_cache", &args, &reply)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return reply.Arg0
|
||||
}
|
||||
|
||||
// wrapper for: server_set
|
||||
|
||||
type Args_set struct {
|
||||
Arg0, Arg1 string
|
||||
}
|
||||
type Reply_set struct {
|
||||
Arg0 string
|
||||
}
|
||||
|
||||
func (r *RPC) RPC_set(args *Args_set, reply *Reply_set) error {
|
||||
reply.Arg0 = server_set(args.Arg0, args.Arg1)
|
||||
return nil
|
||||
}
|
||||
func client_set(cli *rpc.Client, Arg0, Arg1 string) string {
|
||||
var args Args_set
|
||||
var reply Reply_set
|
||||
args.Arg0 = Arg0
|
||||
args.Arg1 = Arg1
|
||||
err := cli.Call("RPC.RPC_set", &args, &reply)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return reply.Arg0
|
||||
}
|
|
@ -1,66 +0,0 @@
|
|||
package main
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// scope
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type scope struct {
|
||||
parent *scope // nil for universe scope
|
||||
entities map[string]*decl
|
||||
}
|
||||
|
||||
func new_scope(outer *scope) *scope {
|
||||
s := new(scope)
|
||||
s.parent = outer
|
||||
s.entities = make(map[string]*decl)
|
||||
return s
|
||||
}
|
||||
|
||||
// returns: new, prev
|
||||
func advance_scope(s *scope) (*scope, *scope) {
|
||||
if len(s.entities) == 0 {
|
||||
return s, s.parent
|
||||
}
|
||||
return new_scope(s), s
|
||||
}
|
||||
|
||||
// adds declaration or returns an existing one
|
||||
func (s *scope) add_named_decl(d *decl) *decl {
|
||||
return s.add_decl(d.name, d)
|
||||
}
|
||||
|
||||
func (s *scope) add_decl(name string, d *decl) *decl {
|
||||
decl, ok := s.entities[name]
|
||||
if !ok {
|
||||
s.entities[name] = d
|
||||
return d
|
||||
}
|
||||
return decl
|
||||
}
|
||||
|
||||
func (s *scope) replace_decl(name string, d *decl) {
|
||||
s.entities[name] = d
|
||||
}
|
||||
|
||||
func (s *scope) merge_decl(d *decl) {
|
||||
decl, ok := s.entities[d.name]
|
||||
if !ok {
|
||||
s.entities[d.name] = d
|
||||
} else {
|
||||
decl := decl.deep_copy()
|
||||
decl.expand_or_replace(d)
|
||||
s.entities[d.name] = decl
|
||||
}
|
||||
}
|
||||
|
||||
func (s *scope) lookup(name string) *decl {
|
||||
decl, ok := s.entities[name]
|
||||
if !ok {
|
||||
if s.parent != nil {
|
||||
return s.parent.lookup(name)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return decl
|
||||
}
|
|
@ -1,237 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"log"
|
||||
"net"
|
||||
"net/rpc"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
func do_server() int {
|
||||
g_config.read()
|
||||
if g_config.ForceDebugOutput != "" {
|
||||
// forcefully enable debugging and redirect logging into the
|
||||
// specified file
|
||||
*g_debug = true
|
||||
f, err := os.Create(g_config.ForceDebugOutput)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.SetOutput(f)
|
||||
}
|
||||
|
||||
addr := *g_addr
|
||||
if *g_sock == "unix" {
|
||||
addr = get_socket_filename()
|
||||
if file_exists(addr) {
|
||||
log.Printf("unix socket: '%s' already exists\n", addr)
|
||||
return 1
|
||||
}
|
||||
}
|
||||
g_daemon = new_daemon(*g_sock, addr)
|
||||
if *g_sock == "unix" {
|
||||
// cleanup unix socket file
|
||||
defer os.Remove(addr)
|
||||
}
|
||||
|
||||
rpc.Register(new(RPC))
|
||||
|
||||
g_daemon.loop()
|
||||
return 0
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// daemon
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type daemon struct {
|
||||
listener net.Listener
|
||||
cmd_in chan int
|
||||
autocomplete *auto_complete_context
|
||||
pkgcache package_cache
|
||||
declcache *decl_cache
|
||||
context package_lookup_context
|
||||
}
|
||||
|
||||
func new_daemon(network, address string) *daemon {
|
||||
var err error
|
||||
|
||||
d := new(daemon)
|
||||
d.listener, err = net.Listen(network, address)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
d.cmd_in = make(chan int, 1)
|
||||
d.pkgcache = new_package_cache()
|
||||
d.declcache = new_decl_cache(&d.context)
|
||||
d.autocomplete = new_auto_complete_context(d.pkgcache, d.declcache)
|
||||
return d
|
||||
}
|
||||
|
||||
func (this *daemon) drop_cache() {
|
||||
this.pkgcache = new_package_cache()
|
||||
this.declcache = new_decl_cache(&this.context)
|
||||
this.autocomplete = new_auto_complete_context(this.pkgcache, this.declcache)
|
||||
}
|
||||
|
||||
const (
|
||||
daemon_close = iota
|
||||
)
|
||||
|
||||
func (this *daemon) loop() {
|
||||
conn_in := make(chan net.Conn)
|
||||
go func() {
|
||||
for {
|
||||
c, err := this.listener.Accept()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
conn_in <- c
|
||||
}
|
||||
}()
|
||||
|
||||
timeout := time.Duration(g_config.CloseTimeout) * time.Second
|
||||
countdown := time.NewTimer(timeout)
|
||||
|
||||
for {
|
||||
// handle connections or server CMDs (currently one CMD)
|
||||
select {
|
||||
case c := <-conn_in:
|
||||
rpc.ServeConn(c)
|
||||
countdown.Reset(timeout)
|
||||
runtime.GC()
|
||||
case cmd := <-this.cmd_in:
|
||||
switch cmd {
|
||||
case daemon_close:
|
||||
return
|
||||
}
|
||||
case <-countdown.C:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (this *daemon) close() {
|
||||
this.cmd_in <- daemon_close
|
||||
}
|
||||
|
||||
var g_daemon *daemon
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// server_* functions
|
||||
//
|
||||
// Corresponding client_* functions are autogenerated by goremote.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
func server_auto_complete(file []byte, filename string, cursor int, context_packed go_build_context) (c []candidate, d int) {
|
||||
context := unpack_build_context(&context_packed)
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
print_backtrace(err)
|
||||
c = []candidate{
|
||||
{"PANIC", "PANIC", decl_invalid},
|
||||
}
|
||||
|
||||
// drop cache
|
||||
g_daemon.drop_cache()
|
||||
}
|
||||
}()
|
||||
// TODO: Probably we don't care about comparing all the fields, checking GOROOT and GOPATH
|
||||
// should be enough.
|
||||
if !reflect.DeepEqual(g_daemon.context.Context, context.Context) {
|
||||
g_daemon.context = context
|
||||
g_daemon.drop_cache()
|
||||
}
|
||||
switch g_config.PackageLookupMode {
|
||||
case "bzl":
|
||||
// when package lookup mode is bzl, we set GOPATH to "" explicitly and
|
||||
// BzlProjectRoot becomes valid (or empty)
|
||||
var err error
|
||||
g_daemon.context.GOPATH = ""
|
||||
g_daemon.context.BzlProjectRoot, err = find_bzl_project_root(g_config.LibPath, filename)
|
||||
if *g_debug && err != nil {
|
||||
log.Printf("Bzl project root not found: %s", err)
|
||||
}
|
||||
case "gb":
|
||||
// when package lookup mode is gb, we set GOPATH to "" explicitly and
|
||||
// GBProjectRoot becomes valid (or empty)
|
||||
var err error
|
||||
g_daemon.context.GOPATH = ""
|
||||
g_daemon.context.GBProjectRoot, err = find_gb_project_root(filename)
|
||||
if *g_debug && err != nil {
|
||||
log.Printf("Gb project root not found: %s", err)
|
||||
}
|
||||
case "go":
|
||||
// get current package path for GO15VENDOREXPERIMENT hack
|
||||
g_daemon.context.CurrentPackagePath = ""
|
||||
pkg, err := g_daemon.context.ImportDir(filepath.Dir(filename), build.FindOnly)
|
||||
if err == nil {
|
||||
if *g_debug {
|
||||
log.Printf("Go project path: %s", pkg.ImportPath)
|
||||
}
|
||||
g_daemon.context.CurrentPackagePath = pkg.ImportPath
|
||||
} else if *g_debug {
|
||||
log.Printf("Go project path not found: %s", err)
|
||||
}
|
||||
}
|
||||
if *g_debug {
|
||||
var buf bytes.Buffer
|
||||
log.Printf("Got autocompletion request for '%s'\n", filename)
|
||||
log.Printf("Cursor at: %d\n", cursor)
|
||||
buf.WriteString("-------------------------------------------------------\n")
|
||||
buf.Write(file[:cursor])
|
||||
buf.WriteString("#")
|
||||
buf.Write(file[cursor:])
|
||||
log.Print(buf.String())
|
||||
log.Println("-------------------------------------------------------")
|
||||
}
|
||||
candidates, d := g_daemon.autocomplete.apropos(file, filename, cursor)
|
||||
if *g_debug {
|
||||
log.Printf("Offset: %d\n", d)
|
||||
log.Printf("Number of candidates found: %d\n", len(candidates))
|
||||
log.Printf("Candidates are:\n")
|
||||
for _, c := range candidates {
|
||||
abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
|
||||
if c.Class == decl_func {
|
||||
abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
|
||||
}
|
||||
log.Printf(" %s\n", abbr)
|
||||
}
|
||||
log.Println("=======================================================")
|
||||
}
|
||||
return candidates, d
|
||||
}
|
||||
|
||||
func server_close(notused int) int {
|
||||
g_daemon.close()
|
||||
return 0
|
||||
}
|
||||
|
||||
func server_status(notused int) string {
|
||||
return g_daemon.autocomplete.status()
|
||||
}
|
||||
|
||||
func server_drop_cache(notused int) int {
|
||||
// drop cache
|
||||
g_daemon.drop_cache()
|
||||
return 0
|
||||
}
|
||||
|
||||
func server_set(key, value string) string {
|
||||
if key == "\x00" {
|
||||
return g_config.list()
|
||||
} else if value == "\x00" {
|
||||
return g_config.list_option(key)
|
||||
}
|
||||
// drop cache on settings changes
|
||||
g_daemon.drop_cache()
|
||||
return g_config.set_option(key, value)
|
||||
}
|
|
@ -1,287 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// our own readdir, which skips the files it cannot lstat
|
||||
func readdir_lstat(name string) ([]os.FileInfo, error) {
|
||||
f, err := os.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
names, err := f.Readdirnames(-1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out := make([]os.FileInfo, 0, len(names))
|
||||
for _, lname := range names {
|
||||
s, err := os.Lstat(filepath.Join(name, lname))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
out = append(out, s)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// our other readdir function, only opens and reads
|
||||
func readdir(dirname string) []os.FileInfo {
|
||||
f, err := os.Open(dirname)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
fi, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return fi
|
||||
}
|
||||
|
||||
// returns truncated 'data' and amount of bytes skipped (for cursor pos adjustment)
|
||||
func filter_out_shebang(data []byte) ([]byte, int) {
|
||||
if len(data) > 2 && data[0] == '#' && data[1] == '!' {
|
||||
newline := bytes.Index(data, []byte("\n"))
|
||||
if newline != -1 && len(data) > newline+1 {
|
||||
return data[newline+1:], newline + 1
|
||||
}
|
||||
}
|
||||
return data, 0
|
||||
}
|
||||
|
||||
func file_exists(filename string) bool {
|
||||
_, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func is_dir(path string) bool {
|
||||
fi, err := os.Stat(path)
|
||||
return err == nil && fi.IsDir()
|
||||
}
|
||||
|
||||
func char_to_byte_offset(s []byte, offset_c int) (offset_b int) {
|
||||
for offset_b = 0; offset_c > 0 && offset_b < len(s); offset_b++ {
|
||||
if utf8.RuneStart(s[offset_b]) {
|
||||
offset_c--
|
||||
}
|
||||
}
|
||||
return offset_b
|
||||
}
|
||||
|
||||
func xdg_home_dir() string {
|
||||
xdghome := os.Getenv("XDG_CONFIG_HOME")
|
||||
if xdghome == "" {
|
||||
xdghome = filepath.Join(os.Getenv("HOME"), ".config")
|
||||
}
|
||||
return xdghome
|
||||
}
|
||||
|
||||
func has_prefix(s, prefix string, ignorecase bool) bool {
|
||||
if ignorecase {
|
||||
s = strings.ToLower(s)
|
||||
prefix = strings.ToLower(prefix)
|
||||
}
|
||||
return strings.HasPrefix(s, prefix)
|
||||
}
|
||||
|
||||
func find_bzl_project_root(libpath, path string) (string, error) {
|
||||
if libpath == "" {
|
||||
return "", fmt.Errorf("could not find project root, libpath is empty")
|
||||
}
|
||||
|
||||
pathMap := map[string]struct{}{}
|
||||
for _, lp := range strings.Split(libpath, ":") {
|
||||
lp := strings.TrimSpace(lp)
|
||||
pathMap[filepath.Clean(lp)] = struct{}{}
|
||||
}
|
||||
|
||||
path = filepath.Dir(path)
|
||||
if path == "" {
|
||||
return "", fmt.Errorf("project root is blank")
|
||||
}
|
||||
|
||||
start := path
|
||||
for path != "/" {
|
||||
if _, ok := pathMap[filepath.Clean(path)]; ok {
|
||||
return path, nil
|
||||
}
|
||||
path = filepath.Dir(path)
|
||||
}
|
||||
return "", fmt.Errorf("could not find project root in %q or its parents", start)
|
||||
}
|
||||
|
||||
// Code taken directly from `gb`, I hope author doesn't mind.
|
||||
func find_gb_project_root(path string) (string, error) {
|
||||
path = filepath.Dir(path)
|
||||
if path == "" {
|
||||
return "", fmt.Errorf("project root is blank")
|
||||
}
|
||||
start := path
|
||||
for path != "/" {
|
||||
root := filepath.Join(path, "src")
|
||||
if _, err := os.Stat(root); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
path = filepath.Dir(path)
|
||||
continue
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
path, err := filepath.EvalSymlinks(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
return "", fmt.Errorf("could not find project root in %q or its parents", start)
|
||||
}
|
||||
|
||||
// vendorlessImportPath returns the devendorized version of the provided import path.
|
||||
// e.g. "foo/bar/vendor/a/b" => "a/b"
|
||||
func vendorlessImportPath(ipath string) string {
|
||||
// Devendorize for use in import statement.
|
||||
if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 {
|
||||
return ipath[i+len("/vendor/"):]
|
||||
}
|
||||
if strings.HasPrefix(ipath, "vendor/") {
|
||||
return ipath[len("vendor/"):]
|
||||
}
|
||||
return ipath
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// print_backtrace
|
||||
//
|
||||
// a nicer backtrace printer than the default one
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
var g_backtrace_mutex sync.Mutex
|
||||
|
||||
func print_backtrace(err interface{}) {
|
||||
g_backtrace_mutex.Lock()
|
||||
defer g_backtrace_mutex.Unlock()
|
||||
fmt.Printf("panic: %v\n", err)
|
||||
i := 2
|
||||
for {
|
||||
pc, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
f := runtime.FuncForPC(pc)
|
||||
fmt.Printf("%d(%s): %s:%d\n", i-1, f.Name(), file, line)
|
||||
i++
|
||||
}
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// File reader goroutine
|
||||
//
|
||||
// It's a bad idea to block multiple goroutines on file I/O. Creates many
|
||||
// threads which fight for HDD. Therefore only single goroutine should read HDD
|
||||
// at the same time.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type file_read_request struct {
|
||||
filename string
|
||||
out chan file_read_response
|
||||
}
|
||||
|
||||
type file_read_response struct {
|
||||
data []byte
|
||||
error error
|
||||
}
|
||||
|
||||
type file_reader_type struct {
|
||||
in chan file_read_request
|
||||
}
|
||||
|
||||
func new_file_reader() *file_reader_type {
|
||||
this := new(file_reader_type)
|
||||
this.in = make(chan file_read_request)
|
||||
go func() {
|
||||
var rsp file_read_response
|
||||
for {
|
||||
req := <-this.in
|
||||
rsp.data, rsp.error = ioutil.ReadFile(req.filename)
|
||||
req.out <- rsp
|
||||
}
|
||||
}()
|
||||
return this
|
||||
}
|
||||
|
||||
func (this *file_reader_type) read_file(filename string) ([]byte, error) {
|
||||
req := file_read_request{
|
||||
filename,
|
||||
make(chan file_read_response),
|
||||
}
|
||||
this.in <- req
|
||||
rsp := <-req.out
|
||||
return rsp.data, rsp.error
|
||||
}
|
||||
|
||||
var file_reader = new_file_reader()
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// copy of the build.Context without func fields
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type go_build_context struct {
|
||||
GOARCH string
|
||||
GOOS string
|
||||
GOROOT string
|
||||
GOPATH string
|
||||
CgoEnabled bool
|
||||
UseAllFiles bool
|
||||
Compiler string
|
||||
BuildTags []string
|
||||
ReleaseTags []string
|
||||
InstallSuffix string
|
||||
}
|
||||
|
||||
func pack_build_context(ctx *build.Context) go_build_context {
|
||||
return go_build_context{
|
||||
GOARCH: ctx.GOARCH,
|
||||
GOOS: ctx.GOOS,
|
||||
GOROOT: ctx.GOROOT,
|
||||
GOPATH: ctx.GOPATH,
|
||||
CgoEnabled: ctx.CgoEnabled,
|
||||
UseAllFiles: ctx.UseAllFiles,
|
||||
Compiler: ctx.Compiler,
|
||||
BuildTags: ctx.BuildTags,
|
||||
ReleaseTags: ctx.ReleaseTags,
|
||||
InstallSuffix: ctx.InstallSuffix,
|
||||
}
|
||||
}
|
||||
|
||||
func unpack_build_context(ctx *go_build_context) package_lookup_context {
|
||||
return package_lookup_context{
|
||||
Context: build.Context{
|
||||
GOARCH: ctx.GOARCH,
|
||||
GOOS: ctx.GOOS,
|
||||
GOROOT: ctx.GOROOT,
|
||||
GOPATH: ctx.GOPATH,
|
||||
CgoEnabled: ctx.CgoEnabled,
|
||||
UseAllFiles: ctx.UseAllFiles,
|
||||
Compiler: ctx.Compiler,
|
||||
BuildTags: ctx.BuildTags,
|
||||
ReleaseTags: ctx.ReleaseTags,
|
||||
InstallSuffix: ctx.InstallSuffix,
|
||||
},
|
||||
}
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
Copyright (c) 2015, visualfc <visualfc@gmail.com>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of gotools nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
# gotools
|
||||
liteide golang tools
|
|
@ -1,685 +0,0 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package doc extracts source code documentation from a Go AST.
|
||||
package astview
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type typeDoc struct {
|
||||
// len(decl.Specs) == 1, and the element type is *ast.TypeSpec
|
||||
// if the type declaration hasn't been seen yet, decl is nil
|
||||
decl *ast.GenDecl
|
||||
// values, factory functions, and methods associated with the type
|
||||
values []*ast.GenDecl // consts and vars
|
||||
factories map[string]*ast.FuncDecl
|
||||
methods map[string]*ast.FuncDecl
|
||||
}
|
||||
|
||||
// docReader accumulates documentation for a single package.
|
||||
// It modifies the AST: Comments (declaration documentation)
|
||||
// that have been collected by the DocReader are set to nil
|
||||
// in the respective AST nodes so that they are not printed
|
||||
// twice (once when printing the documentation and once when
|
||||
// printing the corresponding AST node).
|
||||
//
|
||||
type docReader struct {
|
||||
doc *ast.CommentGroup // package documentation, if any
|
||||
pkgName string
|
||||
showAll bool
|
||||
values []*ast.GenDecl // consts and vars
|
||||
types map[string]*typeDoc
|
||||
funcs map[string]*ast.FuncDecl
|
||||
imports map[string]int
|
||||
bugs []*ast.CommentGroup
|
||||
}
|
||||
|
||||
func (doc *docReader) init(pkgName string, showAll bool) {
|
||||
doc.pkgName = pkgName
|
||||
doc.showAll = showAll
|
||||
doc.imports = make(map[string]int)
|
||||
doc.types = make(map[string]*typeDoc)
|
||||
doc.funcs = make(map[string]*ast.FuncDecl)
|
||||
}
|
||||
|
||||
func (doc *docReader) addDoc(comments *ast.CommentGroup) {
|
||||
if doc.doc == nil {
|
||||
// common case: just one package comment
|
||||
doc.doc = comments
|
||||
return
|
||||
}
|
||||
|
||||
// More than one package comment: Usually there will be only
|
||||
// one file with a package comment, but it's better to collect
|
||||
// all comments than drop them on the floor.
|
||||
// (This code isn't particularly clever - no amortized doubling is
|
||||
// used - but this situation occurs rarely and is not time-critical.)
|
||||
n1 := len(doc.doc.List)
|
||||
n2 := len(comments.List)
|
||||
list := make([]*ast.Comment, n1+1+n2) // + 1 for separator line
|
||||
copy(list, doc.doc.List)
|
||||
list[n1] = &ast.Comment{token.NoPos, "//"} // separator line
|
||||
copy(list[n1+1:], comments.List)
|
||||
doc.doc = &ast.CommentGroup{list}
|
||||
}
|
||||
|
||||
func (doc *docReader) addType(decl *ast.GenDecl) {
|
||||
spec := decl.Specs[0].(*ast.TypeSpec)
|
||||
typ := doc.lookupTypeDoc(spec.Name.Name)
|
||||
// typ should always be != nil since declared types
|
||||
// are always named - be conservative and check
|
||||
if typ != nil {
|
||||
// a type should be added at most once, so typ.decl
|
||||
// should be nil - if it isn't, simply overwrite it
|
||||
typ.decl = decl
|
||||
}
|
||||
}
|
||||
|
||||
func (doc *docReader) lookupTypeDoc(name string) *typeDoc {
|
||||
if name == "" {
|
||||
return nil // no type docs for anonymous types
|
||||
}
|
||||
if tdoc, found := doc.types[name]; found {
|
||||
return tdoc
|
||||
}
|
||||
// type wasn't found - add one without declaration
|
||||
tdoc := &typeDoc{nil, nil, make(map[string]*ast.FuncDecl), make(map[string]*ast.FuncDecl)}
|
||||
doc.types[name] = tdoc
|
||||
return tdoc
|
||||
}
|
||||
|
||||
func docBaseTypeName(typ ast.Expr, showAll bool) string {
|
||||
switch t := typ.(type) {
|
||||
case *ast.Ident:
|
||||
// if the type is not exported, the effect to
|
||||
// a client is as if there were no type name
|
||||
if showAll || t.IsExported() {
|
||||
return t.Name
|
||||
}
|
||||
case *ast.StarExpr:
|
||||
return docBaseTypeName(t.X, showAll)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (doc *docReader) addValue(decl *ast.GenDecl) {
|
||||
// determine if decl should be associated with a type
|
||||
// Heuristic: For each typed entry, determine the type name, if any.
|
||||
// If there is exactly one type name that is sufficiently
|
||||
// frequent, associate the decl with the respective type.
|
||||
domName := ""
|
||||
domFreq := 0
|
||||
prev := ""
|
||||
for _, s := range decl.Specs {
|
||||
if v, ok := s.(*ast.ValueSpec); ok {
|
||||
name := ""
|
||||
switch {
|
||||
case v.Type != nil:
|
||||
// a type is present; determine its name
|
||||
name = docBaseTypeName(v.Type, doc.showAll)
|
||||
case decl.Tok == token.CONST:
|
||||
// no type is present but we have a constant declaration;
|
||||
// use the previous type name (w/o more type information
|
||||
// we cannot handle the case of unnamed variables with
|
||||
// initializer expressions except for some trivial cases)
|
||||
name = prev
|
||||
}
|
||||
if name != "" {
|
||||
// entry has a named type
|
||||
if domName != "" && domName != name {
|
||||
// more than one type name - do not associate
|
||||
// with any type
|
||||
domName = ""
|
||||
break
|
||||
}
|
||||
domName = name
|
||||
domFreq++
|
||||
}
|
||||
prev = name
|
||||
}
|
||||
}
|
||||
|
||||
// determine values list
|
||||
const threshold = 0.75
|
||||
values := &doc.values
|
||||
if domName != "" && domFreq >= int(float64(len(decl.Specs))*threshold) {
|
||||
// typed entries are sufficiently frequent
|
||||
typ := doc.lookupTypeDoc(domName)
|
||||
if typ != nil {
|
||||
values = &typ.values // associate with that type
|
||||
}
|
||||
}
|
||||
|
||||
*values = append(*values, decl)
|
||||
}
|
||||
|
||||
// Helper function to set the table entry for function f. Makes sure that
|
||||
// at least one f with associated documentation is stored in table, if there
|
||||
// are multiple f's with the same name.
|
||||
func setFunc(table map[string]*ast.FuncDecl, f *ast.FuncDecl) {
|
||||
name := f.Name.Name
|
||||
if g, exists := table[name]; exists && g.Doc != nil {
|
||||
// a function with the same name has already been registered;
|
||||
// since it has documentation, assume f is simply another
|
||||
// implementation and ignore it
|
||||
// TODO(gri) consider collecting all functions, or at least
|
||||
// all comments
|
||||
return
|
||||
}
|
||||
// function doesn't exist or has no documentation; use f
|
||||
table[name] = f
|
||||
}
|
||||
|
||||
func (doc *docReader) addFunc(fun *ast.FuncDecl) {
|
||||
name := fun.Name.Name
|
||||
|
||||
// determine if it should be associated with a type
|
||||
if fun.Recv != nil {
|
||||
// method
|
||||
typ := doc.lookupTypeDoc(docBaseTypeName(fun.Recv.List[0].Type, doc.showAll))
|
||||
if typ != nil {
|
||||
// exported receiver type
|
||||
setFunc(typ.methods, fun)
|
||||
}
|
||||
// otherwise don't show the method
|
||||
// TODO(gri): There may be exported methods of non-exported types
|
||||
// that can be called because of exported values (consts, vars, or
|
||||
// function results) of that type. Could determine if that is the
|
||||
// case and then show those methods in an appropriate section.
|
||||
return
|
||||
}
|
||||
|
||||
// perhaps a factory function
|
||||
// determine result type, if any
|
||||
if fun.Type.Results.NumFields() >= 1 {
|
||||
res := fun.Type.Results.List[0]
|
||||
if len(res.Names) <= 1 {
|
||||
// exactly one (named or anonymous) result associated
|
||||
// with the first type in result signature (there may
|
||||
// be more than one result)
|
||||
tname := docBaseTypeName(res.Type, doc.showAll)
|
||||
typ := doc.lookupTypeDoc(tname)
|
||||
if typ != nil {
|
||||
// named and exported result type
|
||||
|
||||
// Work-around for failure of heuristic: In package os
|
||||
// too many functions are considered factory functions
|
||||
// for the Error type. Eliminate manually for now as
|
||||
// this appears to be the only important case in the
|
||||
// current library where the heuristic fails.
|
||||
if doc.pkgName == "os" && tname == "Error" &&
|
||||
name != "NewError" && name != "NewSyscallError" {
|
||||
// not a factory function for os.Error
|
||||
setFunc(doc.funcs, fun) // treat as ordinary function
|
||||
return
|
||||
}
|
||||
|
||||
setFunc(typ.factories, fun)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ordinary function
|
||||
setFunc(doc.funcs, fun)
|
||||
}
|
||||
|
||||
func (doc *docReader) addDecl(decl ast.Decl) {
|
||||
switch d := decl.(type) {
|
||||
case *ast.GenDecl:
|
||||
if len(d.Specs) > 0 {
|
||||
switch d.Tok {
|
||||
case token.IMPORT:
|
||||
// imports are handled individually
|
||||
for _, spec := range d.Specs {
|
||||
if s, ok := spec.(*ast.ImportSpec); ok {
|
||||
if import_, err := strconv.Unquote(s.Path.Value); err == nil {
|
||||
doc.imports[import_] = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
case token.CONST, token.VAR:
|
||||
// constants and variables are always handled as a group
|
||||
doc.addValue(d)
|
||||
case token.TYPE:
|
||||
// types are handled individually
|
||||
for _, spec := range d.Specs {
|
||||
// make a (fake) GenDecl node for this TypeSpec
|
||||
// (we need to do this here - as opposed to just
|
||||
// for printing - so we don't lose the GenDecl
|
||||
// documentation)
|
||||
//
|
||||
// TODO(gri): Consider just collecting the TypeSpec
|
||||
// node (and copy in the GenDecl.doc if there is no
|
||||
// doc in the TypeSpec - this is currently done in
|
||||
// makeTypeDocs below). Simpler data structures, but
|
||||
// would lose GenDecl documentation if the TypeSpec
|
||||
// has documentation as well.
|
||||
doc.addType(&ast.GenDecl{d.Doc, d.Pos(), token.TYPE, token.NoPos, []ast.Spec{spec}, token.NoPos})
|
||||
// A new GenDecl node is created, no need to nil out d.Doc.
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.FuncDecl:
|
||||
doc.addFunc(d)
|
||||
}
|
||||
}
|
||||
|
||||
func copyCommentList(list []*ast.Comment) []*ast.Comment {
|
||||
return append([]*ast.Comment(nil), list...)
|
||||
}
|
||||
|
||||
var (
|
||||
bug_markers = regexp.MustCompile("^/[/*][ \t]*BUG\\(.*\\):[ \t]*") // BUG(uid):
|
||||
bug_content = regexp.MustCompile("[^ \n\r\t]+") // at least one non-whitespace char
|
||||
)
|
||||
|
||||
// addFile adds the AST for a source file to the docReader.
|
||||
// Adding the same AST multiple times is a no-op.
|
||||
//
|
||||
func (doc *docReader) addFile(src *ast.File) {
|
||||
// add package documentation
|
||||
if src.Doc != nil {
|
||||
doc.addDoc(src.Doc)
|
||||
src.Doc = nil // doc consumed - remove from ast.File node
|
||||
}
|
||||
|
||||
// add all declarations
|
||||
for _, decl := range src.Decls {
|
||||
doc.addDecl(decl)
|
||||
}
|
||||
|
||||
// collect BUG(...) comments
|
||||
for _, c := range src.Comments {
|
||||
text := c.List[0].Text
|
||||
if m := bug_markers.FindStringIndex(text); m != nil {
|
||||
// found a BUG comment; maybe empty
|
||||
if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
|
||||
// non-empty BUG comment; collect comment without BUG prefix
|
||||
list := copyCommentList(c.List)
|
||||
list[0].Text = text[m[1]:]
|
||||
doc.bugs = append(doc.bugs, &ast.CommentGroup{list})
|
||||
}
|
||||
}
|
||||
}
|
||||
src.Comments = nil // consumed unassociated comments - remove from ast.File node
|
||||
}
|
||||
|
||||
func NewFileDoc(file *ast.File, showAll bool) *PackageDoc {
|
||||
var r docReader
|
||||
r.init(file.Name.Name, showAll)
|
||||
r.addFile(file)
|
||||
return r.newDoc("", nil)
|
||||
}
|
||||
|
||||
func NewPackageDoc(pkg *ast.Package, importpath string, showAll bool) *PackageDoc {
|
||||
var r docReader
|
||||
r.init(pkg.Name, showAll)
|
||||
filenames := make([]string, len(pkg.Files))
|
||||
i := 0
|
||||
for filename, f := range pkg.Files {
|
||||
r.addFile(f)
|
||||
filenames[i] = filename
|
||||
i++
|
||||
}
|
||||
return r.newDoc(importpath, filenames)
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Conversion to external representation
|
||||
|
||||
// ValueDoc is the documentation for a group of declared
|
||||
// values, either vars or consts.
|
||||
//
|
||||
type ValueDoc struct {
|
||||
Doc string
|
||||
Decl *ast.GenDecl
|
||||
order int
|
||||
}
|
||||
|
||||
type sortValueDoc []*ValueDoc
|
||||
|
||||
func (p sortValueDoc) Len() int { return len(p) }
|
||||
func (p sortValueDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func declName(d *ast.GenDecl) string {
|
||||
if len(d.Specs) != 1 {
|
||||
return ""
|
||||
}
|
||||
|
||||
switch v := d.Specs[0].(type) {
|
||||
case *ast.ValueSpec:
|
||||
return v.Names[0].Name
|
||||
case *ast.TypeSpec:
|
||||
return v.Name.Name
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (p sortValueDoc) Less(i, j int) bool {
|
||||
// sort by name
|
||||
// pull blocks (name = "") up to top
|
||||
// in original order
|
||||
if ni, nj := declName(p[i].Decl), declName(p[j].Decl); ni != nj {
|
||||
return ni < nj
|
||||
}
|
||||
return p[i].order < p[j].order
|
||||
}
|
||||
|
||||
func makeValueDocs(list []*ast.GenDecl, tok token.Token) []*ValueDoc {
|
||||
d := make([]*ValueDoc, len(list)) // big enough in any case
|
||||
n := 0
|
||||
for i, decl := range list {
|
||||
if decl.Tok == tok {
|
||||
d[n] = &ValueDoc{decl.Doc.Text(), decl, i}
|
||||
n++
|
||||
decl.Doc = nil // doc consumed - removed from AST
|
||||
}
|
||||
}
|
||||
d = d[0:n]
|
||||
sort.Sort(sortValueDoc(d))
|
||||
return d
|
||||
}
|
||||
|
||||
// FuncDoc is the documentation for a func declaration,
|
||||
// either a top-level function or a method function.
|
||||
//
|
||||
type FuncDoc struct {
|
||||
Doc string
|
||||
Recv ast.Expr // TODO(rsc): Would like string here
|
||||
Name string
|
||||
Decl *ast.FuncDecl
|
||||
}
|
||||
|
||||
type sortFuncDoc []*FuncDoc
|
||||
|
||||
func (p sortFuncDoc) Len() int { return len(p) }
|
||||
func (p sortFuncDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p sortFuncDoc) Less(i, j int) bool { return p[i].Name < p[j].Name }
|
||||
|
||||
func makeFuncDocs(m map[string]*ast.FuncDecl) []*FuncDoc {
|
||||
d := make([]*FuncDoc, len(m))
|
||||
i := 0
|
||||
for _, f := range m {
|
||||
doc := new(FuncDoc)
|
||||
doc.Doc = f.Doc.Text()
|
||||
f.Doc = nil // doc consumed - remove from ast.FuncDecl node
|
||||
if f.Recv != nil {
|
||||
doc.Recv = f.Recv.List[0].Type
|
||||
}
|
||||
doc.Name = f.Name.Name
|
||||
doc.Decl = f
|
||||
d[i] = doc
|
||||
i++
|
||||
}
|
||||
sort.Sort(sortFuncDoc(d))
|
||||
return d
|
||||
}
|
||||
|
||||
// TypeDoc is the documentation for a declared type.
|
||||
// Consts and Vars are sorted lists of constants and variables of (mostly) that type.
|
||||
// Factories is a sorted list of factory functions that return that type.
|
||||
// Methods is a sorted list of method functions on that type.
|
||||
type TypeDoc struct {
|
||||
Doc string
|
||||
Type *ast.TypeSpec
|
||||
Consts []*ValueDoc
|
||||
Vars []*ValueDoc
|
||||
Funcs []*FuncDoc
|
||||
Methods []*FuncDoc
|
||||
Decl *ast.GenDecl
|
||||
order int
|
||||
}
|
||||
|
||||
type sortTypeDoc []*TypeDoc
|
||||
|
||||
func (p sortTypeDoc) Len() int { return len(p) }
|
||||
func (p sortTypeDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
func (p sortTypeDoc) Less(i, j int) bool {
|
||||
// sort by name
|
||||
// pull blocks (name = "") up to top
|
||||
// in original order
|
||||
if ni, nj := p[i].Type.Name.Name, p[j].Type.Name.Name; ni != nj {
|
||||
return ni < nj
|
||||
}
|
||||
return p[i].order < p[j].order
|
||||
}
|
||||
|
||||
// NOTE(rsc): This would appear not to be correct for type ( )
|
||||
// blocks, but the doc extractor above has split them into
|
||||
// individual declarations.
|
||||
func (doc *docReader) makeTypeDocs(m map[string]*typeDoc) []*TypeDoc {
|
||||
d := make([]*TypeDoc, len(m))
|
||||
i := 0
|
||||
for _, old := range m {
|
||||
// all typeDocs should have a declaration associated with
|
||||
// them after processing an entire package - be conservative
|
||||
// and check
|
||||
if decl := old.decl; decl != nil {
|
||||
typespec := decl.Specs[0].(*ast.TypeSpec)
|
||||
t := new(TypeDoc)
|
||||
doc := typespec.Doc
|
||||
typespec.Doc = nil // doc consumed - remove from ast.TypeSpec node
|
||||
if doc == nil {
|
||||
// no doc associated with the spec, use the declaration doc, if any
|
||||
doc = decl.Doc
|
||||
}
|
||||
decl.Doc = nil // doc consumed - remove from ast.Decl node
|
||||
t.Doc = doc.Text()
|
||||
t.Type = typespec
|
||||
t.Consts = makeValueDocs(old.values, token.CONST)
|
||||
t.Vars = makeValueDocs(old.values, token.VAR)
|
||||
t.Funcs = makeFuncDocs(old.factories)
|
||||
t.Methods = makeFuncDocs(old.methods)
|
||||
t.Decl = old.decl
|
||||
t.order = i
|
||||
d[i] = t
|
||||
i++
|
||||
} else {
|
||||
// no corresponding type declaration found - move any associated
|
||||
// values, factory functions, and methods back to the top-level
|
||||
// so that they are not lost (this should only happen if a package
|
||||
// file containing the explicit type declaration is missing or if
|
||||
// an unqualified type name was used after a "." import)
|
||||
// 1) move values
|
||||
doc.values = append(doc.values, old.values...)
|
||||
// 2) move factory functions
|
||||
for name, f := range old.factories {
|
||||
doc.funcs[name] = f
|
||||
}
|
||||
// 3) move methods
|
||||
for name, f := range old.methods {
|
||||
// don't overwrite functions with the same name
|
||||
if _, found := doc.funcs[name]; !found {
|
||||
doc.funcs[name] = f
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
d = d[0:i] // some types may have been ignored
|
||||
sort.Sort(sortTypeDoc(d))
|
||||
return d
|
||||
}
|
||||
|
||||
func makeBugDocs(list []*ast.CommentGroup) []string {
|
||||
d := make([]string, len(list))
|
||||
for i, g := range list {
|
||||
d[i] = g.Text()
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// PackageDoc is the documentation for an entire package.
|
||||
//
|
||||
type PackageDoc struct {
|
||||
PackageName string
|
||||
ImportPath string
|
||||
Imports []string
|
||||
Filenames []string
|
||||
Doc string
|
||||
Consts []*ValueDoc
|
||||
Types []*TypeDoc
|
||||
Vars []*ValueDoc
|
||||
Funcs []*FuncDoc
|
||||
Factorys []*FuncDoc
|
||||
Bugs []string
|
||||
}
|
||||
|
||||
// newDoc returns the accumulated documentation for the package.
|
||||
//
|
||||
func (doc *docReader) newDoc(importpath string, filenames []string) *PackageDoc {
|
||||
p := new(PackageDoc)
|
||||
p.PackageName = doc.pkgName
|
||||
p.ImportPath = importpath
|
||||
sort.Strings(filenames)
|
||||
p.Filenames = filenames
|
||||
p.Doc = doc.doc.Text()
|
||||
p.Imports = sortedKeys(doc.imports)
|
||||
// makeTypeDocs may extend the list of doc.values and
|
||||
// doc.funcs and thus must be called before any other
|
||||
// function consuming those lists
|
||||
p.Types = doc.makeTypeDocs(doc.types)
|
||||
p.Consts = makeValueDocs(doc.values, token.CONST)
|
||||
p.Vars = makeValueDocs(doc.values, token.VAR)
|
||||
p.Funcs = makeFuncDocs(doc.funcs)
|
||||
p.Bugs = makeBugDocs(doc.bugs)
|
||||
|
||||
for _, d := range p.Types {
|
||||
switch d.Type.Type.(type) {
|
||||
case *ast.StructType:
|
||||
p.Factorys = append(p.Factorys, d.Funcs...)
|
||||
d.Funcs = make([]*FuncDoc, 0)
|
||||
case *ast.InterfaceType:
|
||||
p.Factorys = append(p.Factorys, d.Funcs...)
|
||||
d.Funcs = make([]*FuncDoc, 0)
|
||||
default:
|
||||
p.Vars = append(p.Vars, d.Vars...)
|
||||
d.Vars = make([]*ValueDoc, 0)
|
||||
p.Consts = append(p.Consts, d.Consts...)
|
||||
d.Consts = make([]*ValueDoc, 0)
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func sortedKeys(m map[string]int) []string {
|
||||
list := make([]string, len(m))
|
||||
i := 0
|
||||
for key := range m {
|
||||
list[i] = key
|
||||
i++
|
||||
}
|
||||
sort.Strings(list)
|
||||
return list
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Filtering by name
|
||||
|
||||
type Filter func(string) bool
|
||||
|
||||
func matchFields(fields *ast.FieldList, f Filter) bool {
|
||||
if fields != nil {
|
||||
for _, field := range fields.List {
|
||||
for _, name := range field.Names {
|
||||
if f(name.Name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func matchDecl(d *ast.GenDecl, f Filter) bool {
|
||||
for _, d := range d.Specs {
|
||||
switch v := d.(type) {
|
||||
case *ast.ValueSpec:
|
||||
for _, name := range v.Names {
|
||||
if f(name.Name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
case *ast.TypeSpec:
|
||||
if f(v.Name.Name) {
|
||||
return true
|
||||
}
|
||||
switch t := v.Type.(type) {
|
||||
case *ast.StructType:
|
||||
if matchFields(t.Fields, f) {
|
||||
return true
|
||||
}
|
||||
case *ast.InterfaceType:
|
||||
if matchFields(t.Methods, f) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func filterValueDocs(a []*ValueDoc, f Filter) []*ValueDoc {
|
||||
w := 0
|
||||
for _, vd := range a {
|
||||
if matchDecl(vd.Decl, f) {
|
||||
a[w] = vd
|
||||
w++
|
||||
}
|
||||
}
|
||||
return a[0:w]
|
||||
}
|
||||
|
||||
func filterFuncDocs(a []*FuncDoc, f Filter) []*FuncDoc {
|
||||
w := 0
|
||||
for _, fd := range a {
|
||||
if f(fd.Name) {
|
||||
a[w] = fd
|
||||
w++
|
||||
}
|
||||
}
|
||||
return a[0:w]
|
||||
}
|
||||
|
||||
func filterTypeDocs(a []*TypeDoc, f Filter) []*TypeDoc {
|
||||
w := 0
|
||||
for _, td := range a {
|
||||
n := 0 // number of matches
|
||||
if matchDecl(td.Decl, f) {
|
||||
n = 1
|
||||
} else {
|
||||
// type name doesn't match, but we may have matching consts, vars, factories or methods
|
||||
td.Consts = filterValueDocs(td.Consts, f)
|
||||
td.Vars = filterValueDocs(td.Vars, f)
|
||||
td.Funcs = filterFuncDocs(td.Funcs, f)
|
||||
td.Methods = filterFuncDocs(td.Methods, f)
|
||||
n += len(td.Consts) + len(td.Vars) + len(td.Funcs) + len(td.Methods)
|
||||
}
|
||||
if n > 0 {
|
||||
a[w] = td
|
||||
w++
|
||||
}
|
||||
}
|
||||
return a[0:w]
|
||||
}
|
||||
|
||||
// Filter eliminates documentation for names that don't pass through the filter f.
|
||||
// TODO: Recognize "Type.Method" as a name.
|
||||
//
|
||||
func (p *PackageDoc) Filter(f Filter) {
|
||||
p.Consts = filterValueDocs(p.Consts, f)
|
||||
p.Vars = filterValueDocs(p.Vars, f)
|
||||
p.Types = filterTypeDocs(p.Types, f)
|
||||
p.Funcs = filterFuncDocs(p.Funcs, f)
|
||||
p.Doc = "" // don't show top-level package doc
|
||||
}
|
|
@ -1,346 +0,0 @@
|
|||
// Copyright 2011-2015 visualfc <visualfc@gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package astview
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/visualfc/gotools/command"
|
||||
|
||||
"golang.org/x/tools/go/types"
|
||||
)
|
||||
|
||||
var Command = &command.Command{
|
||||
Run: runAstView,
|
||||
UsageLine: "astview [-stdin] files...",
|
||||
Short: "print go files astview",
|
||||
Long: `print go files astview`,
|
||||
}
|
||||
|
||||
var astViewStdin bool
|
||||
|
||||
func init() {
|
||||
Command.Flag.BoolVar(&astViewStdin, "stdin", false, "input from stdin")
|
||||
}
|
||||
|
||||
func runAstView(cmd *command.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
cmd.Usage()
|
||||
return os.ErrInvalid
|
||||
}
|
||||
if astViewStdin {
|
||||
view, err := NewFilePackageSource(args[0], os.Stdin, true)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "astview: %s", err)
|
||||
command.SetExitStatus(3)
|
||||
command.Exit()
|
||||
}
|
||||
view.PrintTree(os.Stdout)
|
||||
} else {
|
||||
err := PrintFilesTree(args, os.Stdout, true)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "astview:%s", err)
|
||||
command.SetExitStatus(3)
|
||||
command.Exit()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
tag_package = "p"
|
||||
tag_imports_folder = "+m"
|
||||
tag_import = "mm"
|
||||
tag_type = "t"
|
||||
tag_struct = "s"
|
||||
tag_interface = "i"
|
||||
tag_value = "v"
|
||||
tag_const = "c"
|
||||
tag_func = "f"
|
||||
tag_value_folder = "+v"
|
||||
tag_const_folder = "+c"
|
||||
tag_func_folder = "+f"
|
||||
tag_factor_folder = "+tf"
|
||||
tag_type_method = "tm"
|
||||
tag_type_factor = "tf"
|
||||
tag_type_value = "tv"
|
||||
)
|
||||
|
||||
type PackageView struct {
|
||||
fset *token.FileSet
|
||||
pdoc *PackageDoc
|
||||
pkg *ast.Package
|
||||
expr bool
|
||||
}
|
||||
|
||||
var AllFiles []string
|
||||
|
||||
func (p *PackageView) posFileIndex(pos token.Position) int {
|
||||
var index = -1
|
||||
for i := 0; i < len(AllFiles); i++ {
|
||||
if AllFiles[i] == pos.Filename {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if index == -1 {
|
||||
AllFiles = append(AllFiles, pos.Filename)
|
||||
index = len(AllFiles) - 1
|
||||
}
|
||||
return index
|
||||
}
|
||||
|
||||
func (p *PackageView) posText(pos token.Position) (s string) {
|
||||
index := p.posFileIndex(pos)
|
||||
return fmt.Sprintf("%d:%d:%d", index, pos.Line, pos.Column)
|
||||
}
|
||||
|
||||
func NewFilePackage(filename string) (*PackageView, error) {
|
||||
p := new(PackageView)
|
||||
p.fset = token.NewFileSet()
|
||||
file, err := parser.ParseFile(p.fset, filename, nil, parser.AllErrors)
|
||||
if file == nil {
|
||||
return nil, err
|
||||
}
|
||||
m := make(map[string]*ast.File)
|
||||
m[filename] = file
|
||||
pkg, err := ast.NewPackage(p.fset, m, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.pkg = pkg
|
||||
p.pdoc = NewPackageDoc(pkg, pkg.Name, true)
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func NewPackageView(pkg *ast.Package, fset *token.FileSet, expr bool) (*PackageView, error) {
|
||||
p := new(PackageView)
|
||||
p.fset = fset
|
||||
p.pkg = pkg
|
||||
p.pdoc = NewPackageDoc(pkg, pkg.Name, true)
|
||||
p.expr = expr
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func ParseFiles(fset *token.FileSet, filenames []string, mode parser.Mode) (pkgs map[string]*ast.Package, pkgsfiles []string, first error) {
|
||||
pkgs = make(map[string]*ast.Package)
|
||||
for _, filename := range filenames {
|
||||
if src, err := parser.ParseFile(fset, filename, nil, mode); src != nil {
|
||||
name := src.Name.Name
|
||||
pkg, found := pkgs[name]
|
||||
if !found {
|
||||
pkg = &ast.Package{
|
||||
Name: name,
|
||||
Files: make(map[string]*ast.File),
|
||||
}
|
||||
pkgs[name] = pkg
|
||||
}
|
||||
pkg.Files[filename] = src
|
||||
pkgsfiles = append(pkgsfiles, filename)
|
||||
} else {
|
||||
first = err
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func PrintFilesTree(filenames []string, w io.Writer, expr bool) error {
|
||||
fset := token.NewFileSet()
|
||||
pkgs, pkgsfiles, err := ParseFiles(fset, filenames, parser.AllErrors)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
AllFiles = pkgsfiles
|
||||
for i := 0; i < len(AllFiles); i++ {
|
||||
fmt.Fprintf(w, "@%s\n", AllFiles[i])
|
||||
}
|
||||
for _, pkg := range pkgs {
|
||||
view, err := NewPackageView(pkg, fset, expr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
view.PrintTree(w)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewFilePackageSource(filename string, f *os.File, expr bool) (*PackageView, error) {
|
||||
src, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p := new(PackageView)
|
||||
p.fset = token.NewFileSet()
|
||||
p.expr = expr
|
||||
file, err := parser.ParseFile(p.fset, filename, src, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := make(map[string]*ast.File)
|
||||
m[filename] = file
|
||||
pkg, err := ast.NewPackage(p.fset, m, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.pdoc = NewPackageDoc(pkg, pkg.Name, true)
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *PackageView) printFuncsHelper(w io.Writer, funcs []*FuncDoc, level int, tag string, tag_folder string) {
|
||||
for _, f := range funcs {
|
||||
pos := p.fset.Position(f.Decl.Pos())
|
||||
if p.expr {
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s@%s\n", level, tag, f.Name, p.posText(pos), types.ExprString(f.Decl.Type))
|
||||
} else {
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s\n", level, tag, f.Name, p.posText(pos))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageView) PrintVars(w io.Writer, vars []*ValueDoc, level int, tag string, tag_folder string) {
|
||||
if len(tag_folder) > 0 && len(vars) > 0 {
|
||||
if tag_folder == tag_value_folder {
|
||||
fmt.Fprintf(w, "%d,%s,Variables\n", level, tag_folder)
|
||||
} else if tag_folder == tag_const_folder {
|
||||
fmt.Fprintf(w, "%d,%s,Constants\n", level, tag_folder)
|
||||
}
|
||||
level++
|
||||
}
|
||||
for _, v := range vars {
|
||||
if v.Decl == nil {
|
||||
continue
|
||||
}
|
||||
for _, s := range v.Decl.Specs {
|
||||
if m, ok := s.(*ast.ValueSpec); ok {
|
||||
pos := p.fset.Position(m.Pos())
|
||||
for i := 0; i < len(m.Names); i++ {
|
||||
if p.expr && m.Type != nil {
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s@%s\n", level, tag, m.Names[i], p.posText(pos), types.ExprString(m.Type))
|
||||
} else {
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s\n", level, tag, m.Names[i], p.posText(pos))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func (p *PackageView) PrintTypes(w io.Writer, types []*TypeDoc, level int) {
|
||||
for _, d := range types {
|
||||
if d.Decl == nil {
|
||||
continue
|
||||
}
|
||||
typespec := d.Decl.Specs[0].(*ast.TypeSpec)
|
||||
var tag = tag_type
|
||||
if _, ok := typespec.Type.(*ast.InterfaceType); ok {
|
||||
tag = tag_interface
|
||||
} else if _, ok := typespec.Type.(*ast.StructType); ok {
|
||||
tag = tag_struct
|
||||
}
|
||||
pos := p.fset.Position(d.Decl.Pos())
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s\n", level, tag, d.Type.Name, p.posText(pos))
|
||||
p.printFuncsHelper(w, d.Funcs, level+1, tag_type_factor, "")
|
||||
p.printFuncsHelper(w, d.Methods, level+1, tag_type_method, "")
|
||||
p.PrintTypeFields(w, d.Decl, level+1)
|
||||
//p.PrintVars(w, d.Consts, level+1, tag_const, "")
|
||||
//p.PrintVars(w, d.Vars, level+1, tag_value, "")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageView) PrintTypeFields(w io.Writer, decl *ast.GenDecl, level int) {
|
||||
spec, ok := decl.Specs[0].(*ast.TypeSpec)
|
||||
if ok == false {
|
||||
return
|
||||
}
|
||||
switch d := spec.Type.(type) {
|
||||
case *ast.StructType:
|
||||
for _, list := range d.Fields.List {
|
||||
if list.Names == nil {
|
||||
continue
|
||||
}
|
||||
for _, m := range list.Names {
|
||||
pos := p.fset.Position(m.Pos())
|
||||
if list.Type != nil {
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s@%s\n", level, tag_type_value, m.Name, p.posText(pos), types.ExprString(list.Type))
|
||||
} else {
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s\n", level, tag_type_value, m.Name, p.posText(pos))
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.InterfaceType:
|
||||
for _, list := range d.Methods.List {
|
||||
if list.Names == nil {
|
||||
continue
|
||||
}
|
||||
for _, m := range list.Names {
|
||||
pos := p.fset.Position(m.Pos())
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s\n", level, tag_type_method, m.Name, p.posText(pos))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageView) PrintHeader(w io.Writer, level int) {
|
||||
fmt.Fprintf(w, "%d,%s,%s\n", level, tag_package, p.pdoc.PackageName)
|
||||
}
|
||||
|
||||
func (p *PackageView) PrintImports(w io.Writer, level int, tag, tag_folder string) {
|
||||
if tag_folder != "" && len(p.pdoc.Imports) > 0 {
|
||||
fmt.Fprintf(w, "%d,%s,%s\n", level, tag_folder, "Imports")
|
||||
level++
|
||||
}
|
||||
for _, name := range p.pdoc.Imports {
|
||||
vname := "\"" + name + "\""
|
||||
var ps []string
|
||||
for _, file := range p.pkg.Files {
|
||||
for _, v := range file.Imports {
|
||||
if v.Path.Value == vname {
|
||||
pos := p.fset.Position(v.Pos())
|
||||
ps = append(ps, p.posText(pos))
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(w, "%d,%s,%s,%s\n", level, tag, name, strings.Join(ps, ";"))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PackageView) PrintFuncs(w io.Writer, level int, tag_folder string) {
|
||||
hasFolder := false
|
||||
if len(p.pdoc.Funcs) > 0 || len(p.pdoc.Factorys) > 0 {
|
||||
hasFolder = true
|
||||
}
|
||||
if !hasFolder {
|
||||
return
|
||||
}
|
||||
if len(tag_folder) > 0 {
|
||||
fmt.Fprintf(w, "%d,%s,Functions\n", level, tag_folder)
|
||||
level++
|
||||
}
|
||||
p.printFuncsHelper(w, p.pdoc.Factorys, level, tag_type_factor, tag_func_folder)
|
||||
p.printFuncsHelper(w, p.pdoc.Funcs, level, tag_func, tag_func_folder)
|
||||
}
|
||||
|
||||
func (p *PackageView) PrintPackage(w io.Writer, level int) {
|
||||
p.PrintHeader(w, level)
|
||||
level++
|
||||
p.PrintImports(w, level, tag_import, tag_imports_folder)
|
||||
p.PrintVars(w, p.pdoc.Vars, level, tag_value, tag_value_folder)
|
||||
p.PrintVars(w, p.pdoc.Consts, level, tag_const, tag_const_folder)
|
||||
p.PrintFuncs(w, level, tag_func_folder)
|
||||
p.PrintTypes(w, p.pdoc.Types, level)
|
||||
}
|
||||
|
||||
// level,tag,pos@info
|
||||
func (p *PackageView) PrintTree(w io.Writer) {
|
||||
p.PrintPackage(w, 0)
|
||||
}
|
|
@ -1,343 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//modify 2013-2014 visualfc
|
||||
|
||||
package command
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// A Command is an implementation of a go command
|
||||
// like go build or go fix.
|
||||
type Command struct {
|
||||
// Run runs the command.
|
||||
// The args are the arguments after the command name.
|
||||
Run func(cmd *Command, args []string) error
|
||||
|
||||
// UsageLine is the one-line usage message.
|
||||
// The first word in the line is taken to be the command name.
|
||||
UsageLine string
|
||||
|
||||
// Short is the short description shown in the 'go help' output.
|
||||
Short string
|
||||
|
||||
// Long is the long message shown in the 'go help <this-command>' output.
|
||||
Long string
|
||||
|
||||
// Flag is a set of flags specific to this command.
|
||||
Flag flag.FlagSet
|
||||
|
||||
// CustomFlags indicates that the command will do its own
|
||||
// flag parsing.
|
||||
CustomFlags bool
|
||||
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
}
|
||||
|
||||
// Name returns the command's name: the first word in the usage line.
|
||||
func (c *Command) Name() string {
|
||||
name := c.UsageLine
|
||||
i := strings.Index(name, " ")
|
||||
if i >= 0 {
|
||||
name = name[:i]
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func (c *Command) Usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: %s %s\n", AppName, c.UsageLine)
|
||||
c.Flag.SetOutput(os.Stderr)
|
||||
c.Flag.PrintDefaults()
|
||||
//fmt.Fprintf(os.Stderr, "%s\n", strings.TrimSpace(c.Long))
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
func (c *Command) PrintUsage() {
|
||||
fmt.Fprintf(Stderr, "usage: %s %s\n", AppName, c.UsageLine)
|
||||
c.Flag.SetOutput(Stderr)
|
||||
c.Flag.PrintDefaults()
|
||||
}
|
||||
|
||||
// Runnable reports whether the command can be run; otherwise
|
||||
// it is a documentation pseudo-command such as importpath.
|
||||
func (c *Command) Runnable() bool {
|
||||
return c.Run != nil
|
||||
}
|
||||
|
||||
func (c *Command) Println(args ...interface{}) {
|
||||
fmt.Fprintln(c.Stdout, args...)
|
||||
}
|
||||
|
||||
func (c *Command) Printf(format string, args ...interface{}) {
|
||||
fmt.Fprintf(c.Stdout, format, args...)
|
||||
}
|
||||
|
||||
var commands []*Command
|
||||
|
||||
func Register(cmd *Command) {
|
||||
commands = append(commands, cmd)
|
||||
}
|
||||
|
||||
func CommandList() (cmds []string) {
|
||||
for _, cmd := range commands {
|
||||
cmds = append(cmds, cmd.Name())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var exitStatus = 0
|
||||
var exitMu sync.Mutex
|
||||
|
||||
func SetExitStatus(n int) {
|
||||
exitMu.Lock()
|
||||
if exitStatus < n {
|
||||
exitStatus = n
|
||||
}
|
||||
exitMu.Unlock()
|
||||
}
|
||||
|
||||
var (
|
||||
Stdout io.Writer = os.Stdout
|
||||
Stderr io.Writer = os.Stderr
|
||||
Stdin io.Reader = os.Stdin
|
||||
)
|
||||
|
||||
func RunArgs(arguments []string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
|
||||
flag.CommandLine.Parse(arguments)
|
||||
args := flag.Args()
|
||||
if len(args) < 1 {
|
||||
printUsage(os.Stderr)
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
if len(args) == 1 && strings.TrimSpace(args[0]) == "" {
|
||||
printUsage(os.Stderr)
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
if args[0] == "help" {
|
||||
if !help(args[1:]) {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, cmd := range commands {
|
||||
if cmd.Name() == args[0] && cmd.Run != nil {
|
||||
cmd.Flag.Usage = func() { cmd.Usage() }
|
||||
if cmd.CustomFlags {
|
||||
args = args[1:]
|
||||
} else {
|
||||
cmd.Flag.Parse(args[1:])
|
||||
args = cmd.Flag.Args()
|
||||
}
|
||||
cmd.Stdin = stdin
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
return cmd.Run(cmd, args)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s: unknown subcommand %q\nRun '%s help' for usage.\n",
|
||||
AppName, args[0], AppName)
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
func Main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
log.SetFlags(0)
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) < 1 {
|
||||
usage()
|
||||
}
|
||||
|
||||
if len(args) == 1 && strings.TrimSpace(args[0]) == "" {
|
||||
usage()
|
||||
}
|
||||
|
||||
if args[0] == "help" {
|
||||
if !help(args[1:]) {
|
||||
os.Exit(2)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for _, cmd := range commands {
|
||||
if cmd.Name() == args[0] && cmd.Run != nil {
|
||||
cmd.Flag.Usage = func() { cmd.Usage() }
|
||||
if cmd.CustomFlags {
|
||||
args = args[1:]
|
||||
} else {
|
||||
cmd.Flag.Parse(args[1:])
|
||||
args = cmd.Flag.Args()
|
||||
}
|
||||
cmd.Stdin = Stdin
|
||||
cmd.Stdout = Stdout
|
||||
cmd.Stderr = Stderr
|
||||
cmd.Run(cmd, args)
|
||||
Exit()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%s: unknown subcommand %q\nRun '%s help' for usage.\n",
|
||||
AppName, args[0], AppName)
|
||||
SetExitStatus(2)
|
||||
Exit()
|
||||
}
|
||||
|
||||
var AppInfo string = "LiteIDE golang tool."
|
||||
var AppName string = "tools"
|
||||
|
||||
var usageTemplate = `
|
||||
Usage:
|
||||
|
||||
{{AppName}} command [arguments]
|
||||
|
||||
The commands are:
|
||||
{{range .}}{{if .Runnable}}
|
||||
{{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}}
|
||||
|
||||
Use "{{AppName}} help [command]" for more information about a command.
|
||||
|
||||
Additional help topics:
|
||||
{{range .}}{{if not .Runnable}}
|
||||
{{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}}
|
||||
|
||||
Use "{{AppName}} help [topic]" for more information about that topic.
|
||||
|
||||
`
|
||||
|
||||
var helpTemplate = `{{if .Runnable}}usage: {{AppName}} {{.UsageLine}}
|
||||
|
||||
{{end}}{{.Long | trim}}
|
||||
`
|
||||
|
||||
var documentationTemplate = `//
|
||||
/*
|
||||
{{range .}}{{if .Short}}{{.Short | capitalize}}
|
||||
|
||||
{{end}}{{if .Runnable}}Usage:
|
||||
|
||||
{{AppName}} {{.UsageLine}}
|
||||
|
||||
{{end}}{{.Long | trim}}
|
||||
|
||||
|
||||
{{end}}*/
|
||||
package main
|
||||
`
|
||||
|
||||
// tmpl executes the given template text on data, writing the result to w.
|
||||
func tmpl(w io.Writer, text string, data interface{}) {
|
||||
t := template.New("top")
|
||||
t.Funcs(template.FuncMap{"trim": strings.TrimSpace, "capitalize": capitalize})
|
||||
template.Must(t.Parse(text))
|
||||
if err := t.Execute(w, data); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func capitalize(s string) string {
|
||||
if s == "" {
|
||||
return s
|
||||
}
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
return string(unicode.ToTitle(r)) + s[n:]
|
||||
}
|
||||
|
||||
func printUsage(w io.Writer) {
|
||||
if len(AppInfo) > 0 {
|
||||
fmt.Fprintln(w, AppInfo)
|
||||
}
|
||||
tmpl(w, strings.Replace(usageTemplate, "{{AppName}}", AppName, -1), commands)
|
||||
}
|
||||
|
||||
func usage() {
|
||||
printUsage(os.Stderr)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
// help implements the 'help' command.
|
||||
func help(args []string) bool {
|
||||
if len(args) == 0 {
|
||||
printUsage(os.Stdout)
|
||||
// not exit 2: succeeded at 'go help'.
|
||||
return true
|
||||
}
|
||||
if len(args) != 1 {
|
||||
fmt.Fprintf(os.Stderr, "usage: %s help command\n\nToo many arguments given.\n", AppName)
|
||||
return false
|
||||
}
|
||||
|
||||
arg := args[0]
|
||||
|
||||
// 'go help documentation' generates doc.go.
|
||||
if arg == "documentation" {
|
||||
buf := new(bytes.Buffer)
|
||||
printUsage(buf)
|
||||
usage := &Command{Long: buf.String()}
|
||||
tmpl(os.Stdout, strings.Replace(documentationTemplate, "{{AppName}}", AppName, -1), append([]*Command{usage}, commands...))
|
||||
return false
|
||||
}
|
||||
|
||||
for _, cmd := range commands {
|
||||
if cmd.Name() == arg {
|
||||
tmpl(os.Stdout, strings.Replace(helpTemplate, "{{AppName}}", AppName, -1), cmd)
|
||||
// not exit 2: succeeded at 'go help cmd'.
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "Unknown help topic %#q. Run '%s help'.\n", arg, AppName)
|
||||
//os.Exit(2) // failed at 'go help cmd'
|
||||
return false
|
||||
}
|
||||
|
||||
var atexitFuncs []func()
|
||||
|
||||
func Atexit(f func()) {
|
||||
atexitFuncs = append(atexitFuncs, f)
|
||||
}
|
||||
|
||||
func Exit() {
|
||||
for _, f := range atexitFuncs {
|
||||
f()
|
||||
}
|
||||
os.Exit(exitStatus)
|
||||
}
|
||||
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
Errorf(format, args...)
|
||||
Exit()
|
||||
}
|
||||
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
log.Printf(format, args...)
|
||||
SetExitStatus(1)
|
||||
}
|
||||
|
||||
var logf = log.Printf
|
||||
|
||||
func ExitIfErrors() {
|
||||
if exitStatus != 0 {
|
||||
Exit()
|
||||
}
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
// Copyright 2011-2015 visualfc <visualfc@gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package command
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Register(cmdVersion)
|
||||
}
|
||||
|
||||
var AppVersion string = "1.0"
|
||||
|
||||
var cmdVersion = &Command{
|
||||
Run: runVersion,
|
||||
UsageLine: "version",
|
||||
Short: "print tool version",
|
||||
Long: `Version prints the version.`,
|
||||
}
|
||||
|
||||
func runVersion(cmd *Command, args []string) error {
|
||||
if len(args) != 0 {
|
||||
cmd.PrintUsage()
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
cmd.Printf("%s version %s [%s %s/%s]\n", AppName, AppVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH)
|
||||
return nil
|
||||
}
|
|
@ -1,8 +0,0 @@
|
|||
// Copyright 2011-2015 visualfc <visualfc@gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
gotools document
|
||||
*/
|
||||
package main
|
|
@ -1,352 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file contains the code dealing with package directory trees.
|
||||
|
||||
package docview
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/doc"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type Directory struct {
|
||||
Depth int
|
||||
Path string // includes Name
|
||||
Name string
|
||||
Text string // package documentation, if any
|
||||
Dirs []*Directory // subdirectories
|
||||
}
|
||||
|
||||
//func isGoFile(fi os.FileInfo) bool {
|
||||
// name := fi.Name()
|
||||
// return !fi.IsDir() &&
|
||||
// len(name) > 0 && name[0] != '.' && // ignore .files
|
||||
// filepath.Ext(name) == ".go"
|
||||
//}
|
||||
|
||||
func isGoFile(f os.FileInfo) bool {
|
||||
// ignore non-Go files
|
||||
name := f.Name()
|
||||
return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
|
||||
}
|
||||
|
||||
func isPkgFile(fi os.FileInfo) bool {
|
||||
return isGoFile(fi) &&
|
||||
!strings.HasSuffix(fi.Name(), "_test.go") // ignore test files
|
||||
}
|
||||
|
||||
func isPkgDir(fi os.FileInfo) bool {
|
||||
name := fi.Name()
|
||||
return fi.IsDir() && len(name) > 0 &&
|
||||
name[0] != '_' && name[0] != '.' // ignore _files and .files
|
||||
}
|
||||
|
||||
func firstSentence(s string) string {
|
||||
i := -1 // index+1 of first terminator (punctuation ending a sentence)
|
||||
j := -1 // index+1 of first terminator followed by white space
|
||||
prev := 'A'
|
||||
for k, ch := range s {
|
||||
k1 := k + 1
|
||||
if ch == '.' || ch == '!' || ch == '?' {
|
||||
if i < 0 {
|
||||
i = k1 // first terminator
|
||||
}
|
||||
if k1 < len(s) && s[k1] <= ' ' {
|
||||
if j < 0 {
|
||||
j = k1 // first terminator followed by white space
|
||||
}
|
||||
if !unicode.IsUpper(prev) {
|
||||
j = k1
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
prev = ch
|
||||
}
|
||||
|
||||
if j < 0 {
|
||||
// use the next best terminator
|
||||
j = i
|
||||
if j < 0 {
|
||||
// no terminator at all, use the entire string
|
||||
j = len(s)
|
||||
}
|
||||
}
|
||||
|
||||
return s[0:j]
|
||||
}
|
||||
|
||||
type treeBuilder struct {
|
||||
pathFilter func(string) bool
|
||||
maxDepth int
|
||||
}
|
||||
|
||||
func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {
|
||||
if b.pathFilter != nil && !b.pathFilter(path) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if depth >= b.maxDepth {
|
||||
// return a dummy directory so that the parent directory
|
||||
// doesn't get discarded just because we reached the max
|
||||
// directory depth
|
||||
return &Directory{depth, path, name, "", nil}
|
||||
}
|
||||
|
||||
list, err := fs.ReadDir(path)
|
||||
if err != nil {
|
||||
// newDirTree is called with a path that should be a package
|
||||
// directory; errors here should not happen, but if they do,
|
||||
// we want to know about them
|
||||
log.Printf("ReadDir(%s): %s", path, err)
|
||||
}
|
||||
|
||||
// determine number of subdirectories and if there are package files
|
||||
ndirs := 0
|
||||
hasPkgFiles := false
|
||||
var synopses [4]string // prioritized package documentation (0 == highest priority)
|
||||
for _, d := range list {
|
||||
switch {
|
||||
case isPkgDir(d):
|
||||
ndirs++
|
||||
case isPkgFile(d):
|
||||
// looks like a package file, but may just be a file ending in ".go";
|
||||
// don't just count it yet (otherwise we may end up with hasPkgFiles even
|
||||
// though the directory doesn't contain any real package files - was bug)
|
||||
if synopses[0] == "" {
|
||||
// no "optimal" package synopsis yet; continue to collect synopses
|
||||
//file, err := parseFile(fset, filepath.Join(path, d.Name()),
|
||||
//parser.ParseComments|parser.PackageClauseOnly)
|
||||
file, err := parser.ParseFile(fset, filepath.Join(path, d.Name()), nil,
|
||||
parser.ParseComments|parser.PackageClauseOnly)
|
||||
|
||||
if err == nil {
|
||||
hasPkgFiles = true
|
||||
if file.Doc != nil {
|
||||
// prioritize documentation
|
||||
i := -1
|
||||
switch file.Name.Name {
|
||||
case name:
|
||||
i = 0 // normal case: directory name matches package name
|
||||
case fakePkgName:
|
||||
i = 1 // synopses for commands
|
||||
case "main":
|
||||
i = 2 // directory contains a main package
|
||||
default:
|
||||
i = 3 // none of the above
|
||||
}
|
||||
if 0 <= i && i < len(synopses) && synopses[i] == "" {
|
||||
synopses[i] = doc.Synopsis(file.Doc.Text())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create subdirectory tree
|
||||
var dirs []*Directory
|
||||
if ndirs > 0 {
|
||||
dirs = make([]*Directory, ndirs)
|
||||
i := 0
|
||||
for _, d := range list {
|
||||
if isPkgDir(d) {
|
||||
name := d.Name()
|
||||
dd := b.newDirTree(fset, filepath.Join(path, name), name, depth+1)
|
||||
if dd != nil {
|
||||
dirs[i] = dd
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
dirs = dirs[0:i]
|
||||
}
|
||||
|
||||
// if there are no package files and no subdirectories
|
||||
// containing package files, ignore the directory
|
||||
if !hasPkgFiles && len(dirs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// select the highest-priority synopsis for the directory entry, if any
|
||||
synopsis := ""
|
||||
for _, synopsis = range synopses {
|
||||
if synopsis != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return &Directory{depth, path, name, synopsis, dirs}
|
||||
}
|
||||
|
||||
// newDirectory creates a new package directory tree with at most maxDepth
|
||||
// levels, anchored at root. The result tree is pruned such that it only
|
||||
// contains directories that contain package files or that contain
|
||||
// subdirectories containing package files (transitively). If a non-nil
|
||||
// pathFilter is provided, directory paths additionally must be accepted
|
||||
// by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is
|
||||
// provided for maxDepth, nodes at larger depths are pruned as well; they
|
||||
// are assumed to contain package files even if their contents are not known
|
||||
// (i.e., in this case the tree may contain directories w/o any package files).
|
||||
//
|
||||
func newDirectory(root string, pathFilter func(string) bool, maxDepth int) *Directory {
|
||||
// The root could be a symbolic link so use Stat not Lstat.
|
||||
d, err := fs.Stat(root)
|
||||
// If we fail here, report detailed error messages; otherwise
|
||||
// is is hard to see why a directory tree was not built.
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Printf("newDirectory(%s): %s", root, err)
|
||||
return nil
|
||||
case !isPkgDir(d):
|
||||
log.Printf("newDirectory(%s): not a package directory", root)
|
||||
return nil
|
||||
}
|
||||
if maxDepth < 0 {
|
||||
maxDepth = 1e6 // "infinity"
|
||||
}
|
||||
b := treeBuilder{pathFilter, maxDepth}
|
||||
// the file set provided is only for local parsing, no position
|
||||
// information escapes and thus we don't need to save the set
|
||||
return b.newDirTree(token.NewFileSet(), root, d.Name(), 0)
|
||||
}
|
||||
|
||||
func (dir *Directory) writeLeafs(buf *bytes.Buffer) {
|
||||
if dir != nil {
|
||||
if len(dir.Dirs) == 0 {
|
||||
buf.WriteString(dir.Path)
|
||||
buf.WriteByte('\n')
|
||||
return
|
||||
}
|
||||
|
||||
for _, d := range dir.Dirs {
|
||||
d.writeLeafs(buf)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {
|
||||
if dir != nil {
|
||||
if !skipRoot {
|
||||
c <- dir
|
||||
}
|
||||
for _, d := range dir.Dirs {
|
||||
d.walk(c, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dir *Directory) iter(skipRoot bool) <-chan *Directory {
|
||||
c := make(chan *Directory)
|
||||
go func() {
|
||||
dir.walk(c, skipRoot)
|
||||
close(c)
|
||||
}()
|
||||
return c
|
||||
}
|
||||
|
||||
func (dir *Directory) lookupLocal(name string) *Directory {
|
||||
for _, d := range dir.Dirs {
|
||||
if d.Name == name {
|
||||
return d
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lookup looks for the *Directory for a given path, relative to dir.
|
||||
func (dir *Directory) lookup(path string) *Directory {
|
||||
d := strings.Split(dir.Path, string(filepath.Separator))
|
||||
p := strings.Split(path, string(filepath.Separator))
|
||||
i := 0
|
||||
for i < len(d) {
|
||||
if i >= len(p) || d[i] != p[i] {
|
||||
return nil
|
||||
}
|
||||
i++
|
||||
}
|
||||
for dir != nil && i < len(p) {
|
||||
dir = dir.lookupLocal(p[i])
|
||||
i++
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// DirEntry describes a directory entry. The Depth and Height values
|
||||
// are useful for presenting an entry in an indented fashion.
|
||||
//
|
||||
type DirEntry struct {
|
||||
Depth int // >= 0
|
||||
Height int // = DirList.MaxHeight - Depth, > 0
|
||||
Path string // includes Name, relative to DirList root
|
||||
Name string
|
||||
Synopsis string
|
||||
}
|
||||
|
||||
type DirList struct {
|
||||
MaxHeight int // directory tree height, > 0
|
||||
List []DirEntry
|
||||
}
|
||||
|
||||
// listing creates a (linear) directory listing from a directory tree.
|
||||
// If skipRoot is set, the root directory itself is excluded from the list.
|
||||
//
|
||||
func (root *Directory) listing(skipRoot bool) *DirList {
|
||||
if root == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// determine number of entries n and maximum height
|
||||
n := 0
|
||||
minDepth := 1 << 30 // infinity
|
||||
maxDepth := 0
|
||||
for d := range root.iter(skipRoot) {
|
||||
n++
|
||||
if minDepth > d.Depth {
|
||||
minDepth = d.Depth
|
||||
}
|
||||
if maxDepth < d.Depth {
|
||||
maxDepth = d.Depth
|
||||
}
|
||||
}
|
||||
maxHeight := maxDepth - minDepth + 1
|
||||
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// create list
|
||||
list := make([]DirEntry, n)
|
||||
i := 0
|
||||
for d := range root.iter(skipRoot) {
|
||||
p := &list[i]
|
||||
p.Depth = d.Depth - minDepth
|
||||
p.Height = maxHeight - p.Depth
|
||||
// the path is relative to root.Path - remove the root.Path
|
||||
// prefix (the prefix should always be present but avoid
|
||||
// crashes and check)
|
||||
path := d.Path
|
||||
if strings.HasPrefix(d.Path, root.Path) {
|
||||
path = d.Path[len(root.Path):]
|
||||
}
|
||||
// remove trailing separator if any - path must be relative
|
||||
if len(path) > 0 && path[0] == filepath.Separator {
|
||||
path = path[1:]
|
||||
}
|
||||
p.Path = filepath.ToSlash(path)
|
||||
p.Name = d.Name
|
||||
p.Synopsis = d.Text
|
||||
i++
|
||||
}
|
||||
|
||||
return &DirList{maxHeight, list}
|
||||
}
|
|
@ -1,407 +0,0 @@
|
|||
// Copyright 2011-2015 visualfc <visualfc@gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package docview
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/visualfc/gotools/command"
|
||||
)
|
||||
|
||||
var Command = &command.Command{
|
||||
Run: runDocView,
|
||||
UsageLine: "docview [-mode] [-list|-find]",
|
||||
Short: "golang docview util",
|
||||
Long: `golang docview util`,
|
||||
}
|
||||
|
||||
var goroot = runtime.GOROOT()
|
||||
|
||||
var docViewFind string
|
||||
var docViewList string
|
||||
var docViewMode string
|
||||
|
||||
func init() {
|
||||
Command.Flag.StringVar(&docViewFind, "find", "", "find package list, :pkg flag is best match")
|
||||
Command.Flag.StringVar(&docViewList, "list", "", "Print go packages list [pkg|cmd]")
|
||||
Command.Flag.StringVar(&docViewMode, "mode", "text", "Print mode [text|html|lite]")
|
||||
}
|
||||
|
||||
func runDocView(cmd *command.Command, args []string) error {
|
||||
if docViewFind == "" && docViewList == "" {
|
||||
cmd.Usage()
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
var template string
|
||||
var info *Info
|
||||
if len(docViewList) > 0 {
|
||||
pkgPath := filepath.Join(goroot, "src", docViewList)
|
||||
if docViewList == "pkg" {
|
||||
_, err := os.Stat(pkgPath)
|
||||
if err != nil {
|
||||
pkgPath = filepath.Join(goroot, "src")
|
||||
}
|
||||
}
|
||||
info = NewListInfo(pkgPath)
|
||||
if info != nil {
|
||||
if docViewList == "pkg" {
|
||||
var filterList []DirEntry
|
||||
for _, v := range info.Dirs.List {
|
||||
if v.Path == "cmd" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(v.Path, "cmd/") {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(v.Path, "/testdata") {
|
||||
continue
|
||||
}
|
||||
filterList = append(filterList, v)
|
||||
}
|
||||
info.Dirs.List = filterList
|
||||
} else if docViewList == "cmd" {
|
||||
var filterList []DirEntry
|
||||
for _, v := range info.Dirs.List {
|
||||
if strings.Contains(v.Path, "/") {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(v.Path, "internal") {
|
||||
continue
|
||||
}
|
||||
filterList = append(filterList, v)
|
||||
}
|
||||
info.Dirs.List = filterList
|
||||
}
|
||||
}
|
||||
switch docViewMode {
|
||||
case "html":
|
||||
template = listHTML
|
||||
case "lite":
|
||||
template = listLite
|
||||
case "text":
|
||||
template = listText
|
||||
default:
|
||||
template = listText
|
||||
}
|
||||
} else if len(docViewFind) > 0 {
|
||||
dir := NewSourceDir(goroot)
|
||||
info = dir.FindInfo(docViewFind)
|
||||
switch docViewMode {
|
||||
case "html":
|
||||
template = findHTML
|
||||
case "lite":
|
||||
template = findLite
|
||||
case "text":
|
||||
template = findText
|
||||
default:
|
||||
template = findText
|
||||
}
|
||||
}
|
||||
if info == nil {
|
||||
fmt.Fprintf(os.Stderr, "<error>\n")
|
||||
command.SetExitStatus(3)
|
||||
command.Exit()
|
||||
}
|
||||
contents := info.GetPkgList(docViewMode, template)
|
||||
fmt.Fprintf(os.Stdout, "%s", contents)
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
fs FileSystem = OS // the underlying file system
|
||||
)
|
||||
|
||||
// Fake package file and name for commands. Contains the command documentation.
|
||||
const fakePkgFile = "doc.go"
|
||||
const fakePkgName = "documentation"
|
||||
|
||||
func textFmt(w io.Writer, format string, x ...interface{}) {
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, x)
|
||||
template.HTMLEscape(w, buf.Bytes())
|
||||
}
|
||||
|
||||
func pathEscFmt(w io.Writer, format string, x ...interface{}) {
|
||||
switch v := x[0].(type) {
|
||||
case []byte:
|
||||
template.HTMLEscape(w, v)
|
||||
case string:
|
||||
template.HTMLEscape(w, []byte(filepath.ToSlash(v)))
|
||||
default:
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, x)
|
||||
template.HTMLEscape(w, buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
func htmlEscFmt(w io.Writer, format string, x ...interface{}) {
|
||||
switch v := x[0].(type) {
|
||||
case int:
|
||||
template.HTMLEscape(w, []byte(strconv.Itoa(v)))
|
||||
case []byte:
|
||||
template.HTMLEscape(w, v)
|
||||
case string:
|
||||
template.HTMLEscape(w, []byte(v))
|
||||
default:
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, x)
|
||||
template.HTMLEscape(w, buf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
// Template formatter for "padding" format.
|
||||
func paddingFmt(w io.Writer, format string, x ...interface{}) {
|
||||
for i := x[0].(int); i > 0; i-- {
|
||||
fmt.Fprint(w, `<td width="25"></td>`)
|
||||
}
|
||||
}
|
||||
|
||||
// Template formatter for "time" format.
// x[0] is a Unix timestamp in nanoseconds (int64); the /1e9 converts it to
// seconds for time.Unix. The stringified time (local zone, per time.Unix)
// is HTML-escaped into w. The format argument is unused.
func timeFmt(w io.Writer, format string, x ...interface{}) {
	template.HTMLEscape(w, []byte(time.Unix(x[0].(int64)/1e9, 0).String()))
}
|
||||
|
||||
// fmap holds the custom functions available to the list/find templates;
// "repeat" is used by the HTML list template to indent nested entries.
var fmap = template.FuncMap{
	"repeat": strings.Repeat,
}
|
||||
|
||||
// readTemplateData parses the in-memory template source data under the
// given name, with the shared function map installed. It panics (via
// template.Must) on a parse error.
func readTemplateData(name, data string) *template.Template {
	return template.Must(template.New(name).Funcs(fmap).Parse(data))
}
|
||||
|
||||
// readTemplateFile parses the template stored in the file at path under the
// given name, with the shared function map installed. It panics (via
// template.Must) on error.
func readTemplateFile(name, path string) *template.Template {
	return template.Must(template.New(name).Funcs(fmap).ParseFiles(path))
}
|
||||
|
||||
func applyTemplate(t *template.Template, name string, data interface{}) []byte {
|
||||
var buf bytes.Buffer
|
||||
if err := t.Execute(&buf, data); err != nil {
|
||||
log.Printf("%s.Execute: %s", name, err)
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// Info is the result of a package listing or lookup.
type Info struct {
	Find string    // the query string ("" for plain listings)
	Best *DirEntry // exact match for the query, if any
	Dirs *DirList  // remaining (partial) matches, or the full listing
}
|
||||
|
||||
// GodocDir bundles the directory trees documentation is served from:
// the GOROOT package and command trees plus one tree per GOPATH src dir.
type GodocDir struct {
	pkg    *Directory   // GOROOT package tree
	cmd    *Directory   // GOROOT command tree (nil for post-src/pkg layouts)
	gopath []*Directory // one tree per GOPATH source directory
}
|
||||
|
||||
// NewSourceDir scans goroot plus the GOPATH source directories and returns
// the resulting directory trees. Old layouts keep packages in src/pkg and
// commands in src/cmd; if src/pkg does not exist, packages are assumed to
// live directly under src and no separate cmd tree is built.
func NewSourceDir(goroot string) *GodocDir {
	pkgPath := filepath.Join(goroot, "src/pkg")
	_, err := os.Stat(pkgPath)
	var cmd *Directory
	if err != nil {
		// no src/pkg: newer layout, packages directly under src
		pkgPath = filepath.Join(goroot, "src")
	} else {
		cmd = newDirectory(filepath.Join(goroot, "src", "cmd"), nil, -1)
	}
	pkg := newDirectory(pkgPath, nil, -1)
	// Clear GOROOT so SrcDirs reports only the GOPATH entries.
	ctx := build.Default
	ctx.GOROOT = ""
	var gopath []*Directory
	for _, v := range ctx.SrcDirs() {
		gopath = append(gopath, newDirectory(v, nil, -1))
	}
	return &GodocDir{pkg, cmd, gopath}
}
|
||||
|
||||
// FindInfo looks name up in the package, command, and GOPATH trees and
// merges the results into a single Info. The first exact match found (pkg
// tree first, then cmd, then GOPATH) becomes Best; partial matches from all
// trees are concatenated into Dirs. maxHeight is the largest MaxHeight of
// the scanned listings.
func (dir *GodocDir) FindInfo(name string) *Info {
	max1, best1, list1 := FindDir(dir.pkg, name)
	max2, best2, list2 := FindDir(dir.cmd, name)
	var maxHeight int
	if max1 >= max2 {
		maxHeight = max1
	} else {
		maxHeight = max2
	}
	var best *DirEntry
	if best1 != nil {
		best = best1
		// The cmd tree's exact match is demoted to a plain match.
		if best2 != nil {
			list2 = append(list2, *best2)
		}
	} else {
		best = best2
	}
	var list []DirEntry
	list = append(list, list1...)
	list = append(list, list2...)
	for _, v := range dir.gopath {
		max3, best3, list3 := FindDir(v, name)
		if max3 > maxHeight {
			maxHeight = max3
		}
		// NOTE(review): unlike best2 above, a GOPATH exact match is
		// silently dropped when best is already set (it is not added
		// to list) — looks intentional but worth confirming.
		if best == nil {
			best = best3
		}
		list = append(list, list3...)
	}
	return &Info{name, best, &DirList{maxHeight, list}}
}
|
||||
|
||||
// FindDir scans dir's recursive listing for pkgname. The last entry whose
// name or slash-separated path equals pkgname becomes best; entries whose
// path merely contains pkgname are collected into list. maxHeight echoes
// the listing's MaxHeight. A nil dir yields zero values.
func FindDir(dir *Directory, pkgname string) (maxHeight int, best *DirEntry, list []DirEntry) {
	if dir == nil {
		return
	}
	dirList := dir.listing(true)
	max := len(dirList.List)
	maxHeight = dirList.MaxHeight

	for i := 0; i < max; i++ {
		name := dirList.List[i].Name
		path := filepath.ToSlash(dirList.List[i].Path)
		if name == pkgname || path == pkgname {
			best = &dirList.List[i]
		} else if strings.Contains(path, pkgname) {
			list = append(list, dirList.List[i])
		}
	}
	return
}
|
||||
|
||||
func appendList(list1, list2 []DirEntry) []DirEntry {
|
||||
list := list1
|
||||
max := len(list2)
|
||||
for i := 0; i < max; i++ {
|
||||
list = append(list, list2[i])
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// NewListInfo builds the full (recursive) listing of the tree rooted at
// root, or returns nil if the directory tree cannot be built.
func NewListInfo(root string) *Info {
	dir := newDirectory(root, nil, -1)
	if dir == nil {
		return nil
	}
	return &Info{"", nil, dir.listing(true)}
}
|
||||
|
||||
// FindPkgInfo looks pkgname up in the tree rooted at root. The wildcard "*"
// returns the complete listing. Otherwise the entry matching by name or
// slash-path becomes Best and substring matches become Dirs.
//
// NOTE(review): when nothing matches, Best points at a zero-value DirEntry
// rather than being nil — callers should be aware.
func FindPkgInfo(root string, pkgname string) *Info {
	dir := newDirectory(root, nil, -1)
	if dir == nil {
		return nil
	}
	dirList := dir.listing(true)
	if pkgname == "*" {
		return &Info{pkgname, nil, dirList}
	}
	var best DirEntry
	var list []DirEntry
	max := len(dirList.List)
	for i := 0; i < max; i++ {
		name := dirList.List[i].Name
		path := filepath.ToSlash(dirList.List[i].Path)
		if name == pkgname || path == pkgname {
			best = dirList.List[i]
		} else if strings.Contains(path, pkgname) {
			list = append(list, dirList.List[i])
		}
	}
	return &Info{pkgname, &best, &DirList{dirList.MaxHeight, list}}
}
|
||||
|
||||
// GetPkgList renders info through the template source templateData (parsed
// under name — see the listHTML/listText/findHTML/... variables) and
// returns the generated bytes.
func (info *Info) GetPkgList(name, templateData string) []byte {
	data := readTemplateData(name, templateData)
	return applyTemplate(data, "pkglist", info)
}
|
||||
|
||||
var listHTML = `<!-- Golang Package List -->
|
||||
<p class="detail">
|
||||
Need more packages? The
|
||||
<a href="http://godashboard.appspot.com/package">Package Dashboard</a>
|
||||
provides a list of <a href="/cmd/goinstall/">goinstallable</a> packages.
|
||||
</p>
|
||||
<h2 id="Subdirectories">Subdirectories</h2>
|
||||
<p>
|
||||
{{with .Dirs}}
|
||||
<p>
|
||||
<table class="layout">
|
||||
<tr>
|
||||
<th align="left" colspan="{{html .MaxHeight}}">Name</th>
|
||||
<td width="25"> </td>
|
||||
<th align="left">Synopsis</th>
|
||||
</tr>
|
||||
{{range .List}}
|
||||
<tr>
|
||||
{{repeat "<td width=\"25\"></td>" .Depth}}
|
||||
<td align="left" colspan="{{html .Height}}"><a href="{{.Path}}">{{html .Name}}</a></td>
|
||||
<td></td>
|
||||
<td align="left">{{html .Synopsis}}</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</table>
|
||||
</p>
|
||||
{{end}}`
|
||||
|
||||
var listText = `$list
|
||||
{{with .Dirs}}
|
||||
{{range .List}}{{.Path }}
|
||||
{{end}}
|
||||
{{end}}`
|
||||
|
||||
var listLite = `$list{{with .Dirs}}{{range .List}},{{.Path}}{{end}}{{end}}`
|
||||
|
||||
var findHTML = `<!-- Golang Package List -->
|
||||
<p class="detail">
|
||||
Need more packages? The
|
||||
<a href="http://godashboard.appspot.com/package">Package Dashboard</a>
|
||||
provides a list of <a href="/cmd/goinstall/">goinstallable</a> packages.
|
||||
</p>
|
||||
<h2 id="Subdirectories">Subdirectories</h2>
|
||||
<table class="layout">
|
||||
<tr>
|
||||
<th align="left">Best</th>
|
||||
<td width="25"> </td>
|
||||
<th align="left">Synopsis</th>
|
||||
{{with .Best}}
|
||||
<tr>
|
||||
<td align="left"><a href="{{html .Path}}">{{.Path}}</a></td>
|
||||
<td></td>
|
||||
<td align="left">{{html .Synopsis}}</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
{{with .Dirs}}
|
||||
<tr>
|
||||
<th align="left">Match</th>
|
||||
<td width="25"> </td>
|
||||
<th align="left">Synopsis</th>
|
||||
</tr>
|
||||
{{range .List}}
|
||||
<tr>
|
||||
<td align="left"><a href="{{html .Path}}">{{.Path}}</a></td>
|
||||
<td></td>
|
||||
<td align="left">{{html .Synopsis}}</td>
|
||||
</tr>
|
||||
{{end}}
|
||||
</table>
|
||||
</p>
|
||||
{{end}}`
|
||||
|
||||
var findText = `$best
|
||||
{{with .Best}}{{.Path}}{{end}}
|
||||
$list
|
||||
{{with .Dirs}}{{range .List}}{{.Path}}
|
||||
{{end}}{{end}}`
|
||||
|
||||
var findLite = `$find,{{with .Best}}{{.Path}}{{end}}{{with .Dirs}}{{range .List}},{{.Path}}{{end}}{{end}}`
|
|
@ -1,668 +0,0 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package doc extracts source code documentation from a Go AST.
|
||||
package docview
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// typeDoc collects everything gathered for one declared type while reading
// a package: the declaration itself plus associated values and functions.
type typeDoc struct {
	// len(decl.Specs) == 1, and the element type is *ast.TypeSpec
	// if the type declaration hasn't been seen yet, decl is nil
	decl *ast.GenDecl
	// values, factory functions, and methods associated with the type
	values    []*ast.GenDecl // consts and vars
	factories map[string]*ast.FuncDecl
	methods   map[string]*ast.FuncDecl
}
|
||||
|
||||
// docReader accumulates documentation for a single package.
// It modifies the AST: Comments (declaration documentation)
// that have been collected by the DocReader are set to nil
// in the respective AST nodes so that they are not printed
// twice (once when printing the documentation and once when
// printing the corresponding AST node).
//
type docReader struct {
	doc     *ast.CommentGroup // package documentation, if any
	pkgName string
	showAll bool                // include unexported declarations
	values  []*ast.GenDecl      // consts and vars
	types   map[string]*typeDoc // per-type accumulators
	funcs   map[string]*ast.FuncDecl
	imports map[string]int      // import path set (values are always 1)
	bugs    []*ast.CommentGroup // collected BUG(...) comments
}
|
||||
|
||||
// init prepares doc for reading the files of package pkgName; showAll
// controls whether unexported declarations are kept.
func (doc *docReader) init(pkgName string, showAll bool) {
	doc.pkgName = pkgName
	doc.showAll = showAll
	doc.imports = make(map[string]int)
	doc.types = make(map[string]*typeDoc)
	doc.funcs = make(map[string]*ast.FuncDecl)
}
|
||||
|
||||
// addDoc merges a package comment group into the accumulated package doc,
// concatenating groups from multiple files with a "//" separator line.
func (doc *docReader) addDoc(comments *ast.CommentGroup) {
	if doc.doc == nil {
		// common case: just one package comment
		doc.doc = comments
		return
	}

	// More than one package comment: Usually there will be only
	// one file with a package comment, but it's better to collect
	// all comments than drop them on the floor.
	// (This code isn't particularly clever - no amortized doubling is
	// used - but this situation occurs rarely and is not time-critical.)
	n1 := len(doc.doc.List)
	n2 := len(comments.List)
	list := make([]*ast.Comment, n1+1+n2) // + 1 for separator line
	copy(list, doc.doc.List)
	list[n1] = &ast.Comment{token.NoPos, "//"} // separator line
	copy(list[n1+1:], comments.List)
	doc.doc = &ast.CommentGroup{list}
}
|
||||
|
||||
// addType records a (single-spec) type declaration in its typeDoc entry,
// creating the entry on demand via lookupTypeDoc.
func (doc *docReader) addType(decl *ast.GenDecl) {
	spec := decl.Specs[0].(*ast.TypeSpec)
	typ := doc.lookupTypeDoc(spec.Name.Name)
	// typ should always be != nil since declared types
	// are always named - be conservative and check
	if typ != nil {
		// a type should be added at most once, so typ.decl
		// should be nil - if it isn't, simply overwrite it
		typ.decl = decl
	}
}
|
||||
|
||||
// lookupTypeDoc returns the typeDoc accumulator for name, creating a
// placeholder entry (no declaration yet) on first use. Anonymous types
// (empty name) get nil.
func (doc *docReader) lookupTypeDoc(name string) *typeDoc {
	if name == "" {
		return nil // no type docs for anonymous types
	}
	if tdoc, found := doc.types[name]; found {
		return tdoc
	}
	// type wasn't found - add one without declaration
	tdoc := &typeDoc{nil, nil, make(map[string]*ast.FuncDecl), make(map[string]*ast.FuncDecl)}
	doc.types[name] = tdoc
	return tdoc
}
|
||||
|
||||
func docBaseTypeName(typ ast.Expr, showAll bool) string {
|
||||
switch t := typ.(type) {
|
||||
case *ast.Ident:
|
||||
// if the type is not exported, the effect to
|
||||
// a client is as if there were no type name
|
||||
if showAll || t.IsExported() {
|
||||
return t.Name
|
||||
}
|
||||
case *ast.StarExpr:
|
||||
return docBaseTypeName(t.X, showAll)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// addValue records a const/var declaration, associating it with a type when
// a single type name dominates (>= 75% of) the declaration's specs;
// otherwise it is added to the package-level values.
func (doc *docReader) addValue(decl *ast.GenDecl) {
	// determine if decl should be associated with a type
	// Heuristic: For each typed entry, determine the type name, if any.
	// If there is exactly one type name that is sufficiently
	// frequent, associate the decl with the respective type.
	domName := ""
	domFreq := 0
	prev := ""
	for _, s := range decl.Specs {
		if v, ok := s.(*ast.ValueSpec); ok {
			name := ""
			switch {
			case v.Type != nil:
				// a type is present; determine its name
				name = docBaseTypeName(v.Type, doc.showAll)
			case decl.Tok == token.CONST:
				// no type is present but we have a constant declaration;
				// use the previous type name (w/o more type information
				// we cannot handle the case of unnamed variables with
				// initializer expressions except for some trivial cases)
				name = prev
			}
			if name != "" {
				// entry has a named type
				if domName != "" && domName != name {
					// more than one type name - do not associate
					// with any type
					domName = ""
					break
				}
				domName = name
				domFreq++
			}
			prev = name
		}
	}

	// determine values list
	const threshold = 0.75
	values := &doc.values
	if domName != "" && domFreq >= int(float64(len(decl.Specs))*threshold) {
		// typed entries are sufficiently frequent
		typ := doc.lookupTypeDoc(domName)
		if typ != nil {
			values = &typ.values // associate with that type
		}
	}

	*values = append(*values, decl)
}
|
||||
|
||||
// Helper function to set the table entry for function f. Makes sure that
|
||||
// at least one f with associated documentation is stored in table, if there
|
||||
// are multiple f's with the same name.
|
||||
func setFunc(table map[string]*ast.FuncDecl, f *ast.FuncDecl) {
|
||||
name := f.Name.Name
|
||||
if g, exists := table[name]; exists && g.Doc != nil {
|
||||
// a function with the same name has already been registered;
|
||||
// since it has documentation, assume f is simply another
|
||||
// implementation and ignore it
|
||||
// TODO(gri) consider collecting all functions, or at least
|
||||
// all comments
|
||||
return
|
||||
}
|
||||
// function doesn't exist or has no documentation; use f
|
||||
table[name] = f
|
||||
}
|
||||
|
||||
// addFunc records a function declaration, filing it as a method (when it
// has a receiver), as a factory function of its first result type, or as an
// ordinary package-level function.
func (doc *docReader) addFunc(fun *ast.FuncDecl) {
	name := fun.Name.Name

	// determine if it should be associated with a type
	if fun.Recv != nil {
		// method
		typ := doc.lookupTypeDoc(docBaseTypeName(fun.Recv.List[0].Type, doc.showAll))
		if typ != nil {
			// exported receiver type
			setFunc(typ.methods, fun)
		}
		// otherwise don't show the method
		// TODO(gri): There may be exported methods of non-exported types
		// that can be called because of exported values (consts, vars, or
		// function results) of that type. Could determine if that is the
		// case and then show those methods in an appropriate section.
		return
	}

	// perhaps a factory function
	// determine result type, if any
	if fun.Type.Results.NumFields() >= 1 {
		res := fun.Type.Results.List[0]
		if len(res.Names) <= 1 {
			// exactly one (named or anonymous) result associated
			// with the first type in result signature (there may
			// be more than one result)
			tname := docBaseTypeName(res.Type, doc.showAll)
			typ := doc.lookupTypeDoc(tname)
			if typ != nil {
				// named and exported result type

				// Work-around for failure of heuristic: In package os
				// too many functions are considered factory functions
				// for the Error type. Eliminate manually for now as
				// this appears to be the only important case in the
				// current library where the heuristic fails.
				if doc.pkgName == "os" && tname == "Error" &&
					name != "NewError" && name != "NewSyscallError" {
					// not a factory function for os.Error
					setFunc(doc.funcs, fun) // treat as ordinary function
					return
				}

				setFunc(typ.factories, fun)
				return
			}
		}
	}

	// ordinary function
	setFunc(doc.funcs, fun)
}
|
||||
|
||||
// addDecl dispatches one top-level declaration to the appropriate collector:
// imports (recorded individually), const/var groups, type specs (split into
// one synthetic GenDecl each), or function declarations.
func (doc *docReader) addDecl(decl ast.Decl) {
	switch d := decl.(type) {
	case *ast.GenDecl:
		if len(d.Specs) > 0 {
			switch d.Tok {
			case token.IMPORT:
				// imports are handled individually
				for _, spec := range d.Specs {
					if s, ok := spec.(*ast.ImportSpec); ok {
						if import_, err := strconv.Unquote(s.Path.Value); err == nil {
							doc.imports[import_] = 1
						}
					}
				}
			case token.CONST, token.VAR:
				// constants and variables are always handled as a group
				doc.addValue(d)
			case token.TYPE:
				// types are handled individually
				for _, spec := range d.Specs {
					// make a (fake) GenDecl node for this TypeSpec
					// (we need to do this here - as opposed to just
					// for printing - so we don't lose the GenDecl
					// documentation)
					//
					// TODO(gri): Consider just collecting the TypeSpec
					// node (and copy in the GenDecl.doc if there is no
					// doc in the TypeSpec - this is currently done in
					// makeTypeDocs below). Simpler data structures, but
					// would lose GenDecl documentation if the TypeSpec
					// has documentation as well.
					doc.addType(&ast.GenDecl{d.Doc, d.Pos(), token.TYPE, token.NoPos, []ast.Spec{spec}, token.NoPos})
					// A new GenDecl node is created, no need to nil out d.Doc.
				}
			}
		}
	case *ast.FuncDecl:
		doc.addFunc(d)
	}
}
|
||||
|
||||
func copyCommentList(list []*ast.Comment) []*ast.Comment {
|
||||
return append([]*ast.Comment(nil), list...)
|
||||
}
|
||||
|
||||
// Patterns for recognizing and trimming BUG(...) comments (see addFile).
var (
	bug_markers = regexp.MustCompile("^/[/*][ \t]*BUG\\(.*\\):[ \t]*") // BUG(uid):
	bug_content = regexp.MustCompile("[^ \n\r\t]+")                    // at least one non-whitespace char
)
|
||||
|
||||
// addFile adds the AST for a source file to the docReader.
// Adding the same AST multiple times is a no-op.
// It consumes the file's package doc, declarations, and BUG(...) comments,
// nilling out the corresponding AST fields as it goes.
//
func (doc *docReader) addFile(src *ast.File) {
	// add package documentation
	if src.Doc != nil {
		doc.addDoc(src.Doc)
		src.Doc = nil // doc consumed - remove from ast.File node
	}

	// add all declarations
	for _, decl := range src.Decls {
		doc.addDecl(decl)
	}

	// collect BUG(...) comments
	for _, c := range src.Comments {
		text := c.List[0].Text
		if m := bug_markers.FindStringIndex(text); m != nil {
			// found a BUG comment; maybe empty
			if btxt := text[m[1]:]; bug_content.MatchString(btxt) {
				// non-empty BUG comment; collect comment without BUG prefix
				list := copyCommentList(c.List)
				list[0].Text = text[m[1]:]
				doc.bugs = append(doc.bugs, &ast.CommentGroup{list})
			}
		}
	}
	src.Comments = nil // consumed unassociated comments - remove from ast.File node
}
|
||||
|
||||
// NewFileDoc computes the package documentation for a single file.
// The file's AST is modified in the process (doc comments are consumed).
func NewFileDoc(file *ast.File, showAll bool) *PackageDoc {
	var r docReader
	r.init(file.Name.Name, showAll)
	r.addFile(file)
	return r.newDoc("", nil)
}
|
||||
|
||||
// NewPackageDoc computes the documentation for all files of pkg, recorded
// under importpath. The files' ASTs are modified in the process.
func NewPackageDoc(pkg *ast.Package, importpath string, showAll bool) *PackageDoc {
	var r docReader
	r.init(pkg.Name, showAll)
	filenames := make([]string, len(pkg.Files))
	i := 0
	for filename, f := range pkg.Files {
		r.addFile(f)
		filenames[i] = filename
		i++
	}
	return r.newDoc(importpath, filenames)
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Conversion to external representation
|
||||
|
||||
// ValueDoc is the documentation for a group of declared
// values, either vars or consts.
//
type ValueDoc struct {
	Doc   string
	Decl  *ast.GenDecl
	order int // original declaration order; sort tie-breaker (see Less)
}
|
||||
|
||||
// sortValueDoc implements sort.Interface over ValueDocs.
type sortValueDoc []*ValueDoc

func (p sortValueDoc) Len() int      { return len(p) }
func (p sortValueDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func declName(d *ast.GenDecl) string {
|
||||
if len(d.Specs) != 1 {
|
||||
return ""
|
||||
}
|
||||
|
||||
switch v := d.Specs[0].(type) {
|
||||
case *ast.ValueSpec:
|
||||
return v.Names[0].Name
|
||||
case *ast.TypeSpec:
|
||||
return v.Name.Name
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// Less orders by declared name, falling back to original source order;
// multi-spec blocks (declName == "") therefore sort to the top.
func (p sortValueDoc) Less(i, j int) bool {
	// sort by name
	// pull blocks (name = "") up to top
	// in original order
	if ni, nj := declName(p[i].Decl), declName(p[j].Decl); ni != nj {
		return ni < nj
	}
	return p[i].order < p[j].order
}
|
||||
|
||||
// makeValueDocs extracts the ValueDocs for declarations of kind tok
// (token.CONST or token.VAR) from list, sorted by name. Each matching
// declaration's doc comment is consumed (nilled out) in the process.
func makeValueDocs(list []*ast.GenDecl, tok token.Token) []*ValueDoc {
	d := make([]*ValueDoc, len(list)) // big enough in any case
	n := 0
	for i, decl := range list {
		if decl.Tok == tok {
			d[n] = &ValueDoc{decl.Doc.Text(), decl, i}
			n++
			decl.Doc = nil // doc consumed - removed from AST
		}
	}
	d = d[0:n]
	sort.Sort(sortValueDoc(d))
	return d
}
|
||||
|
||||
// FuncDoc is the documentation for a func declaration,
// either a top-level function or a method function.
//
type FuncDoc struct {
	Doc  string
	Recv ast.Expr // receiver type expression, nil for plain functions; TODO(rsc): Would like string here
	Name string
	Decl *ast.FuncDecl
}
|
||||
|
||||
// sortFuncDoc sorts FuncDocs alphabetically by function name.
type sortFuncDoc []*FuncDoc

func (p sortFuncDoc) Len() int           { return len(p) }
func (p sortFuncDoc) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p sortFuncDoc) Less(i, j int) bool { return p[i].Name < p[j].Name }
|
||||
|
||||
// makeFuncDocs converts the function table m into a name-sorted FuncDoc
// slice, consuming each declaration's doc comment.
func makeFuncDocs(m map[string]*ast.FuncDecl) []*FuncDoc {
	d := make([]*FuncDoc, len(m))
	i := 0
	for _, f := range m {
		doc := new(FuncDoc)
		doc.Doc = f.Doc.Text()
		f.Doc = nil // doc consumed - remove from ast.FuncDecl node
		if f.Recv != nil {
			doc.Recv = f.Recv.List[0].Type
		}
		doc.Name = f.Name.Name
		doc.Decl = f
		d[i] = doc
		i++
	}
	sort.Sort(sortFuncDoc(d))
	return d
}
|
||||
|
||||
// TypeDoc is the documentation for a declared type.
// Consts and Vars are sorted lists of constants and variables of (mostly) that type.
// Factories is a sorted list of factory functions that return that type.
// Methods is a sorted list of method functions on that type.
type TypeDoc struct {
	Doc     string
	Type    *ast.TypeSpec
	Consts  []*ValueDoc
	Vars    []*ValueDoc
	Funcs   []*FuncDoc
	Methods []*FuncDoc
	Decl    *ast.GenDecl
	order   int // original declaration order; sort tie-breaker (see Less)
}
|
||||
|
||||
// sortTypeDoc sorts TypeDocs by type name, then original order.
type sortTypeDoc []*TypeDoc

func (p sortTypeDoc) Len() int      { return len(p) }
func (p sortTypeDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p sortTypeDoc) Less(i, j int) bool {
	// sort by name
	// pull blocks (name = "") up to top
	// in original order
	if ni, nj := p[i].Type.Name.Name, p[j].Type.Name.Name; ni != nj {
		return ni < nj
	}
	return p[i].order < p[j].order
}
|
||||
|
||||
// NOTE(rsc): This would appear not to be correct for type ( )
// blocks, but the doc extractor above has split them into
// individual declarations.

// makeTypeDocs converts the collected typeDoc map into a name-sorted
// TypeDoc slice. Entries without a declaration (e.g. from a missing file or
// after a "." import) have their values/factories/methods moved back to the
// package level instead.
func (doc *docReader) makeTypeDocs(m map[string]*typeDoc) []*TypeDoc {
	d := make([]*TypeDoc, len(m))
	i := 0
	for _, old := range m {
		// all typeDocs should have a declaration associated with
		// them after processing an entire package - be conservative
		// and check
		if decl := old.decl; decl != nil {
			typespec := decl.Specs[0].(*ast.TypeSpec)
			t := new(TypeDoc)
			doc := typespec.Doc // shadows the receiver within this block
			typespec.Doc = nil  // doc consumed - remove from ast.TypeSpec node
			if doc == nil {
				// no doc associated with the spec, use the declaration doc, if any
				doc = decl.Doc
			}
			decl.Doc = nil     // doc consumed - remove from ast.Decl node
			t.Doc = doc.Text() // CommentGroup.Text is nil-safe: "" when no doc
			t.Type = typespec
			t.Consts = makeValueDocs(old.values, token.CONST)
			t.Vars = makeValueDocs(old.values, token.VAR)
			t.Funcs = makeFuncDocs(old.factories)
			t.Methods = makeFuncDocs(old.methods)
			t.Decl = old.decl
			t.order = i
			d[i] = t
			i++
		} else {
			// no corresponding type declaration found - move any associated
			// values, factory functions, and methods back to the top-level
			// so that they are not lost (this should only happen if a package
			// file containing the explicit type declaration is missing or if
			// an unqualified type name was used after a "." import)
			// 1) move values
			doc.values = append(doc.values, old.values...)
			// 2) move factory functions
			for name, f := range old.factories {
				doc.funcs[name] = f
			}
			// 3) move methods
			for name, f := range old.methods {
				// don't overwrite functions with the same name
				if _, found := doc.funcs[name]; !found {
					doc.funcs[name] = f
				}
			}
		}
	}
	d = d[0:i] // some types may have been ignored
	sort.Sort(sortTypeDoc(d))
	return d
}
|
||||
|
||||
func makeBugDocs(list []*ast.CommentGroup) []string {
|
||||
d := make([]string, len(list))
|
||||
for i, g := range list {
|
||||
d[i] = g.Text()
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// PackageDoc is the documentation for an entire package.
//
type PackageDoc struct {
	PackageName string
	ImportPath  string
	Imports     []string // sorted import paths
	Filenames   []string // sorted source file names
	Doc         string   // package comment text
	Consts      []*ValueDoc
	Types       []*TypeDoc
	Vars        []*ValueDoc
	Funcs       []*FuncDoc
	Bugs        []string // BUG(...) comment texts
}
|
||||
|
||||
// newDoc returns the accumulated documentation for the package.
// filenames is sorted in place.
//
func (doc *docReader) newDoc(importpath string, filenames []string) *PackageDoc {
	p := new(PackageDoc)
	p.PackageName = doc.pkgName
	p.ImportPath = importpath
	sort.Strings(filenames)
	p.Filenames = filenames
	p.Doc = doc.doc.Text()
	p.Imports = sortedKeys(doc.imports)
	// makeTypeDocs may extend the list of doc.values and
	// doc.funcs and thus must be called before any other
	// function consuming those lists
	p.Types = doc.makeTypeDocs(doc.types)
	p.Consts = makeValueDocs(doc.values, token.CONST)
	p.Vars = makeValueDocs(doc.values, token.VAR)
	p.Funcs = makeFuncDocs(doc.funcs)
	p.Bugs = makeBugDocs(doc.bugs)
	return p
}
|
||||
|
||||
// sortedKeys returns the keys of m in ascending order.
func sortedKeys(m map[string]int) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}
|
||||
|
||||
// ----------------------------------------------------------------------------
// Filtering by name

// Filter reports whether a declared name should be kept in the output.
type Filter func(string) bool
|
||||
|
||||
func matchFields(fields *ast.FieldList, f Filter) bool {
|
||||
if fields != nil {
|
||||
for _, field := range fields.List {
|
||||
for _, name := range field.Names {
|
||||
if f(name.Name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// matchDecl reports whether any name declared by d passes the filter f:
// value names, type names, and — for struct/interface types — their field
// or method names.
func matchDecl(d *ast.GenDecl, f Filter) bool {
	for _, d := range d.Specs { // shadows the parameter; each d is one spec
		switch v := d.(type) {
		case *ast.ValueSpec:
			for _, name := range v.Names {
				if f(name.Name) {
					return true
				}
			}
		case *ast.TypeSpec:
			if f(v.Name.Name) {
				return true
			}
			switch t := v.Type.(type) {
			case *ast.StructType:
				if matchFields(t.Fields, f) {
					return true
				}
			case *ast.InterfaceType:
				if matchFields(t.Methods, f) {
					return true
				}
			}
		}
	}
	return false
}
|
||||
|
||||
// filterValueDocs keeps (compacting a in place) only the entries whose
// declaration matches f, returning the trimmed slice.
func filterValueDocs(a []*ValueDoc, f Filter) []*ValueDoc {
	w := 0
	for _, vd := range a {
		if matchDecl(vd.Decl, f) {
			a[w] = vd
			w++
		}
	}
	return a[0:w]
}
|
||||
|
||||
// filterFuncDocs keeps (compacting a in place) only the functions whose
// name passes f, returning the trimmed slice.
func filterFuncDocs(a []*FuncDoc, f Filter) []*FuncDoc {
	w := 0
	for _, fd := range a {
		if f(fd.Name) {
			a[w] = fd
			w++
		}
	}
	return a[0:w]
}
|
||||
|
||||
// filterTypeDocs keeps (compacting a in place) the types whose declaration
// matches f or — failing that — whose associated consts/vars/factories/
// methods contain at least one match; those lists are themselves filtered.
func filterTypeDocs(a []*TypeDoc, f Filter) []*TypeDoc {
	w := 0
	for _, td := range a {
		n := 0 // number of matches
		if matchDecl(td.Decl, f) {
			n = 1
		} else {
			// type name doesn't match, but we may have matching consts, vars, factories or methods
			td.Consts = filterValueDocs(td.Consts, f)
			td.Vars = filterValueDocs(td.Vars, f)
			td.Funcs = filterFuncDocs(td.Funcs, f)
			td.Methods = filterFuncDocs(td.Methods, f)
			n += len(td.Consts) + len(td.Vars) + len(td.Funcs) + len(td.Methods)
		}
		if n > 0 {
			a[w] = td
			w++
		}
	}
	return a[0:w]
}
|
||||
|
||||
// Filter eliminates documentation for names that don't pass through the filter f.
// TODO: Recognize "Type.Method" as a name.
//
func (p *PackageDoc) Filter(f Filter) {
	p.Consts = filterValueDocs(p.Consts, f)
	p.Vars = filterValueDocs(p.Vars, f)
	p.Types = filterTypeDocs(p.Types, f)
	p.Funcs = filterFuncDocs(p.Funcs, f)
	p.Doc = "" // don't show top-level package doc
}
|
|
@ -1,70 +0,0 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file defines types for abstract file system access and
|
||||
// provides an implementation accessing the file system of the
|
||||
// underlying OS.
|
||||
|
||||
package docview
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// The FileSystem interface specifies the methods godoc is using
// to access the file system for which it serves documentation.
type FileSystem interface {
	Open(path string) (io.ReadCloser, error)
	Lstat(path string) (os.FileInfo, error)
	Stat(path string) (os.FileInfo, error)
	ReadDir(path string) ([]os.FileInfo, error)
}
|
||||
|
||||
// ReadFile reads the file named by path from fs and returns the contents.
func ReadFile(fs FileSystem, path string) ([]byte, error) {
	rc, err := fs.Open(path)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}
|
||||
|
||||
// ----------------------------------------------------------------------------
// OS-specific FileSystem implementation

// OS is the FileSystem implementation backed by the host operating system.
var OS FileSystem = osFS{}
|
||||
|
||||
// osFS is the OS-specific implementation of FileSystem
|
||||
type osFS struct{}
|
||||
|
||||
func (osFS) Open(path string) (io.ReadCloser, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fi.IsDir() {
|
||||
return nil, fmt.Errorf("Open: %s is a directory", path)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (osFS) Lstat(path string) (os.FileInfo, error) {
|
||||
return os.Lstat(path)
|
||||
}
|
||||
|
||||
func (osFS) Stat(path string) (os.FileInfo, error) {
|
||||
return os.Stat(path)
|
||||
}
|
||||
|
||||
func (osFS) ReadDir(path string) ([]os.FileInfo, error) {
|
||||
return ioutil.ReadDir(path) // is sorted
|
||||
}
|
|
@ -1,609 +0,0 @@
|
|||
// Copyright 2013 The rspace Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Doc is a simple document printer that produces the doc comments for its
|
||||
// argument symbols, plus a link to the full documentation and a pointer to
|
||||
// the source. It has a more Go-like UI than godoc. It can also search for
|
||||
// symbols by looking in all packages, and case is ignored. For instance:
|
||||
// doc isupper
|
||||
// will find unicode.IsUpper.
|
||||
//
|
||||
// The -pkg flag retrieves package-level doc comments only.
|
||||
//
|
||||
// Usage:
|
||||
// doc pkg.name # "doc io.Writer"
|
||||
// doc pkg name # "doc fmt Printf"
|
||||
// doc name # "doc isupper" (finds unicode.IsUpper)
|
||||
// doc -pkg pkg # "doc fmt"
|
||||
//
|
||||
// The pkg is the last element of the package path;
|
||||
// no slashes (ast.Node not go/ast.Node).
|
||||
//
|
||||
// Flags
|
||||
// -c(onst) -f(unc) -i(nterface) -m(ethod) -s(truct) -t(ype) -v(ar)
|
||||
// restrict hits to declarations of the corresponding kind.
|
||||
// Flags
|
||||
// -doc -src -url
|
||||
// restrict printing to the documentation, source path, or godoc URL.
|
||||
package finddoc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/visualfc/gotools/command"
|
||||
_ "golang.org/x/tools/go/gcimporter"
|
||||
"golang.org/x/tools/go/types"
|
||||
)
|
||||
|
||||
const usageDoc = `Find documentation for names.
|
||||
usage:
|
||||
doc pkg.name # "doc io.Writer"
|
||||
doc pkg name # "doc fmt Printf"
|
||||
doc name # "doc isupper" finds unicode.IsUpper
|
||||
doc -pkg pkg # "doc fmt"
|
||||
doc -r expr # "doc -r '.*exported'"
|
||||
pkg is the last component of any package, e.g. fmt, parser
|
||||
name is the name of an exported symbol; case is ignored in matches.
|
||||
|
||||
The name may also be a regular expression to select which names
|
||||
to match. In regular expression searches, case is ignored and
|
||||
the pattern must match the entire name, so ".?print" will match
|
||||
Print, Fprint and Sprint but not Fprintf.
|
||||
|
||||
Flags
|
||||
-c(onst) -f(unc) -i(nterface) -m(ethod) -s(truct) -t(ype) -v(ar)
|
||||
restrict hits to declarations of the corresponding kind.
|
||||
Flags
|
||||
-doc -src -url
|
||||
restrict printing to the documentation, source path, or godoc URL.
|
||||
Flag
|
||||
-r
|
||||
takes a single argument (no package), a name or regular expression
|
||||
to search for in all packages.
|
||||
`
|
||||
|
||||
var Command = &command.Command{
|
||||
Run: runDoc,
|
||||
UsageLine: "finddoc [pkg.name|pkg name|-pkg name]",
|
||||
Short: "golang doc lookup",
|
||||
Long: usageDoc,
|
||||
}
|
||||
|
||||
var (
|
||||
// If none is set, all are set.
|
||||
docFlag bool
|
||||
srcFlag bool
|
||||
urlFlag bool
|
||||
regexpFlag bool
|
||||
matchWordFlag bool
|
||||
matchCaseFlag bool
|
||||
constantFlag bool
|
||||
functionFlag bool
|
||||
interfaceFlag bool
|
||||
methodFlag bool
|
||||
packageFlag bool
|
||||
structFlag bool
|
||||
typeFlag bool
|
||||
variableFlag bool
|
||||
urlHeadTag string
|
||||
)
|
||||
|
||||
func init() {
|
||||
Command.Flag.BoolVar(&docFlag, "doc", false, "restrict output to documentation only")
|
||||
Command.Flag.BoolVar(&srcFlag, "src", false, "restrict output to source file only")
|
||||
Command.Flag.BoolVar(&urlFlag, "url", false, "restrict output to godoc URL only")
|
||||
Command.Flag.BoolVar(®expFlag, "r", false, "single argument is a regular expression for a name")
|
||||
Command.Flag.BoolVar(&matchWordFlag, "word", false, "search match whole word")
|
||||
Command.Flag.BoolVar(&matchCaseFlag, "case", false, "search match case")
|
||||
|
||||
Command.Flag.BoolVar(&constantFlag, "const", false, "show doc for consts only")
|
||||
Command.Flag.BoolVar(&functionFlag, "func", false, "show doc for funcs only")
|
||||
Command.Flag.BoolVar(&interfaceFlag, "interface", false, "show doc for interfaces only")
|
||||
Command.Flag.BoolVar(&methodFlag, "method", false, "show doc for methods only")
|
||||
Command.Flag.BoolVar(&packageFlag, "package", false, "show top-level package doc only")
|
||||
Command.Flag.BoolVar(&structFlag, "struct", false, "show doc for structs only")
|
||||
Command.Flag.BoolVar(&typeFlag, "type", false, "show doc for types only")
|
||||
Command.Flag.BoolVar(&variableFlag, "var", false, "show doc for vars only")
|
||||
|
||||
Command.Flag.BoolVar(&constantFlag, "c", false, "alias for -const")
|
||||
Command.Flag.BoolVar(&functionFlag, "f", false, "alias for -func")
|
||||
Command.Flag.BoolVar(&interfaceFlag, "i", false, "alias for -interface")
|
||||
Command.Flag.BoolVar(&methodFlag, "m", false, "alias for -method")
|
||||
Command.Flag.BoolVar(&packageFlag, "pkg", false, "alias for -package")
|
||||
Command.Flag.BoolVar(&structFlag, "s", false, "alias for -struct")
|
||||
Command.Flag.BoolVar(&typeFlag, "t", false, "alias for -type")
|
||||
Command.Flag.BoolVar(&variableFlag, "v", false, "alias for -var")
|
||||
|
||||
Command.Flag.StringVar(&urlHeadTag, "urltag", "", "url head tag, liteide provate")
|
||||
}
|
||||
|
||||
func runDoc(cmd *command.Command, args []string) error {
|
||||
if !(constantFlag || functionFlag || interfaceFlag || methodFlag || packageFlag || structFlag || typeFlag || variableFlag) { // none set
|
||||
constantFlag = true
|
||||
functionFlag = true
|
||||
methodFlag = true
|
||||
// Not package! It's special.
|
||||
typeFlag = true
|
||||
variableFlag = true
|
||||
}
|
||||
if !(docFlag || srcFlag || urlFlag) {
|
||||
docFlag = true
|
||||
srcFlag = true
|
||||
urlFlag = true
|
||||
}
|
||||
var pkg, name string
|
||||
switch len(args) {
|
||||
case 1:
|
||||
if packageFlag {
|
||||
pkg = args[0]
|
||||
} else if regexpFlag {
|
||||
name = args[0]
|
||||
} else if strings.Contains(args[0], ".") {
|
||||
pkg, name = split(args[0])
|
||||
} else {
|
||||
name = args[0]
|
||||
}
|
||||
case 2:
|
||||
if packageFlag {
|
||||
cmd.Usage()
|
||||
}
|
||||
pkg, name = args[0], args[1]
|
||||
default:
|
||||
cmd.Usage()
|
||||
return os.ErrInvalid
|
||||
}
|
||||
if strings.Contains(pkg, "/") {
|
||||
fmt.Fprintf(os.Stderr, "doc: package name cannot contain slash (TODO)\n")
|
||||
os.Exit(2)
|
||||
}
|
||||
for _, path := range Paths(pkg) {
|
||||
lookInDirectory(path, name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var slash = string(filepath.Separator)
|
||||
var slashDot = string(filepath.Separator) + "."
|
||||
var goRootSrcPkg = filepath.Join(runtime.GOROOT(), "src", "pkg")
|
||||
var goRootSrcCmd = filepath.Join(runtime.GOROOT(), "src", "cmd")
|
||||
var goPaths = SplitGopath()
|
||||
|
||||
func split(arg string) (pkg, name string) {
|
||||
dot := strings.IndexRune(arg, '.') // We know there's one there.
|
||||
return arg[0:dot], arg[dot+1:]
|
||||
}
|
||||
|
||||
func Paths(pkg string) []string {
|
||||
pkgs := pathsFor(runtime.GOROOT(), pkg)
|
||||
for _, root := range goPaths {
|
||||
pkgs = append(pkgs, pathsFor(root, pkg)...)
|
||||
}
|
||||
return pkgs
|
||||
}
|
||||
|
||||
func SplitGopath() []string {
|
||||
gopath := os.Getenv("GOPATH")
|
||||
if gopath == "" {
|
||||
return nil
|
||||
}
|
||||
return strings.Split(gopath, string(os.PathListSeparator))
|
||||
}
|
||||
|
||||
// pathsFor recursively walks the tree looking for possible directories for the package:
|
||||
// those whose basename is pkg.
|
||||
func pathsFor(root, pkg string) []string {
|
||||
root = path.Join(root, "src")
|
||||
pkgPaths := make([]string, 0, 10)
|
||||
visit := func(pathName string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
// One package per directory. Ignore the files themselves.
|
||||
if !f.IsDir() {
|
||||
return nil
|
||||
}
|
||||
// No .hg or other dot nonsense please.
|
||||
if strings.Contains(pathName, slashDot) {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
// Is the last element of the path correct
|
||||
if pkg == "" || filepath.Base(pathName) == pkg {
|
||||
pkgPaths = append(pkgPaths, pathName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
filepath.Walk(root, visit)
|
||||
return pkgPaths
|
||||
}
|
||||
|
||||
// lookInDirectory looks in the package (if any) in the directory for the named exported identifier.
|
||||
func lookInDirectory(directory, name string) {
|
||||
fset := token.NewFileSet()
|
||||
pkgs, _ := parser.ParseDir(fset, directory, nil, parser.ParseComments) // Ignore the error.
|
||||
for _, pkg := range pkgs {
|
||||
if pkg.Name == "main" || strings.HasSuffix(pkg.Name, "_test") {
|
||||
continue
|
||||
}
|
||||
doPackage(pkg, fset, name)
|
||||
}
|
||||
}
|
||||
|
||||
// prefixDirectory places the directory name on the beginning of each name in the list.
|
||||
func prefixDirectory(directory string, names []string) {
|
||||
if directory != "." {
|
||||
for i, name := range names {
|
||||
names[i] = filepath.Join(directory, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// File is a wrapper for the state of a file used in the parser.
|
||||
// The parse tree walkers are all methods of this type.
|
||||
type File struct {
|
||||
fset *token.FileSet
|
||||
name string // Name of file.
|
||||
ident string // Identifier we are searching for.
|
||||
lowerIdent string // lower ident
|
||||
regexp *regexp.Regexp
|
||||
pathPrefix string // Prefix from GOROOT/GOPATH.
|
||||
urlPrefix string // Start of corresponding URL for golang.org or godoc.org.
|
||||
file *ast.File
|
||||
comments ast.CommentMap
|
||||
defs map[*ast.Ident]types.Object
|
||||
doPrint bool
|
||||
found bool
|
||||
allFiles []*File // All files in the package.
|
||||
}
|
||||
|
||||
// doPackage analyzes the single package constructed from the named files, looking for
|
||||
// the definition of ident.
|
||||
func doPackage(pkg *ast.Package, fset *token.FileSet, ident string) {
|
||||
var files []*File
|
||||
found := false
|
||||
for name, astFile := range pkg.Files {
|
||||
if packageFlag && astFile.Doc == nil {
|
||||
continue
|
||||
}
|
||||
file := &File{
|
||||
fset: fset,
|
||||
name: name,
|
||||
ident: ident,
|
||||
lowerIdent: strings.ToLower(ident),
|
||||
file: astFile,
|
||||
comments: ast.NewCommentMap(fset, astFile, astFile.Comments),
|
||||
}
|
||||
if regexpFlag && regexp.QuoteMeta(ident) != ident {
|
||||
// It's a regular expression.
|
||||
var err error
|
||||
file.regexp, err = regexp.Compile("^(?i:" + ident + ")$")
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "regular expression `%s`:", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case strings.HasPrefix(name, goRootSrcPkg):
|
||||
file.urlPrefix = "http://golang.org/pkg"
|
||||
file.pathPrefix = goRootSrcPkg
|
||||
case strings.HasPrefix(name, goRootSrcCmd):
|
||||
file.urlPrefix = "http://golang.org/cmd"
|
||||
file.pathPrefix = goRootSrcCmd
|
||||
default:
|
||||
file.urlPrefix = "http://godoc.org"
|
||||
for _, path := range goPaths {
|
||||
p := filepath.Join(path, "src")
|
||||
if strings.HasPrefix(name, p) {
|
||||
file.pathPrefix = p
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
file.urlPrefix = urlHeadTag + file.urlPrefix
|
||||
files = append(files, file)
|
||||
if found {
|
||||
continue
|
||||
}
|
||||
file.doPrint = false
|
||||
if packageFlag {
|
||||
file.pkgComments()
|
||||
} else {
|
||||
ast.Walk(file, file.file)
|
||||
if file.found {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return
|
||||
}
|
||||
|
||||
// By providing the Context with our own error function, it will continue
|
||||
// past the first error. There is no need for that function to do anything.
|
||||
config := types.Config{
|
||||
Error: func(error) {},
|
||||
}
|
||||
info := &types.Info{
|
||||
Defs: make(map[*ast.Ident]types.Object),
|
||||
}
|
||||
path := ""
|
||||
var astFiles []*ast.File
|
||||
for name, astFile := range pkg.Files {
|
||||
if path == "" {
|
||||
path = name
|
||||
}
|
||||
astFiles = append(astFiles, astFile)
|
||||
}
|
||||
config.Check(path, fset, astFiles, info) // Ignore errors.
|
||||
|
||||
// We need to search all files for methods, so record the full list in each file.
|
||||
for _, file := range files {
|
||||
file.allFiles = files
|
||||
}
|
||||
for _, file := range files {
|
||||
file.doPrint = true
|
||||
file.defs = info.Defs
|
||||
if packageFlag {
|
||||
file.pkgComments()
|
||||
} else {
|
||||
ast.Walk(file, file.file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Visit implements the ast.Visitor interface.
|
||||
func (f *File) Visit(node ast.Node) ast.Visitor {
|
||||
switch n := node.(type) {
|
||||
case *ast.GenDecl:
|
||||
// Variables, constants, types.
|
||||
for _, spec := range n.Specs {
|
||||
switch spec := spec.(type) {
|
||||
case *ast.ValueSpec:
|
||||
if constantFlag && n.Tok == token.CONST || variableFlag && n.Tok == token.VAR {
|
||||
for _, ident := range spec.Names {
|
||||
if f.match(ident.Name) {
|
||||
f.printNode(n, ident, f.nameURL(ident.Name))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.TypeSpec:
|
||||
// If there is only one Spec, there are probably no parens and the
|
||||
// comment we want appears before the type keyword, bound to
|
||||
// the GenDecl. If the Specs are parenthesized, the comment we want
|
||||
// is bound to the Spec. Hence we dig into the GenDecl to the Spec,
|
||||
// but only if there are no parens.
|
||||
node := ast.Node(n)
|
||||
if n.Lparen.IsValid() {
|
||||
node = spec
|
||||
}
|
||||
if f.match(spec.Name.Name) {
|
||||
if typeFlag {
|
||||
f.printNode(node, spec.Name, f.nameURL(spec.Name.Name))
|
||||
} else {
|
||||
switch spec.Type.(type) {
|
||||
case *ast.InterfaceType:
|
||||
if interfaceFlag {
|
||||
f.printNode(node, spec.Name, f.nameURL(spec.Name.Name))
|
||||
}
|
||||
case *ast.StructType:
|
||||
if structFlag {
|
||||
f.printNode(node, spec.Name, f.nameURL(spec.Name.Name))
|
||||
}
|
||||
}
|
||||
}
|
||||
if f.doPrint && f.defs[spec.Name] != nil && f.defs[spec.Name].Type() != nil {
|
||||
ms := types.NewMethodSet(f.defs[spec.Name].Type()) //.Type().MethodSet()
|
||||
if ms.Len() == 0 {
|
||||
ms = types.NewMethodSet(types.NewPointer(f.defs[spec.Name].Type())) //.MethodSet()
|
||||
}
|
||||
f.methodSet(ms)
|
||||
}
|
||||
}
|
||||
case *ast.ImportSpec:
|
||||
continue // Don't care.
|
||||
}
|
||||
}
|
||||
case *ast.FuncDecl:
|
||||
// Methods, top-level functions.
|
||||
if f.match(n.Name.Name) {
|
||||
n.Body = nil // Do not print the function body.
|
||||
if methodFlag && n.Recv != nil {
|
||||
f.printNode(n, n.Name, f.methodURL(n.Recv.List[0].Type, n.Name.Name))
|
||||
} else if functionFlag && n.Recv == nil {
|
||||
f.printNode(n, n.Name, f.nameURL(n.Name.Name))
|
||||
}
|
||||
}
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *File) match(name string) bool {
|
||||
// name must be exported.
|
||||
if !ast.IsExported(name) {
|
||||
return false
|
||||
}
|
||||
if f.regexp == nil {
|
||||
if matchWordFlag {
|
||||
if matchCaseFlag {
|
||||
return name == f.ident
|
||||
}
|
||||
return strings.ToLower(name) == f.lowerIdent
|
||||
} else {
|
||||
if matchCaseFlag {
|
||||
return strings.Contains(name, f.ident)
|
||||
}
|
||||
return strings.Contains(strings.ToLower(name), f.lowerIdent)
|
||||
}
|
||||
}
|
||||
return f.regexp.MatchString(name)
|
||||
}
|
||||
|
||||
func (f *File) printNode(node, ident ast.Node, url string) {
|
||||
if !f.doPrint {
|
||||
f.found = true
|
||||
return
|
||||
}
|
||||
fmt.Printf("%s%s%s", url, f.sourcePos(f.fset.Position(ident.Pos())), f.docs(node))
|
||||
}
|
||||
|
||||
func (f *File) docs(node ast.Node) []byte {
|
||||
if !docFlag {
|
||||
return nil
|
||||
}
|
||||
commentedNode := printer.CommentedNode{Node: node}
|
||||
if comments := f.comments.Filter(node).Comments(); comments != nil {
|
||||
commentedNode.Comments = comments
|
||||
}
|
||||
var b bytes.Buffer
|
||||
printer.Fprint(&b, f.fset, &commentedNode)
|
||||
b.Write([]byte("\n\n")) // Add a blank line between entries if we print documentation.
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
func (f *File) pkgComments() {
|
||||
doc := f.file.Doc
|
||||
if doc == nil {
|
||||
return
|
||||
}
|
||||
url := ""
|
||||
if urlFlag {
|
||||
url = f.packageURL() + "\n"
|
||||
}
|
||||
docText := ""
|
||||
if docFlag {
|
||||
docText = fmt.Sprintf("package %s\n%s\n\n", f.file.Name.Name, doc.Text())
|
||||
}
|
||||
fmt.Printf("%s%s%s", url, f.sourcePos(f.fset.Position(doc.Pos())), docText)
|
||||
}
|
||||
|
||||
func (f *File) packageURL() string {
|
||||
s := strings.TrimPrefix(f.name, f.pathPrefix)
|
||||
// Now we have a path with a final file name. Drop it.
|
||||
if i := strings.LastIndex(s, slash); i > 0 {
|
||||
s = s[:i+1]
|
||||
}
|
||||
return f.urlPrefix + s
|
||||
}
|
||||
|
||||
func (f *File) packageName() string {
|
||||
s := strings.TrimPrefix(f.name, f.pathPrefix)
|
||||
// Now we have a path with a final file name. Drop it.
|
||||
if i := strings.LastIndex(s, slash); i > 0 {
|
||||
s = s[:i+1]
|
||||
}
|
||||
s = strings.Trim(s, slash)
|
||||
return filepath.ToSlash(s)
|
||||
}
|
||||
|
||||
func (f *File) sourcePos(posn token.Position) string {
|
||||
if !srcFlag {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%s:%d:\n", posn.Filename, posn.Line)
|
||||
}
|
||||
|
||||
func (f *File) nameURL(name string) string {
|
||||
if !urlFlag {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%s#%s\n", f.packageURL(), name)
|
||||
}
|
||||
|
||||
func (f *File) methodURL(typ ast.Expr, name string) string {
|
||||
if !urlFlag {
|
||||
return ""
|
||||
}
|
||||
var b bytes.Buffer
|
||||
printer.Fprint(&b, f.fset, typ)
|
||||
typeName := b.Bytes()
|
||||
if len(typeName) > 0 && typeName[0] == '*' {
|
||||
typeName = typeName[1:]
|
||||
}
|
||||
return fmt.Sprintf("%s#%s.%s\n", f.packageURL(), typeName, name)
|
||||
}
|
||||
|
||||
// Here follows the code to find and print a method (actually a method set, because
|
||||
// we want to do only one redundant tree walk, not one per method).
|
||||
// It should be much easier than walking the whole tree again, but that's what we must do.
|
||||
// TODO.
|
||||
|
||||
type method struct {
|
||||
index int // Which doc to write. (Keeps the results sorted)
|
||||
*types.Selection
|
||||
}
|
||||
|
||||
type methodVisitor struct {
|
||||
*File
|
||||
methods []method
|
||||
docs []string
|
||||
}
|
||||
|
||||
func (f *File) methodSet(set *types.MethodSet) {
|
||||
// Build the set of things we're looking for.
|
||||
methods := make([]method, 0, set.Len())
|
||||
docs := make([]string, set.Len())
|
||||
for i := 0; i < set.Len(); i++ {
|
||||
if ast.IsExported(set.At(i).Obj().Name()) {
|
||||
m := method{
|
||||
i,
|
||||
set.At(i),
|
||||
}
|
||||
methods = append(methods, m)
|
||||
}
|
||||
}
|
||||
if len(methods) == 0 {
|
||||
return
|
||||
}
|
||||
// Collect the docs.
|
||||
for _, file := range f.allFiles {
|
||||
visitor := &methodVisitor{
|
||||
File: file,
|
||||
methods: methods,
|
||||
docs: docs,
|
||||
}
|
||||
ast.Walk(visitor, file.file)
|
||||
methods = visitor.methods
|
||||
}
|
||||
// Print them in order. The incoming method set is sorted by name.
|
||||
for _, doc := range docs {
|
||||
if doc != "" {
|
||||
fmt.Print(doc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Visit implements the ast.Visitor interface.
|
||||
func (visitor *methodVisitor) Visit(node ast.Node) ast.Visitor {
|
||||
switch n := node.(type) {
|
||||
case *ast.FuncDecl:
|
||||
for i, method := range visitor.methods {
|
||||
// If this is the right one, the position of the name of its identifier will match.
|
||||
if method.Obj().Pos() == n.Name.Pos() {
|
||||
n.Body = nil // TODO. Ugly - don't print the function body.
|
||||
visitor.docs[method.index] = fmt.Sprintf("%s", visitor.File.docs(n))
|
||||
// If this was the last method, we're done.
|
||||
if len(visitor.methods) == 1 {
|
||||
return nil
|
||||
}
|
||||
// Drop this one from the list.
|
||||
visitor.methods = append(visitor.methods[:i], visitor.methods[i+1:]...)
|
||||
return visitor
|
||||
}
|
||||
}
|
||||
}
|
||||
return visitor
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,394 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package goimports
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/visualfc/gotools/stdlib"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
// importToGroup is a list of functions which map from an import path to
|
||||
// a group number.
|
||||
var importToGroup = []func(importPath string) (num int, ok bool){
|
||||
func(importPath string) (num int, ok bool) {
|
||||
if strings.HasPrefix(importPath, "appengine") {
|
||||
return 2, true
|
||||
}
|
||||
return
|
||||
},
|
||||
func(importPath string) (num int, ok bool) {
|
||||
if strings.Contains(importPath, ".") {
|
||||
return 1, true
|
||||
}
|
||||
return
|
||||
},
|
||||
}
|
||||
|
||||
func importGroup(importPath string) int {
|
||||
for _, fn := range importToGroup {
|
||||
if n, ok := fn(importPath); ok {
|
||||
return n
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func fixImports(fset *token.FileSet, f *ast.File) (added []string, err error) {
|
||||
// refs are a set of possible package references currently unsatisfied by imports.
|
||||
// first key: either base package (e.g. "fmt") or renamed package
|
||||
// second key: referenced package symbol (e.g. "Println")
|
||||
refs := make(map[string]map[string]bool)
|
||||
|
||||
// decls are the current package imports. key is base package or renamed package.
|
||||
decls := make(map[string]*ast.ImportSpec)
|
||||
|
||||
// collect potential uses of packages.
|
||||
var visitor visitFn
|
||||
visitor = visitFn(func(node ast.Node) ast.Visitor {
|
||||
if node == nil {
|
||||
return visitor
|
||||
}
|
||||
switch v := node.(type) {
|
||||
case *ast.ImportSpec:
|
||||
if v.Name != nil {
|
||||
decls[v.Name.Name] = v
|
||||
} else {
|
||||
local := importPathToName(strings.Trim(v.Path.Value, `\"`))
|
||||
decls[local] = v
|
||||
}
|
||||
case *ast.SelectorExpr:
|
||||
xident, ok := v.X.(*ast.Ident)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if xident.Obj != nil {
|
||||
// if the parser can resolve it, it's not a package ref
|
||||
break
|
||||
}
|
||||
pkgName := xident.Name
|
||||
if refs[pkgName] == nil {
|
||||
refs[pkgName] = make(map[string]bool)
|
||||
}
|
||||
if decls[pkgName] == nil {
|
||||
refs[pkgName][v.Sel.Name] = true
|
||||
}
|
||||
}
|
||||
return visitor
|
||||
})
|
||||
ast.Walk(visitor, f)
|
||||
|
||||
// Search for imports matching potential package references.
|
||||
searches := 0
|
||||
type result struct {
|
||||
ipath string
|
||||
name string
|
||||
err error
|
||||
}
|
||||
results := make(chan result)
|
||||
for pkgName, symbols := range refs {
|
||||
if len(symbols) == 0 {
|
||||
continue // skip over packages already imported
|
||||
}
|
||||
go func(pkgName string, symbols map[string]bool) {
|
||||
ipath, rename, err := findImport(pkgName, symbols)
|
||||
r := result{ipath: ipath, err: err}
|
||||
if rename {
|
||||
r.name = pkgName
|
||||
}
|
||||
results <- r
|
||||
}(pkgName, symbols)
|
||||
searches++
|
||||
}
|
||||
for i := 0; i < searches; i++ {
|
||||
result := <-results
|
||||
if result.err != nil {
|
||||
return nil, result.err
|
||||
}
|
||||
if result.ipath != "" {
|
||||
if result.name != "" {
|
||||
astutil.AddNamedImport(fset, f, result.name, result.ipath)
|
||||
} else {
|
||||
astutil.AddImport(fset, f, result.ipath)
|
||||
}
|
||||
added = append(added, result.ipath)
|
||||
}
|
||||
}
|
||||
|
||||
// Nil out any unused ImportSpecs, to be removed in following passes
|
||||
unusedImport := map[string]bool{}
|
||||
for pkg, is := range decls {
|
||||
if refs[pkg] == nil && pkg != "_" && pkg != "." {
|
||||
unusedImport[strings.Trim(is.Path.Value, `"`)] = true
|
||||
}
|
||||
}
|
||||
for ipath := range unusedImport {
|
||||
if ipath == "C" {
|
||||
// Don't remove cgo stuff.
|
||||
continue
|
||||
}
|
||||
astutil.DeleteImport(fset, f, ipath)
|
||||
}
|
||||
|
||||
return added, nil
|
||||
}
|
||||
|
||||
// importPathToName returns the package name for the given import path.
|
||||
var importPathToName = importPathToNameGoPath
|
||||
|
||||
// importPathToNameBasic assumes the package name is the base of import path.
|
||||
func importPathToNameBasic(importPath string) (packageName string) {
|
||||
return path.Base(importPath)
|
||||
}
|
||||
|
||||
// importPathToNameGoPath finds out the actual package name, as declared in its .go files.
|
||||
// If there's a problem, it falls back to using importPathToNameBasic.
|
||||
func importPathToNameGoPath(importPath string) (packageName string) {
|
||||
if stdlib.IsStdPkg(importPath) {
|
||||
return path.Base(importPath)
|
||||
}
|
||||
if buildPkg, err := build.Import(importPath, "", 0); err == nil {
|
||||
return buildPkg.Name
|
||||
} else {
|
||||
return importPathToNameBasic(importPath)
|
||||
}
|
||||
}
|
||||
|
||||
type pkg struct {
|
||||
importpath string // full pkg import path, e.g. "net/http"
|
||||
dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt"
|
||||
}
|
||||
|
||||
var pkgIndexOnce sync.Once
|
||||
|
||||
var pkgIndex struct {
|
||||
sync.Mutex
|
||||
m map[string][]pkg // shortname => []pkg, e.g "http" => "net/http"
|
||||
}
|
||||
|
||||
// gate is a semaphore for limiting concurrency.
|
||||
type gate chan struct{}
|
||||
|
||||
func (g gate) enter() { g <- struct{}{} }
|
||||
func (g gate) leave() { <-g }
|
||||
|
||||
// fsgate protects the OS & filesystem from too much concurrency.
|
||||
// Too much disk I/O -> too many threads -> swapping and bad scheduling.
|
||||
var fsgate = make(gate, 8)
|
||||
|
||||
func loadPkgIndex() {
|
||||
pkgIndex.Lock()
|
||||
pkgIndex.m = make(map[string][]pkg)
|
||||
pkgIndex.Unlock()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, path := range build.Default.SrcDirs() {
|
||||
fsgate.enter()
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
fsgate.leave()
|
||||
fmt.Fprint(os.Stderr, err)
|
||||
continue
|
||||
}
|
||||
children, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
fsgate.leave()
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, err)
|
||||
continue
|
||||
}
|
||||
for _, child := range children {
|
||||
if child.IsDir() {
|
||||
wg.Add(1)
|
||||
go func(path, name string) {
|
||||
defer wg.Done()
|
||||
loadPkg(&wg, path, name)
|
||||
}(path, child.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func loadPkg(wg *sync.WaitGroup, root, pkgrelpath string) {
|
||||
importpath := filepath.ToSlash(pkgrelpath)
|
||||
dir := filepath.Join(root, importpath)
|
||||
|
||||
fsgate.enter()
|
||||
defer fsgate.leave()
|
||||
pkgDir, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
children, err := pkgDir.Readdir(-1)
|
||||
pkgDir.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// hasGo tracks whether a directory actually appears to be a
|
||||
// Go source code directory. If $GOPATH == $HOME, and
|
||||
// $HOME/src has lots of other large non-Go projects in it,
|
||||
// then the calls to importPathToName below can be expensive.
|
||||
hasGo := false
|
||||
for _, child := range children {
|
||||
name := child.Name()
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
if c := name[0]; c == '.' || ('0' <= c && c <= '9') {
|
||||
continue
|
||||
}
|
||||
if strings.HasSuffix(name, ".go") {
|
||||
hasGo = true
|
||||
}
|
||||
if child.IsDir() {
|
||||
wg.Add(1)
|
||||
go func(root, name string) {
|
||||
defer wg.Done()
|
||||
loadPkg(wg, root, name)
|
||||
}(root, filepath.Join(importpath, name))
|
||||
}
|
||||
}
|
||||
if hasGo {
|
||||
shortName := importPathToName(importpath)
|
||||
pkgIndex.Lock()
|
||||
pkgIndex.m[shortName] = append(pkgIndex.m[shortName], pkg{
|
||||
importpath: importpath,
|
||||
dir: dir,
|
||||
})
|
||||
pkgIndex.Unlock()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// loadExports returns a list exports for a package.
|
||||
var loadExports = loadExportsGoPath
|
||||
|
||||
func loadExportsGoPath(dir string) map[string]bool {
|
||||
exports := make(map[string]bool)
|
||||
buildPkg, err := build.ImportDir(dir, 0)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "no buildable Go source files in") {
|
||||
return nil
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "could not import %q: %v\n", dir, err)
|
||||
return nil
|
||||
}
|
||||
fset := token.NewFileSet()
|
||||
for _, files := range [...][]string{buildPkg.GoFiles, buildPkg.CgoFiles} {
|
||||
for _, file := range files {
|
||||
f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "could not parse %q: %v\n", file, err)
|
||||
continue
|
||||
}
|
||||
for name := range f.Scope.Objects {
|
||||
if ast.IsExported(name) {
|
||||
exports[name] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return exports
|
||||
}
|
||||
|
||||
// findImport searches for a package with the given symbols.
|
||||
// If no package is found, findImport returns "".
|
||||
// Declared as a variable rather than a function so goimports can be easily
|
||||
// extended by adding a file with an init function.
|
||||
var findImport = findImportGoPath
|
||||
|
||||
// findImportGoPath resolves pkgName (the package identifier used in source)
// to a full import path, consulting the standard library first and then the
// scanned GOPATH package index. It returns the chosen import path, whether
// the import needs a rename, and any error encountered. An empty path with
// a nil error means no candidate satisfied all required symbols.
func findImportGoPath(pkgName string, symbols map[string]bool) (string, bool, error) {
	// Fast path for the standard library.
	// In the common case we hopefully never have to scan the GOPATH, which can
	// be slow with moving disks.
	if pkg, rename, ok := findImportStdlib(pkgName, symbols); ok {
		return pkg, rename, nil
	}

	// TODO(sameer): look at the import lines for other Go files in the
	// local directory, since the user is likely to import the same packages
	// in the current Go file. Return rename=true when the other Go files
	// use a renamed package that's also used in the current file.

	// Build the package index exactly once per process.
	pkgIndexOnce.Do(loadPkgIndex)

	// Collect exports for packages with matching names.
	var wg sync.WaitGroup
	var pkgsMu sync.Mutex // guards pkgs
	// full importpath => exported symbol => True
	// e.g. "net/http" => "Client" => True
	pkgs := make(map[string]map[string]bool)
	pkgIndex.Lock()
	for _, pkg := range pkgIndex.m[pkgName] {
		wg.Add(1)
		// Scan each candidate directory concurrently; the loop values are
		// passed as arguments so every goroutine captures its own copy.
		go func(importpath, dir string) {
			defer wg.Done()
			exports := loadExports(dir)
			if exports != nil {
				pkgsMu.Lock()
				pkgs[importpath] = exports
				pkgsMu.Unlock()
			}
		}(pkg.importpath, pkg.dir)
	}
	pkgIndex.Unlock()
	wg.Wait()

	// Filter out packages missing required exported symbols.
	// (Deleting from a map while ranging over it is safe in Go.)
	for symbol := range symbols {
		for importpath, exports := range pkgs {
			if !exports[symbol] {
				delete(pkgs, importpath)
			}
		}
	}
	if len(pkgs) == 0 {
		return "", false, nil
	}

	// If there are multiple candidate packages, the shortest one wins.
	// This is a heuristic to prefer the standard library (e.g. "bytes")
	// over e.g. "github.com/foo/bar/bytes".
	shortest := ""
	for importPath := range pkgs {
		if shortest == "" || len(importPath) < len(shortest) {
			shortest = importPath
		}
	}
	return shortest, false, nil
}
|
||||
|
||||
// visitFn adapts an ordinary function to the ast.Visitor interface, so a
// closure can be passed directly to ast.Walk.
type visitFn func(node ast.Node) ast.Visitor

// Visit implements ast.Visitor by delegating to the function itself.
func (f visitFn) Visit(node ast.Node) ast.Visitor {
	return f(node)
}
|
||||
|
||||
func findImportStdlib(shortPkg string, symbols map[string]bool) (importPath string, rename, ok bool) {
|
||||
for symbol := range symbols {
|
||||
path := stdlib.Symbols[shortPkg+"."+symbol]
|
||||
if path == "" {
|
||||
return "", false, false
|
||||
}
|
||||
if importPath != "" && importPath != path {
|
||||
// Ambiguous. Symbols pointed to different things.
|
||||
return "", false, false
|
||||
}
|
||||
importPath = path
|
||||
}
|
||||
return importPath, false, importPath != ""
|
||||
}
|
|
@ -1,206 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package goimports
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/parser"
|
||||
"go/printer"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/visualfc/gotools/command"
|
||||
)
|
||||
|
||||
// Command describes the "goimports" subcommand for the gotools command
// framework: it rewrites Go import lines in the given files or on stdin.
var Command = &command.Command{
	Run:       runGoimports,
	UsageLine: "goimports [flags] [path ...]",
	Short:     "updates go import lines",
	Long:      `goimports updates your Go import lines, adding missing ones and removing unreferenced ones. `,
}
|
||||
|
||||
// Command-line flags for the goimports subcommand.
var (
	goimportsList      bool // -l: only list files whose formatting differs
	goimportsWrite     bool // -w: rewrite files in place
	goimportsDiff      bool // -d: print diffs instead of rewriting
	goimportsAllErrors bool // -e: report all errors, not just the first 10

	// layout control
	goimportsComments  bool // -comments: print comments
	goimportsTabWidth  int  // -tabwidth: tab width for rendering
	goimportsTabIndent bool // -tabs: indent with tabs
)
|
||||
|
||||
// init registers the goimports flags on Command.
func init() {
	Command.Flag.BoolVar(&goimportsList, "l", false, "list files whose formatting differs from goimport's")
	Command.Flag.BoolVar(&goimportsWrite, "w", false, "write result to (source) file instead of stdout")
	Command.Flag.BoolVar(&goimportsDiff, "d", false, "display diffs instead of rewriting files")
	Command.Flag.BoolVar(&goimportsAllErrors, "e", false, "report all errors (not just the first 10 on different lines)")

	// layout control
	Command.Flag.BoolVar(&goimportsComments, "comments", true, "print comments")
	Command.Flag.IntVar(&goimportsTabWidth, "tabwidth", 8, "tab width")
	Command.Flag.BoolVar(&goimportsTabIndent, "tabs", true, "indent with tabs")
}
|
||||
|
||||
// Process-wide state shared by the goimports subcommand.
var (
	fileSet  = token.NewFileSet() // per process FileSet
	exitCode = 0                  // final process status; report sets it to 2

	initModesOnce sync.Once // guards calling initModes
	parserMode    parser.Mode
	printerMode   printer.Mode
	options       *Options // formatting options built in runGoimports
)
|
||||
|
||||
// report prints err to stderr and records a failing process exit status.
func report(err error) {
	scanner.PrintError(os.Stderr, err)
	exitCode = 2
}
|
||||
|
||||
func runGoimports(cmd *command.Command, args []string) error {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
if goimportsTabWidth < 0 {
|
||||
fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", goimportsTabWidth)
|
||||
exitCode = 2
|
||||
os.Exit(exitCode)
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
options = &Options{
|
||||
TabWidth: goimportsTabWidth,
|
||||
TabIndent: goimportsTabIndent,
|
||||
Comments: goimportsComments,
|
||||
AllErrors: goimportsAllErrors,
|
||||
Fragment: true,
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
if err := processFile("<standard input>", os.Stdin, os.Stdout, true); err != nil {
|
||||
report(err)
|
||||
}
|
||||
} else {
|
||||
for _, path := range args {
|
||||
switch dir, err := os.Stat(path); {
|
||||
case err != nil:
|
||||
report(err)
|
||||
case dir.IsDir():
|
||||
walkDir(path)
|
||||
default:
|
||||
if err := processFile(path, nil, os.Stdout, false); err != nil {
|
||||
report(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
os.Exit(exitCode)
|
||||
return nil
|
||||
}
|
||||
|
||||
// isGoFile reports whether f is a regular, non-hidden Go source file.
func isGoFile(f os.FileInfo) bool {
	if f.IsDir() {
		return false
	}
	name := f.Name()
	return strings.HasSuffix(name, ".go") && !strings.HasPrefix(name, ".")
}
|
||||
|
||||
func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error {
|
||||
if in == nil {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
in = f
|
||||
}
|
||||
|
||||
src, err := ioutil.ReadAll(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := Process(filename, src, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(src, res) {
|
||||
// formatting has changed
|
||||
if goimportsList {
|
||||
fmt.Fprintln(out, filename)
|
||||
}
|
||||
if goimportsWrite {
|
||||
err = ioutil.WriteFile(filename, res, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if goimportsDiff {
|
||||
data, err := diff(src, res)
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing diff: %s", err)
|
||||
}
|
||||
fmt.Printf("diff %s gofmt/%s\n", filename, filename)
|
||||
out.Write(data)
|
||||
}
|
||||
}
|
||||
|
||||
if !goimportsList && !goimportsWrite && !goimportsDiff {
|
||||
_, err = out.Write(res)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func visitFile(path string, f os.FileInfo, err error) error {
|
||||
if err == nil && isGoFile(f) {
|
||||
err = processFile(path, nil, os.Stdout, false)
|
||||
}
|
||||
if err != nil {
|
||||
report(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// walkDir formats every Go file under path. Errors are reported by
// visitFile (which never fails the walk), so Walk's return value carries
// no information here.
func walkDir(path string) {
	filepath.Walk(path, visitFile)
}
|
||||
|
||||
// diff returns the unified diff between b1 and b2 by writing them to two
// temporary files and shelling out to the system "diff" tool.
func diff(b1, b2 []byte) (data []byte, err error) {
	f1, err := ioutil.TempFile("", "gofmt")
	if err != nil {
		return
	}
	defer os.Remove(f1.Name())
	defer f1.Close()

	f2, err := ioutil.TempFile("", "gofmt")
	if err != nil {
		return
	}
	defer os.Remove(f2.Name())
	defer f2.Close()

	// Bug fix: write errors used to be ignored, so a failed write (e.g.
	// disk full) silently produced a diff against a truncated file.
	if _, err = f1.Write(b1); err != nil {
		return
	}
	if _, err = f2.Write(b2); err != nil {
		return
	}

	data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
	if len(data) > 0 {
		// diff exits with a non-zero status when the files don't match.
		// Ignore that failure as long as we get output.
		err = nil
	}
	return
}
|
|
@ -1,281 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package imports implements a Go pretty-printer (like package "go/format")
|
||||
// that also adds or removes import statements as necessary.
|
||||
package goimports
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
// Options specifies options for processing files.
type Options struct {
	Fragment  bool // Accept fragment of a source file (no package statement)
	AllErrors bool // Report all errors (not just the first 10 on different lines)

	Comments  bool // Print comments (true if nil *Options provided)
	TabIndent bool // Use tabs for indent (true if nil *Options provided)
	Format    bool // Run the result through go/format as a final pass
	TabWidth  int  // Tab width (8 if nil *Options provided)
}
|
||||
|
||||
// Process formats and adjusts imports for the provided file.
// If opt is nil the defaults are used.
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
	if opt == nil {
		opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
	}

	fileSet := token.NewFileSet()
	file, adjust, err := goImportParse(fileSet, filename, src, opt)
	if err != nil {
		return nil, err
	}

	// Add missing imports and drop unreferenced ones.
	_, err = fixImports(fileSet, file)
	if err != nil {
		return nil, err
	}

	sortImports(fileSet, file)
	imps := astutil.Imports(fileSet, file)

	var spacesBefore []string // import paths we need spaces before
	for _, impSection := range imps {
		// Within each block of contiguous imports, see if any
		// import lines are in different group numbers. If so,
		// we'll need to put a space between them so it's
		// compatible with gofmt.
		lastGroup := -1
		for _, importSpec := range impSection {
			importPath, _ := strconv.Unquote(importSpec.Path.Value)
			groupNum := importGroup(importPath)
			if groupNum != lastGroup && lastGroup != -1 {
				spacesBefore = append(spacesBefore, importPath)
			}
			lastGroup = groupNum
		}

	}

	// Render the modified AST back to source text.
	printerMode := printer.UseSpaces
	if opt.TabIndent {
		printerMode |= printer.TabIndent
	}
	printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}

	var buf bytes.Buffer
	err = printConfig.Fprint(&buf, fileSet, file)
	if err != nil {
		return nil, err
	}
	out := buf.Bytes()
	// adjust is non-nil when the input was a fragment that goImportParse
	// had to wrap; it strips the wrapping and restores original spacing.
	if adjust != nil {
		out = adjust(src, out)
	}
	if len(spacesBefore) > 0 {
		out = addImportSpaces(bytes.NewReader(out), spacesBefore)
	}
	if opt.Format {
		out, err = format.Source(out)
		if err != nil {
			return nil, err
		}
	}
	return out, nil
}
|
||||
|
||||
// goImportParse parses src, which was read from filename, as a Go source
// file or statement list. When opt.Fragment is set and the input lacks a
// package clause, it wraps the fragment so it parses, and also returns an
// adjust function that removes the wrapping from the printed output again
// (the offsets below depend on exactly how gofmt renders the wrapper).
func goImportParse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
	parserMode := parser.Mode(0)
	if opt.Comments {
		parserMode |= parser.ParseComments
	}
	if opt.AllErrors {
		parserMode |= parser.AllErrors
	}

	// Try as whole source file.
	file, err := parser.ParseFile(fset, filename, src, parserMode)
	if err == nil {
		return file, nil, nil
	}
	// If the error is that the source file didn't begin with a
	// package line and we accept fragmented input, fall through to
	// try as a source fragment. Stop and return on any other error.
	if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
		return nil, nil, err
	}

	// If this is a declaration list, make it a source file
	// by inserting a package clause.
	// Insert using a ;, not a newline, so that the line numbers
	// in psrc match the ones in src.
	psrc := append([]byte("package main;"), src...)
	file, err = parser.ParseFile(fset, filename, psrc, parserMode)
	if err == nil {
		// If a main function exists, we will assume this is a main
		// package and leave the file.
		if containsMainFunc(file) {
			return file, nil, nil
		}

		adjust := func(orig, src []byte) []byte {
			// Remove the package clause.
			// Gofmt has turned the ; into a \n.
			src = src[len("package main\n"):]
			return matchSpace(orig, src)
		}
		return file, adjust, nil
	}
	// If the error is that the source file didn't begin with a
	// declaration, fall through to try as a statement list.
	// Stop and return on any other error.
	if !strings.Contains(err.Error(), "expected declaration") {
		return nil, nil, err
	}

	// If this is a statement list, make it a source file
	// by inserting a package clause and turning the list
	// into a function body. This handles expressions too.
	// Insert using a ;, not a newline, so that the line numbers
	// in fsrc match the ones in src.
	fsrc := append(append([]byte("package p; func _() {"), src...), '}')
	file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
	if err == nil {
		adjust := func(orig, src []byte) []byte {
			// Remove the wrapping.
			// Gofmt has turned the ; into a \n\n.
			src = src[len("package p\n\nfunc _() {"):]
			src = src[:len(src)-len("}\n")]
			// Gofmt has also indented the function body one level.
			// Remove that indent.
			src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
			return matchSpace(orig, src)
		}
		return file, adjust, nil
	}

	// Failed, and out of options.
	return nil, nil, err
}
|
||||
|
||||
// containsMainFunc checks if a file contains a function declaration with
// the exact signature 'func main()' (no parameters, no results).
func containsMainFunc(file *ast.File) bool {
	for _, decl := range file.Decls {
		f, ok := decl.(*ast.FuncDecl)
		if !ok {
			continue
		}
		noParams := len(f.Type.Params.List) == 0
		noResults := f.Type.Results == nil || len(f.Type.Results.List) == 0
		if f.Name.Name == "main" && noParams && noResults {
			return true
		}
	}
	return false
}
|
||||
|
||||
// cutSpace splits b into its leading whitespace, its trimmed middle, and
// its trailing whitespace (space, tab, newline). When b is entirely
// whitespace, before and middle are nil and after is all of b.
func cutSpace(b []byte) (before, middle, after []byte) {
	isSpace := func(c byte) bool { return c == ' ' || c == '\t' || c == '\n' }
	lo := 0
	for lo < len(b) && isSpace(b[lo]) {
		lo++
	}
	hi := len(b)
	for hi > 0 && isSpace(b[hi-1]) {
		hi--
	}
	if lo > hi {
		// Nothing but whitespace.
		return nil, nil, b[hi:]
	}
	return b[:lo], b[lo:hi], b[hi:]
}
|
||||
|
||||
// matchSpace reformats src to use the same space context as orig.
// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
// 2) matchSpace copies the indentation of the first non-blank line in orig
// to every non-blank line in src.
// 3) matchSpace copies the trailing space from orig and uses it in place
// of src's trailing space.
func matchSpace(orig []byte, src []byte) []byte {
	before, _, after := cutSpace(orig)
	// Split orig's leading whitespace into whole blank lines ("before")
	// and the first line's indentation ("indent"). i is -1 when there is
	// no newline, which leaves before empty and indent the entire run.
	i := bytes.LastIndex(before, []byte{'\n'})
	before, indent := before[:i+1], before[i+1:]

	// Discard src's own leading/trailing space; orig's is re-applied.
	_, src, _ = cutSpace(src)

	var b bytes.Buffer
	b.Write(before)
	for len(src) > 0 {
		// Peel off one line, keeping its '\n' when present.
		line := src
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, src = line[:i+1], line[i+1:]
		} else {
			src = nil
		}
		if len(line) > 0 && line[0] != '\n' { // not blank
			b.Write(indent)
		}
		b.Write(line)
	}
	b.Write(after)
	return b.Bytes()
}
|
||||
|
||||
// impLine matches an indented import line inside an import block and
// captures the quoted import path; an optional package name or dot may
// precede the quoted string.
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
|
||||
|
||||
func addImportSpaces(r io.Reader, breaks []string) []byte {
|
||||
var out bytes.Buffer
|
||||
sc := bufio.NewScanner(r)
|
||||
inImports := false
|
||||
done := false
|
||||
for sc.Scan() {
|
||||
s := sc.Text()
|
||||
|
||||
if !inImports && !done && strings.HasPrefix(s, "import") {
|
||||
inImports = true
|
||||
}
|
||||
if inImports && (strings.HasPrefix(s, "var") ||
|
||||
strings.HasPrefix(s, "func") ||
|
||||
strings.HasPrefix(s, "const") ||
|
||||
strings.HasPrefix(s, "type")) {
|
||||
done = true
|
||||
inImports = false
|
||||
}
|
||||
if inImports && len(breaks) > 0 {
|
||||
if m := impLine.FindStringSubmatch(s); m != nil {
|
||||
if m[1] == string(breaks[0]) {
|
||||
out.WriteByte('\n')
|
||||
breaks = breaks[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintln(&out, s)
|
||||
}
|
||||
return out.Bytes()
|
||||
}
|
|
@ -1,214 +0,0 @@
|
|||
// +build go1.2
|
||||
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Hacked up copy of go/ast/import.go
|
||||
|
||||
package goimports
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
	for i, d := range f.Decls {
		d, ok := d.(*ast.GenDecl)
		if !ok || d.Tok != token.IMPORT {
			// Not an import declaration, so we're done.
			// Imports are always first.
			break
		}

		if len(d.Specs) == 0 {
			// Empty import block, remove it.
			f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
		}

		if !d.Lparen.IsValid() {
			// Not a block: sorted by default.
			continue
		}

		// Identify and sort runs of specs on successive lines.
		// NOTE: this i deliberately shadows the outer loop index.
		i := 0
		specs := d.Specs[:0]
		for j, s := range d.Specs {
			if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
				// j begins a new run. End this one.
				specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
				i = j
			}
		}
		specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
		d.Specs = specs

		// Deduping can leave a blank line before the rparen; clean that up.
		if len(d.Specs) > 0 {
			lastSpec := d.Specs[len(d.Specs)-1]
			lastLine := fset.Position(lastSpec.Pos()).Line
			if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
				fset.File(d.Rparen).MergeLine(rParenLine - 1)
			}
		}
	}
}
|
||||
|
||||
// importPath returns the unquoted import path of s, or "" when the path
// cannot be unquoted.
func importPath(s ast.Spec) string {
	t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
	if err != nil {
		return ""
	}
	return t
}
|
||||
|
||||
// importName returns the local (renamed) name of s, or "" when the import
// is not renamed.
func importName(s ast.Spec) string {
	if n := s.(*ast.ImportSpec).Name; n != nil {
		return n.Name
	}
	return ""
}
|
||||
|
||||
// importComment returns the text of the line comment attached to s, or "".
func importComment(s ast.Spec) string {
	if c := s.(*ast.ImportSpec).Comment; c != nil {
		return c.Text()
	}
	return ""
}
|
||||
|
||||
// collapse indicates whether prev may be removed, leaving only next.
|
||||
func collapse(prev, next ast.Spec) bool {
|
||||
if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
|
||||
return false
|
||||
}
|
||||
return prev.(*ast.ImportSpec).Comment == nil
|
||||
}
|
||||
|
||||
// posSpan records the original start/end positions of an import spec so
// they can be reassigned after sorting and deduplication.
type posSpan struct {
	Start token.Pos
	End   token.Pos
}
|
||||
|
||||
// sortSpecs sorts one run of consecutive import specs, removes duplicates
// that can be dropped without data loss, and re-anchors the surviving specs
// and their comments onto the original position sequence so the printer
// preserves the block's shape.
func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
	// Can't short-circuit here even if specs are already sorted,
	// since they might yet need deduplication.
	// A lone import, however, may be safely ignored.
	if len(specs) <= 1 {
		return specs
	}

	// Record positions for specs.
	pos := make([]posSpan, len(specs))
	for i, s := range specs {
		pos[i] = posSpan{s.Pos(), s.End()}
	}

	// Identify comments in this range.
	// Any comment from pos[0].Start to the final line counts.
	lastLine := fset.Position(pos[len(pos)-1].End).Line
	cstart := len(f.Comments)
	cend := len(f.Comments)
	for i, g := range f.Comments {
		if g.Pos() < pos[0].Start {
			continue
		}
		if i < cstart {
			cstart = i
		}
		if fset.Position(g.End()).Line > lastLine {
			cend = i
			break
		}
	}
	comments := f.Comments[cstart:cend]

	// Assign each comment to the import spec preceding it.
	// (This local map shadows the package-level importComment function.)
	importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
	specIndex := 0
	for _, g := range comments {
		for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
			specIndex++
		}
		s := specs[specIndex].(*ast.ImportSpec)
		importComment[s] = append(importComment[s], g)
	}

	// Sort the import specs by import path.
	// Remove duplicates, when possible without data loss.
	// Reassign the import paths to have the same position sequence.
	// Reassign each comment to abut the end of its spec.
	// Sort the comments by new position.
	sort.Sort(byImportSpec(specs))

	// Dedup. Thanks to our sorting, we can just consider
	// adjacent pairs of imports.
	deduped := specs[:0]
	for i, s := range specs {
		if i == len(specs)-1 || !collapse(s, specs[i+1]) {
			deduped = append(deduped, s)
		} else {
			// Removed spec: merge its line away so no blank remains.
			p := s.Pos()
			fset.File(p).MergeLine(fset.Position(p).Line)
		}
	}
	specs = deduped

	// Fix up comment positions
	for i, s := range specs {
		s := s.(*ast.ImportSpec)
		if s.Name != nil {
			s.Name.NamePos = pos[i].Start
		}
		s.Path.ValuePos = pos[i].Start
		s.EndPos = pos[i].End
		for _, g := range importComment[s] {
			for _, c := range g.List {
				c.Slash = pos[i].End
			}
		}
	}

	sort.Sort(byCommentPos(comments))

	return specs
}
|
||||
|
||||
// byImportSpec orders import specs by import group, then path, then local
// name, then comment text, matching gofmt's grouping conventions.
type byImportSpec []ast.Spec // slice of *ast.ImportSpec

func (x byImportSpec) Len() int      { return len(x) }
func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byImportSpec) Less(i, j int) bool {
	ipath := importPath(x[i])
	jpath := importPath(x[j])

	// Group number dominates (e.g. stdlib before third-party).
	igroup := importGroup(ipath)
	jgroup := importGroup(jpath)
	if igroup != jgroup {
		return igroup < jgroup
	}

	if ipath != jpath {
		return ipath < jpath
	}
	iname := importName(x[i])
	jname := importName(x[j])

	if iname != jname {
		return iname < jname
	}
	return importComment(x[i]) < importComment(x[j])
}
|
||||
|
||||
// byCommentPos orders comment groups by their source position.
type byCommentPos []*ast.CommentGroup

func (x byCommentPos) Len() int           { return len(x) }
func (x byCommentPos) Swap(i, j int)      { x[i], x[j] = x[j], x[i] }
func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
|
|
@ -1,14 +0,0 @@
|
|||
// +build !go1.2
|
||||
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package goimports
|
||||
|
||||
import "go/ast"
|
||||
|
||||
// Go 1.1 users don't get fancy package grouping.
// But this is still gofmt-compliant:
// sortImports falls back to the standard library's import sorter.
var sortImports = ast.SortImports
|
|
@ -1,383 +0,0 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//modify 2013-2014 visualfc
|
||||
|
||||
package gopresent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/visualfc/gotools/command"
|
||||
"golang.org/x/tools/present"
|
||||
)
|
||||
|
||||
// Command describes the "gopresent" subcommand: it verifies or renders
// golang present (.slide / .article) files to HTML.
var Command = &command.Command{
	Run:       runPresent,
	UsageLine: "gopresent",
	Short:     "golang present util",
	Long:      `golang present util`,
}
|
||||
|
||||
var presentVerifyOnly bool // -v: verify the input without rendering
var presentInput string    // -i: input golang present file
var presentStdout bool     // -stdout: write the HTML to standard output
var presentOutput string   // -o: output html file name
||||
|
||||
// init registers the gopresent flags on Command.
func init() {
	Command.Flag.BoolVar(&presentVerifyOnly, "v", false, "verify present only")
	Command.Flag.BoolVar(&presentStdout, "stdout", false, "output use std output")
	Command.Flag.StringVar(&presentInput, "i", "", "input golang present file")
	Command.Flag.StringVar(&presentOutput, "o", "", "output html file name")
}
|
||||
|
||||
func runPresent(cmd *command.Command, args []string) error {
|
||||
if presentInput == "" || !isDoc(presentInput) {
|
||||
cmd.Usage()
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
if presentVerifyOnly {
|
||||
err := VerifyDoc(presentInput)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "present:%s", err)
|
||||
command.SetExitStatus(3)
|
||||
command.Exit()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
w := os.Stdout
|
||||
if !presentStdout {
|
||||
if presentOutput == "" {
|
||||
presentOutput = presentInput + ".html"
|
||||
}
|
||||
ext := filepath.Ext(presentOutput)
|
||||
if ext != ".htm" && ext != ".html" {
|
||||
presentOutput += ".html"
|
||||
}
|
||||
var err error
|
||||
w, err = os.Create(presentOutput)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "present:%s", err)
|
||||
command.SetExitStatus(3)
|
||||
command.Exit()
|
||||
}
|
||||
}
|
||||
err := RenderDoc(w, presentInput)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "present:%s", err)
|
||||
command.SetExitStatus(3)
|
||||
command.Exit()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var extensions = map[string]string{
|
||||
".slide": "slides.tmpl",
|
||||
".article": "article.tmpl",
|
||||
}
|
||||
|
||||
var extensions_tmpl = map[string]string{
|
||||
".slide": slides_tmpl,
|
||||
".article": article_tmpl,
|
||||
}
|
||||
|
||||
func isDoc(path string) bool {
|
||||
_, ok := extensions[filepath.Ext(path)]
|
||||
return ok
|
||||
}
|
||||
|
||||
func VerifyDoc(docFile string) error {
|
||||
doc, err := parse(docFile, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dir := filepath.Dir(docFile)
|
||||
return verify_doc(dir, doc)
|
||||
}
|
||||
|
||||
// renderDoc reads the present file, builds its template representation,
// and executes the template, sending output to w. Unlike RenderDoc, the
// templates are loaded from the templates/ directory under base.
func renderDoc(w io.Writer, base, docFile string) error {
	// Read the input and build the doc structure.
	doc, err := parse(docFile, 0)
	if err != nil {
		return err
	}

	// Find which template should be executed.
	ext := filepath.Ext(docFile)
	contentTmpl, ok := extensions[ext]
	if !ok {
		return fmt.Errorf("no template for extension %v", ext)
	}

	// Locate the template file.
	actionTmpl := filepath.Join(base, "templates/action.tmpl")
	contentTmpl = filepath.Join(base, "templates", contentTmpl)

	// Read and parse the input.
	tmpl := present.Template()
	tmpl = tmpl.Funcs(template.FuncMap{"playable": playable})
	if _, err := tmpl.ParseFiles(actionTmpl, contentTmpl); err != nil {
		return err
	}
	// Execute the template.
	return doc.Render(w, tmpl)
}
|
||||
|
||||
// RenderDoc reads the present file, builds its template representation
// from the embedded template strings (action_tmpl plus the extension's
// content template), and executes it, sending the HTML to w.
func RenderDoc(w io.Writer, docFile string) error {
	// Read the input and build the doc structure.
	doc, err := parse(docFile, 0)
	if err != nil {
		return err
	}

	// Find which template should be executed.
	ext := filepath.Ext(docFile)
	contentTmpl, ok := extensions_tmpl[ext]
	if !ok {
		return fmt.Errorf("no template for extension %v", ext)
	}

	// Locate the template file.
	actionTmpl := action_tmpl //filepath.Join(base, "templates/action.tmpl")
	// Read and parse the input.
	tmpl := present.Template()
	tmpl = tmpl.Funcs(template.FuncMap{"playable": playable})
	if tmpl, err = tmpl.New("action").Parse(actionTmpl); err != nil {
		return err
	}
	if tmpl, err = tmpl.New("content").Parse(contentTmpl); err != nil {
		return err
	}

	// Execute the template.
	return doc.Render(w, tmpl)
}
|
||||
|
||||
func parse(name string, mode present.ParseMode) (*present.Doc, error) {
|
||||
f, err := os.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
return present.Parse(f, name, 0)
|
||||
}
|
||||
|
||||
// playable reports whether code snippet c should render as a runnable
// playground (requires both global play support and the snippet's Play flag).
func playable(c present.Code) bool {
	return present.PlayEnabled && c.Play
}
|
||||
|
||||
// isSkipURL reports whether url is a remote http/https URL and therefore
// exempt from the local-file existence check.
// NOTE(review): filepath.HasPrefix is deprecated; on these inputs it acts
// as a plain string prefix test, so behavior is preserved.
func isSkipURL(url string) bool {
	return filepath.HasPrefix(url, "http://") ||
		filepath.HasPrefix(url, "https://")
}
|
||||
|
||||
func verify_path(root string, url string) error {
|
||||
if isSkipURL(url) {
|
||||
return nil
|
||||
}
|
||||
path := url
|
||||
if !filepath.IsAbs(url) {
|
||||
path = filepath.Join(root, path)
|
||||
}
|
||||
_, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func verify_doc(root string, doc *present.Doc) error {
|
||||
for _, section := range doc.Sections {
|
||||
for _, elem := range section.Elem {
|
||||
switch i := elem.(type) {
|
||||
case present.Image:
|
||||
if err := verify_path(root, i.URL); err != nil {
|
||||
return fmt.Errorf("! .image %s not exist", i.URL)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var action_tmpl = `
|
||||
{/*
|
||||
This is the action template.
|
||||
It determines how the formatting actions are rendered.
|
||||
*/}
|
||||
|
||||
{{define "section"}}
|
||||
<h{{len .Number}} id="TOC_{{.FormattedNumber}}">{{.FormattedNumber}} {{.Title}}</h{{len .Number}}>
|
||||
{{range .Elem}}{{elem $.Template .}}{{end}}
|
||||
{{end}}
|
||||
|
||||
{{define "list"}}
|
||||
<ul>
|
||||
{{range .Bullet}}
|
||||
<li>{{style .}}</li>
|
||||
{{end}}
|
||||
</ul>
|
||||
{{end}}
|
||||
|
||||
{{define "text"}}
|
||||
{{if .Pre}}
|
||||
<div class="code"><pre>{{range .Lines}}{{.}}{{end}}</pre></div>
|
||||
{{else}}
|
||||
<p>
|
||||
{{range $i, $l := .Lines}}{{if $i}}{{template "newline"}}
|
||||
{{end}}{{style $l}}{{end}}
|
||||
</p>
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
{{define "code"}}
|
||||
<div class="code{{if playable .}} playground{{end}}" contenteditable="true" spellcheck="false">{{.Text}}</div>
|
||||
{{end}}
|
||||
|
||||
{{define "image"}}
|
||||
<div class="image">
|
||||
<img src="{{.URL}}"{{with .Height}} height="{{.}}"{{end}}{{with .Width}} width="{{.}}"{{end}}>
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
{{define "iframe"}}
|
||||
<iframe src="{{.URL}}"{{with .Height}} height="{{.}}"{{end}}{{with .Width}} width="{{.}}"{{end}}></iframe>
|
||||
{{end}}
|
||||
|
||||
{{define "link"}}<p class="link"><a href="{{.URL}}" target="_blank">{{style .Label}}</a></p>{{end}}
|
||||
|
||||
{{define "html"}}{{.HTML}}{{end}}
|
||||
`
|
||||
|
||||
var article_tmpl = `
|
||||
{/* This is the article template. It defines how articles are formatted. */}
|
||||
|
||||
{{define "root"}}
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>{{.Title}}</title>
|
||||
<link type="text/css" rel="stylesheet" href="static/article.css">
|
||||
<meta charset='utf-8'>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="topbar" class="wide">
|
||||
<div class="container">
|
||||
<div id="heading">{{.Title}}
|
||||
{{with .Subtitle}}{{.}}{{end}}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div id="page" class="wide">
|
||||
<div class="container">
|
||||
{{with .Sections}}
|
||||
<div id="toc">
|
||||
{{template "TOC" .}}
|
||||
</div>
|
||||
{{end}}
|
||||
|
||||
{{range .Sections}}
|
||||
{{elem $.Template .}}
|
||||
{{end}}{{/* of Section block */}}
|
||||
|
||||
<h2>Authors</h2>
|
||||
{{range .Authors}}
|
||||
<div class="author">
|
||||
{{range .Elem}}{{elem $.Template .}}{{end}}
|
||||
</div>
|
||||
{{end}}
|
||||
</div>
|
||||
</div>
|
||||
<script src='/play.js'></script>
|
||||
</body>
|
||||
</html>
|
||||
{{end}}
|
||||
|
||||
{{define "TOC"}}
|
||||
<ul>
|
||||
{{range .}}
|
||||
<li><a href="#TOC_{{.FormattedNumber}}">{{.Title}}</a></li>
|
||||
{{with .Sections}}{{template "TOC" .}}{{end}}
|
||||
{{end}}
|
||||
</ul>
|
||||
{{end}}
|
||||
|
||||
{{define "newline"}}
|
||||
{{/* No automatic line break. Paragraphs are free-form. */}}
|
||||
{{end}}
|
||||
`
|
||||
|
||||
var slides_tmpl = `
|
||||
{/* This is the slide template. It defines how presentations are formatted. */}
|
||||
|
||||
{{define "root"}}
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>{{.Title}}</title>
|
||||
<meta charset='utf-8'>
|
||||
<script src='static/slides.js'></script>
|
||||
</head>
|
||||
|
||||
<body style='display: none'>
|
||||
|
||||
<section class='slides layout-widescreen'>
|
||||
|
||||
<article>
|
||||
<h1>{{.Title}}</h1>
|
||||
{{with .Subtitle}}<h3>{{.}}</h3>{{end}}
|
||||
{{if not .Time.IsZero}}<h3>{{.Time.Format "2 January 2006"}}</h3>{{end}}
|
||||
{{range .Authors}}
|
||||
<div class="presenter">
|
||||
{{range .TextElem}}{{elem $.Template .}}{{end}}
|
||||
</div>
|
||||
{{end}}
|
||||
</article>
|
||||
|
||||
{{range $i, $s := .Sections}}
|
||||
<!-- start of slide {{$s.Number}} -->
|
||||
<article>
|
||||
{{if $s.Elem}}
|
||||
<h3>{{$s.Title}}</h3>
|
||||
{{range $s.Elem}}{{elem $.Template .}}{{end}}
|
||||
{{else}}
|
||||
<h2>{{$s.Title}}</h2>
|
||||
{{end}}
|
||||
</article>
|
||||
<!-- end of slide {{$i}} -->
|
||||
{{end}}{{/* of Slide block */}}
|
||||
|
||||
<article>
|
||||
<h3>Thank you</h1>
|
||||
{{range .Authors}}
|
||||
<div class="presenter">
|
||||
{{range .Elem}}{{elem $.Template .}}{{end}}
|
||||
</div>
|
||||
{{end}}
|
||||
</article>
|
||||
|
||||
</body>
|
||||
{{if .PlayEnabled}}
|
||||
<script src='/play.js'></script>
|
||||
{{end}}
|
||||
</html>
|
||||
{{end}}
|
||||
|
||||
{{define "newline"}}
|
||||
<br>
|
||||
{{end}}
|
||||
`
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue