🔧 Vendoring
This commit is contained in:
parent
f96c8befdf
commit
aea0392beb
|
@ -0,0 +1,20 @@
|
|||
# Image for building and running the Wide web IDE.
# NOTE(review): pin a specific Go version/digest instead of :latest for reproducible builds.
FROM golang:latest

# MAINTAINER is deprecated; use a LABEL instead.
LABEL maintainer="Liang Ding <d@b3log.org>"

ENV GOROOT=/usr/local/go

# -y is required: `docker build` has no TTY, so an interactive apt prompt aborts the build.
# Clean the apt lists in the same layer to keep the image small.
# A copy of the Go toolchain is kept as the bootstrap toolchain (GOROOT_BOOTSTRAP)
# used by Wide's cross-compilation feature.
RUN apt-get update && apt-get install -y --no-install-recommends \
      bzip2 \
      unzip \
      zip \
    && rm -rf /var/lib/apt/lists/* \
    && cp -r /usr/local/go /usr/local/gobt
ENV GOROOT_BOOTSTRAP=/usr/local/gobt

# COPY is preferred over ADD for plain local files (no tar/URL magic needed here).
COPY . /wide/gogogo/src/github.com/b3log/wide
COPY vendor/* /go/src/

# Tools Wide shells out to for code assist (autocomplete, goimports, etc.).
RUN go install github.com/visualfc/gotools github.com/nsf/gocode github.com/bradfitz/goimports

# Separate accounts: "wide" runs the IDE process, "runner" executes user programs.
RUN useradd wide && useradd runner

ENV GOPATH=/wide/gogogo

WORKDIR /wide/gogogo/src/github.com/b3log/wide
RUN go build -v

# Documentation only: the Wide HTTP server listens on 7070.
EXPOSE 7070
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,180 @@
|
|||
# [Wide](https://github.com/b3log/wide) [![Build Status](https://img.shields.io/travis/b3log/wide.svg?style=flat)](https://travis-ci.org/b3log/wide) [![Go Report Card](https://goreportcard.com/badge/github.com/b3log/wide)](https://goreportcard.com/report/github.com/b3log/wide) [![Coverage Status](https://img.shields.io/coveralls/b3log/wide.svg?style=flat)](https://coveralls.io/r/b3log/wide) [![Apache License](https://img.shields.io/badge/license-apache2-orange.svg?style=flat)](https://www.apache.org/licenses/LICENSE-2.0) [![API Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/b3log/wide) [![Download](https://img.shields.io/badge/download-~4.3K-red.svg?style=flat)](https://pan.baidu.com/s/1dD3XwOT)
|
||||
|
||||
_Have a [try](https://wide.b3log.org/signup) first, then [download](https://pan.baidu.com/s/1dD3XwOT) it and set it up on your local area network — enjoy yourself!_
|
||||
|
||||
先试试我们搭建好的[在线服务](https://wide.b3log.org/signup),你可以在这里[下载](https://pan.baidu.com/s/1dD3XwOT)并在本地环境运行,然后邀请小伙伴们来玩吧!
|
||||
|
||||
> * 关于 Wide 的产品定位,请看[这里](https://hacpai.com/article/1438407961481),并欢迎参与讨论~
|
||||
> * 加入[**黑客派**](https://hacpai.com/register),与其他程序员、设计师共同成长!
|
||||
|
||||
## Introduction
|
||||
|
||||
A <b>W</b>eb-based <b>IDE</b> for Teams using Go programming language/Golang.
|
||||
|
||||
![Hello, 世界](https://cloud.githubusercontent.com/assets/873584/4606377/d0ca3c2a-521b-11e4-912c-d955ab05850b.png)
|
||||
|
||||
## Authors
|
||||
|
||||
[Daniel](https://github.com/88250) and [Vanessa](https://github.com/Vanessa219) are the main authors of Wide, [here](https://github.com/b3log/wide/graphs/contributors) are all contributors.
|
||||
|
||||
Wide 的主要作者是 [Daniel](https://github.com/88250) 与 [Vanessa](https://github.com/Vanessa219),所有贡献者可以在[这里](https://github.com/b3log/wide/graphs/contributors)看到。
|
||||
|
||||
## Motivation
|
||||
|
||||
* **Team** IDE:
|
||||
* _Safe and reliable_: the project source code stored on the server in real time, the developer's machine crashes without losing any source code
|
||||
* _Unified environment_: server unified development environment configuration, the developer machine without any additional configuration
|
||||
* _Out of the box_: 5 minutes to setup a server then open browser to develop, debug
|
||||
* _Version Control_: each developer has its own source code repository, easy sync with the trunk
|
||||
* **Web-based** IDE:
|
||||
* Developer needs a browser only
|
||||
* Cross-platform, even on mobile devices
|
||||
* Easy to extend
|
||||
* Easy to integrate with other systems
|
||||
* For the geeks
|
||||
* A try for commercial-open source: versions customized for enterprises, close to their development work flows respectively
|
||||
* Currently more popular Go IDE has some defects or regrets:
|
||||
* Text editor (vim/emacs/sublime/Atom, etc.): For the Go newbie is too complex
|
||||
* Plug-in (goclipse, etc.): the need for the original IDE support, not professional
|
||||
* LiteIDE: no modern user interface :p
|
||||
* No team development experience
|
||||
* There are a few Go IDEs, and none of them is developed in Go itself, so this is a nice try
|
||||
|
||||
## Features
|
||||
|
||||
* [X] Code Highlight, Folding: Go/HTML/JavaScript/Markdown etc.
|
||||
* [X] Autocomplete: Go/HTML etc.
|
||||
* [X] Format: Go/HTML/JSON etc.
|
||||
* [X] Build & Run
|
||||
* [X] Multiplayer: a real team development experience
|
||||
* [X] Navigation, Jump to declaration, Find usages, File search etc.
|
||||
* [X] Shell: run command on the server
|
||||
* [X] Web development: HTML/JS/CSS editor with [Emmet](https://emmet.io) integrated
|
||||
* [X] Go tool: go get/install/fmt etc.
|
||||
* [X] File Import & Export
|
||||
* [X] Themes: editor and UI adjust, respectively
|
||||
* [X] Cross-Compilation
|
||||
* [ ] Debug
|
||||
* [ ] Git integration: git command on the web
|
||||
|
||||
## Screenshots
|
||||
|
||||
* **Overview**
|
||||
|
||||
![Overview](https://cloud.githubusercontent.com/assets/873584/5450620/1d51831e-8543-11e4-930b-670871902425.png)
|
||||
* **Goto File**
|
||||
|
||||
![Goto File](https://cloud.githubusercontent.com/assets/873584/5450616/1d495da6-8543-11e4-9285-f9d9c60779ac.png)
|
||||
* **Autocomplete**
|
||||
|
||||
![Autocomplete](https://cloud.githubusercontent.com/assets/873584/5450619/1d4d5712-8543-11e4-8fe4-35dbc8348a6e.png)
|
||||
* **Theme**
|
||||
|
||||
![4](https://cloud.githubusercontent.com/assets/873584/5450617/1d4c0826-8543-11e4-8b86-f79a4e41550a.png)
|
||||
* **Show Expression Info**
|
||||
|
||||
![Show Expression Info](https://cloud.githubusercontent.com/assets/873584/5450618/1d4cd9f4-8543-11e4-950f-121bd3ff4a39.png)
|
||||
* **Build Error Info**
|
||||
|
||||
![Build Error Info](https://cloud.githubusercontent.com/assets/873584/5450632/3e51cccc-8543-11e4-8ca8-8d2427aa16b8.png)
|
||||
* **Git Clone**
|
||||
|
||||
![Git Clone](https://cloud.githubusercontent.com/assets/873584/6545235/2284f230-c5b7-11e4-985e-7e04367921b1.png)
|
||||
* **Cross-Compilation**
|
||||
|
||||
![Cross-Compilation](https://cloud.githubusercontent.com/assets/873584/10130037/226d75fc-65f7-11e5-94e4-25ee579ca175.png)
|
||||
|
||||
* **Playground**
|
||||
![Playground](https://cloud.githubusercontent.com/assets/873584/21209772/449ecfd2-c2b1-11e6-9aa6-a83477d9f269.gif)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Build & Run
|
||||
|
||||
![Build & Run](https://cloud.githubusercontent.com/assets/873584/4389219/3642bc62-43f3-11e4-8d1f-06d7aaf22784.png)
|
||||
|
||||
* A browser tab corresponds to a Wide session
|
||||
* Execution output push via WebSocket
|
||||
|
||||
Flow:
|
||||
1. Browser sends ````Build```` request
|
||||
2. Server executes ````go build```` command via ````os/exec````<br/>
|
||||
2.1. Generates an executable file
|
||||
3. Browser sends ````Run```` request
|
||||
4. Server executes the file via ````os/exec````<br/>
|
||||
4.1. A running process<br/>
|
||||
4.2. Execution output push via WebSocket channel
|
||||
5. Browser renders with callback function ````ws.onmessage````
|
||||
|
||||
### Code Assist
|
||||
|
||||
![Code Assist](https://cloud.githubusercontent.com/assets/873584/4399135/3b80c21c-4463-11e4-8e94-7f7e8d12a4df.png)
|
||||
|
||||
* Autocompletion
|
||||
* Find Usages/Jump To Declaration/etc.
|
||||
|
||||
Flow:
|
||||
1. Browser sends code assist request
|
||||
2. Handler gets user workspace of the request with HTTP session
|
||||
3. Server executes ````gocode````/````ide_stub(gotools)````<br/>
|
||||
3.1 Sets environment variables (e.g. ${GOPATH})<br/>
|
||||
3.2 ````gocode```` with ````lib-path```` parameter
|
||||
|
||||
## Documents
|
||||
|
||||
* [用户指南](https://www.gitbook.com/book/88250/wide-user-guide)
|
||||
* [开发指南](https://www.gitbook.com/book/88250/wide-dev-guide)
|
||||
|
||||
## Setup
|
||||
|
||||
### Download Binary
|
||||
|
||||
We have provided OS-specific executable binary as follows:
|
||||
|
||||
* linux-amd64/386
|
||||
* windows-amd64/386
|
||||
* darwin-amd64/386
|
||||
|
||||
Download [HERE](https://pan.baidu.com/s/1dD3XwOT)!
|
||||
|
||||
### Build Wide for yourself
|
||||
|
||||
1. [Download](https://github.com/b3log/wide/archive/master.zip) source or by `git clone https://github.com/b3log/wide`
|
||||
2. Get dependencies with
|
||||
* `go get`
|
||||
* `go get github.com/visualfc/gotools github.com/nsf/gocode github.com/bradfitz/goimports`
|
||||
3. Compile wide with `go build`
|
||||
|
||||
### Docker
|
||||
|
||||
1. Get image: `sudo docker pull 88250/wide:latest`
|
||||
2. Run: `sudo docker run -p 127.0.0.1:7070:7070 88250/wide:latest ./wide -docker=true -channel=ws://127.0.0.1:7070`
|
||||
3. Open browser: http://127.0.0.1:7070
|
||||
|
||||
## Known Issues
|
||||
|
||||
* [Shell is not available on Windows](https://github.com/b3log/wide/issues/32)
|
||||
* [Rename directory](https://github.com/b3log/wide/issues/251)
|
||||
|
||||
## Terms
|
||||
|
||||
* This software is open sourced under the Apache License 2.0
|
||||
* You can not get rid of the "Powered by [B3log](https://b3log.org)" from any page, even which you made
|
||||
* If you want to use this software for commercial purpose, please mail to support@liuyun.io for a commercial license request
|
||||
* Copyright © b3log.org, all rights reserved
|
||||
|
||||
## Credits
|
||||
|
||||
Wide is made possible by the following open source projects.
|
||||
|
||||
* [golang](https://golang.org)
|
||||
* [CodeMirror](https://github.com/marijnh/CodeMirror)
|
||||
* [zTree](https://github.com/zTree/zTree_v3)
|
||||
* [LiteIDE](https://github.com/visualfc/liteide)
|
||||
* [gocode](https://github.com/nsf/gocode)
|
||||
* [Gorilla](https://github.com/gorilla)
|
||||
* [Docker](https://docker.com)
|
||||
|
||||
----
|
||||
|
||||
<img src="https://cloud.githubusercontent.com/assets/873584/4606328/4e848b96-5219-11e4-8db1-fa12774b57b4.png" width="256px" />
|
|
@ -0,0 +1,4 @@
|
|||
* This software is open sourced under the Apache License 2.0
|
||||
* You can not get rid of the "Powered by [B3log](https://b3log.org)" from any page, even pages developed by you
|
||||
* If you want to use this software for commercial purpose, please mail to support@liuyun.io to request a commercial license
|
||||
* Copyright (c) b3log.org, all rights reserved
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/bash
# Run `go test` with coverage on each package directory and merge the
# per-package profiles into a single profile.cov for reporting.
# see https://gist.github.com/hailiang/0f22736320abe6be71ce for more details

set -e

# Run test coverage on each subdirectory and merge the coverage profiles.

echo "mode: count" > profile.cov

# Standard go tooling behavior is to ignore dirs with leading underscores
for dir in $(find . -maxdepth 10 -not -path './.git*' -not -path '*/_*' -type d);
do
    # Only test directories that actually contain Go files.
    if ls "$dir"/*.go &> /dev/null; then
        go test -covermode=count -coverprofile="$dir/profile.tmp" "$dir"
        if [ -f "$dir/profile.tmp" ]
        then
            # Drop the per-package "mode:" header before appending.
            tail -n +2 "$dir/profile.tmp" >> profile.cov
            rm "$dir/profile.tmp"
        fi
    fi
done

go tool cover -func profile.cov
|
||||
|
|
@ -0,0 +1,135 @@
|
|||
/*
 * Copyright (c) 2014-2015, b3log.org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file frontend tool.
 *
 * @author <a href="mailto:liliyuan@fangstar.net">Liyuan Li</a>
 * @version 0.1.0.0, Dec 15, 2015
 */
var gulp = require("gulp");
var concat = require('gulp-concat');
var minifyCSS = require('gulp-minify-css');
var uglify = require('gulp-uglify');
var sourcemaps = require("gulp-sourcemaps");

// "cc" (concat & compress): bundles and minifies the third-party and
// Wide-specific CSS/JS assets into the static/ directory.
gulp.task('cc', function () {
    // css
    var cssLibs = ['./static/js/lib/jquery-layout/layout-default-latest.css',
        './static/js/lib/codemirror-5.1/codemirror.css',
        './static/js/lib/codemirror-5.1/addon/hint/show-hint.css',
        './static/js/lib/codemirror-5.1/addon/lint/lint.css',
        './static/js/lib/codemirror-5.1/addon/fold/foldgutter.css',
        './static/js/lib/codemirror-5.1/addon/dialog/dialog.css',
        './static/js/overwrite/codemirror/theme/*.css'];
    gulp.src(cssLibs)
            .pipe(minifyCSS())
            .pipe(concat('lib.min.css'))
            .pipe(gulp.dest('./static/css/'));

    gulp.src('./static/js/lib/ztree/zTreeStyle.css')
            .pipe(minifyCSS())
            .pipe(concat('zTreeStyle.min.css'))
            .pipe(gulp.dest('./static/js/lib/ztree/'));

    var cssWide = ['./static/css/dialog.css',
        './static/css/base.css',
        './static/css/wide.css',
        './static/css/side.css',
        './static/css/start.css',
        './static/css/about.css'
    ];

    gulp.src(cssWide)
            .pipe(minifyCSS())
            .pipe(concat('wide.min.css'))
            .pipe(gulp.dest('./static/css/'));

    // js
    // NOTE: duplicate entries removed (active-line.js was listed twice),
    // they were concatenated twice into lib.min.js.
    var jsLibs = ['./static/js/lib/jquery-2.1.1.min.js',
        './static/js/lib/jquery-ui.min.js',
        './static/js/lib/jquery-layout/jquery.layout-latest.js',
        './static/js/lib/reconnecting-websocket.js',
        './static/js/lib/Autolinker.min.js',
        './static/js/lib/emmet.js',
        './static/js/lib/js-beautify-1.5.4/beautify.js',
        './static/js/lib/js-beautify-1.5.4/beautify-html.js',
        './static/js/lib/js-beautify-1.5.4/beautify-css.js',
        './static/js/lib/jquery-file-upload-9.8.0/vendor/jquery.ui.widget.js',
        './static/js/lib/jquery-file-upload-9.8.0/jquery.iframe-transport.js',
        './static/js/lib/jquery-file-upload-9.8.0/jquery.fileupload.js',
        './static/js/lib/codemirror-5.1/codemirror.min.js',
        './static/js/lib/codemirror-5.1/addon/lint/lint.js',
        './static/js/lib/codemirror-5.1/addon/lint/json-lint.js',
        './static/js/lib/codemirror-5.1/addon/selection/active-line.js',
        './static/js/overwrite/codemirror/addon/hint/show-hint.js',
        './static/js/lib/codemirror-5.1/addon/hint/anyword-hint.js',
        './static/js/lib/codemirror-5.1/addon/display/rulers.js',
        './static/js/lib/codemirror-5.1/addon/edit/closebrackets.js',
        './static/js/lib/codemirror-5.1/addon/edit/matchbrackets.js',
        './static/js/lib/codemirror-5.1/addon/edit/closetag.js',
        './static/js/lib/codemirror-5.1/addon/search/searchcursor.js',
        './static/js/lib/codemirror-5.1/addon/search/search.js',
        './static/js/lib/codemirror-5.1/addon/dialog/dialog.js',
        './static/js/lib/codemirror-5.1/addon/search/match-highlighter.js',
        './static/js/lib/codemirror-5.1/addon/fold/foldcode.js',
        './static/js/lib/codemirror-5.1/addon/fold/foldgutter.js',
        './static/js/lib/codemirror-5.1/addon/fold/brace-fold.js',
        './static/js/lib/codemirror-5.1/addon/fold/xml-fold.js',
        './static/js/lib/codemirror-5.1/addon/fold/markdown-fold.js',
        './static/js/lib/codemirror-5.1/addon/fold/comment-fold.js',
        './static/js/lib/codemirror-5.1/addon/fold/mode/loadmode.js',
        './static/js/lib/codemirror-5.1/addon/fold/comment/comment.js',
        './static/js/lib/codemirror-5.1/mode/meta.js',
        './static/js/lib/codemirror-5.1/mode/go/go.js',
        './static/js/lib/codemirror-5.1/mode/clike/clike.js',
        './static/js/lib/codemirror-5.1/mode/xml/xml.js',
        './static/js/lib/codemirror-5.1/mode/htmlmixed/htmlmixed.js',
        './static/js/lib/codemirror-5.1/mode/javascript/javascript.js',
        './static/js/lib/codemirror-5.1/mode/markdown/markdown.js',
        './static/js/lib/codemirror-5.1/mode/css/css.js',
        './static/js/lib/codemirror-5.1/mode/shell/shell.js',
        './static/js/lib/codemirror-5.1/mode/sql/sql.js',
        './static/js/lib/codemirror-5.1/keymap/vim.js',
        './static/js/lib/lint/json-lint.js',
        './static/js/lib/lint/go-lint.js'];
    gulp.src(jsLibs)
            .pipe(uglify())
            .pipe(concat('lib.min.js'))
            .pipe(gulp.dest('./static/js/'));

    // NOTE: duplicate tabs.js entry removed — it was bundled twice into wide.min.js.
    var jsWide = ['./static/js/tabs.js',
        './static/js/dialog.js',
        './static/js/editors.js',
        './static/js/notification.js',
        './static/js/tree.js',
        './static/js/wide.js',
        './static/js/session.js',
        './static/js/menu.js',
        './static/js/windows.js',
        './static/js/hotkeys.js',
        './static/js/bottomGroup.js'
    ];
    gulp.src(jsWide)
            .pipe(sourcemaps.init())
            .pipe(uglify())
            .pipe(concat('wide.min.js'))
            .pipe(sourcemaps.write("."))
            .pipe(gulp.dest('./static/js/'));
});
|
Binary file not shown.
|
@ -0,0 +1,483 @@
|
|||
// Copyright (c) 2014-2018, b3log.org & hacpai.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// https://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"flag"
|
||||
"html/template"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/b3log/wide/conf"
|
||||
"github.com/b3log/wide/editor"
|
||||
"github.com/b3log/wide/event"
|
||||
"github.com/b3log/wide/file"
|
||||
"github.com/b3log/wide/i18n"
|
||||
"github.com/b3log/wide/log"
|
||||
"github.com/b3log/wide/notification"
|
||||
"github.com/b3log/wide/output"
|
||||
"github.com/b3log/wide/playground"
|
||||
"github.com/b3log/wide/scm/git"
|
||||
"github.com/b3log/wide/session"
|
||||
"github.com/b3log/wide/util"
|
||||
)
|
||||
|
||||
// Logger
|
||||
var logger *log.Logger
|
||||
|
||||
// The only one init function in Wide.
|
||||
func init() {
|
||||
confPath := flag.String("conf", "conf/wide.json", "path of wide.json")
|
||||
confIP := flag.String("ip", "", "this will overwrite Wide.IP if specified")
|
||||
confPort := flag.String("port", "", "this will overwrite Wide.Port if specified")
|
||||
confServer := flag.String("server", "", "this will overwrite Wide.Server if specified")
|
||||
confLogLevel := flag.String("log_level", "", "this will overwrite Wide.LogLevel if specified")
|
||||
confStaticServer := flag.String("static_server", "", "this will overwrite Wide.StaticServer if specified")
|
||||
confContext := flag.String("context", "", "this will overwrite Wide.Context if specified")
|
||||
confChannel := flag.String("channel", "", "this will overwrite Wide.Channel if specified")
|
||||
confStat := flag.Bool("stat", false, "whether report statistics periodically")
|
||||
confDocker := flag.Bool("docker", false, "whether run in a docker container")
|
||||
confPlayground := flag.String("playground", "", "this will overwrite Wide.Playground if specified")
|
||||
confUsersWorkspaces := flag.String("users_workspaces", "", "this will overwrite Wide.UsersWorkspaces if specified")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
log.SetLevel("warn")
|
||||
logger = log.NewLogger(os.Stdout)
|
||||
|
||||
wd := util.OS.Pwd()
|
||||
if strings.HasPrefix(wd, os.TempDir()) {
|
||||
logger.Error("Don't run Wide in OS' temp directory or with `go run`")
|
||||
|
||||
os.Exit(-1)
|
||||
}
|
||||
|
||||
i18n.Load()
|
||||
event.Load()
|
||||
conf.Load(*confPath, *confIP, *confPort, *confServer, *confLogLevel, *confStaticServer, *confContext, *confChannel,
|
||||
*confPlayground, *confDocker, *confUsersWorkspaces)
|
||||
|
||||
conf.FixedTimeCheckEnv()
|
||||
session.FixedTimeSave()
|
||||
session.FixedTimeRelease()
|
||||
|
||||
if *confStat {
|
||||
session.FixedTimeReport()
|
||||
}
|
||||
|
||||
logger.Debug("host ["+runtime.Version()+", "+runtime.GOOS+"_"+runtime.GOARCH+"], cross-compilation ",
|
||||
util.Go.GetCrossPlatforms())
|
||||
}
|
||||
|
||||
// Main.
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(conf.Wide.MaxProcs)
|
||||
|
||||
initMime()
|
||||
handleSignal()
|
||||
|
||||
// IDE
|
||||
http.HandleFunc(conf.Wide.Context+"/", handlerGzWrapper(indexHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/start", handlerWrapper(startHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/about", handlerWrapper(aboutHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/keyboard_shortcuts", handlerWrapper(keyboardShortcutsHandler))
|
||||
|
||||
// static resources
|
||||
http.Handle(conf.Wide.Context+"/static/", http.StripPrefix(conf.Wide.Context+"/static/", http.FileServer(http.Dir("static"))))
|
||||
serveSingle("/favicon.ico", "./static/favicon.ico")
|
||||
|
||||
// workspaces
|
||||
for _, user := range conf.Users {
|
||||
http.Handle(conf.Wide.Context+"/workspace/"+user.Name+"/",
|
||||
http.StripPrefix(conf.Wide.Context+"/workspace/"+user.Name+"/", http.FileServer(http.Dir(user.WorkspacePath()))))
|
||||
}
|
||||
|
||||
// session
|
||||
http.HandleFunc(conf.Wide.Context+"/session/ws", handlerWrapper(session.WSHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/session/save", handlerWrapper(session.SaveContentHandler))
|
||||
|
||||
// run
|
||||
http.HandleFunc(conf.Wide.Context+"/build", handlerWrapper(output.BuildHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/run", handlerWrapper(output.RunHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/stop", handlerWrapper(output.StopHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/test", handlerWrapper(output.GoTestHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/vet", handlerWrapper(output.GoVetHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/get", handlerWrapper(output.GoGetHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/install", handlerWrapper(output.GoInstallHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/output/ws", handlerWrapper(output.WSHandler))
|
||||
|
||||
// cross-compilation
|
||||
http.HandleFunc(conf.Wide.Context+"/cross", handlerWrapper(output.CrossCompilationHandler))
|
||||
|
||||
// file tree
|
||||
http.HandleFunc(conf.Wide.Context+"/files", handlerWrapper(file.GetFilesHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/refresh", handlerWrapper(file.RefreshDirectoryHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file", handlerWrapper(file.GetFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/save", handlerWrapper(file.SaveFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/new", handlerWrapper(file.NewFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/remove", handlerWrapper(file.RemoveFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/rename", handlerWrapper(file.RenameFileHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/search/text", handlerWrapper(file.SearchTextHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/find/name", handlerWrapper(file.FindHandler))
|
||||
|
||||
// outline
|
||||
http.HandleFunc(conf.Wide.Context+"/outline", handlerWrapper(file.GetOutlineHandler))
|
||||
|
||||
// file export/import
|
||||
http.HandleFunc(conf.Wide.Context+"/file/zip/new", handlerWrapper(file.CreateZipHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/zip", handlerWrapper(file.GetZipHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/upload", handlerWrapper(file.UploadHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/file/decompress", handlerWrapper(file.DecompressHandler))
|
||||
|
||||
// editor
|
||||
http.HandleFunc(conf.Wide.Context+"/editor/ws", handlerWrapper(editor.WSHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/go/fmt", handlerWrapper(editor.GoFmtHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/autocomplete", handlerWrapper(editor.AutocompleteHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/exprinfo", handlerWrapper(editor.GetExprInfoHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/find/decl", handlerWrapper(editor.FindDeclarationHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/find/usages", handlerWrapper(editor.FindUsagesHandler))
|
||||
|
||||
// shell
|
||||
// http.HandleFunc(conf.Wide.Context+"/shell/ws", handlerWrapper(shell.WSHandler))
|
||||
// http.HandleFunc(conf.Wide.Context+"/shell", handlerWrapper(shell.IndexHandler))
|
||||
|
||||
// notification
|
||||
http.HandleFunc(conf.Wide.Context+"/notification/ws", handlerWrapper(notification.WSHandler))
|
||||
|
||||
// user
|
||||
http.HandleFunc(conf.Wide.Context+"/login", handlerWrapper(session.LoginHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/logout", handlerWrapper(session.LogoutHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/signup", handlerWrapper(session.SignUpUserHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/preference", handlerWrapper(session.PreferenceHandler))
|
||||
|
||||
// playground
|
||||
http.HandleFunc(conf.Wide.Context+"/playground", handlerWrapper(playground.IndexHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/", handlerWrapper(playground.IndexHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/ws", handlerWrapper(playground.WSHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/save", handlerWrapper(playground.SaveHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/short-url", handlerWrapper(playground.ShortURLHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/build", handlerWrapper(playground.BuildHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/run", handlerWrapper(playground.RunHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/stop", handlerWrapper(playground.StopHandler))
|
||||
http.HandleFunc(conf.Wide.Context+"/playground/autocomplete", handlerWrapper(playground.AutocompleteHandler))
|
||||
|
||||
// git
|
||||
http.HandleFunc(conf.Wide.Context+"/git/clone", handlerWrapper(git.CloneHandler))
|
||||
|
||||
logger.Infof("Wide is running [%s]", conf.Wide.Server+conf.Wide.Context)
|
||||
|
||||
err := http.ListenAndServe(conf.Wide.Server, nil)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
// indexHandler handles request of Wide index.
|
||||
func indexHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if conf.Wide.Context+"/" != r.RequestURI {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession, _ := session.HTTPSession.Get(r, "wide-session")
|
||||
if httpSession.IsNew {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
username := httpSession.Values["username"].(string)
|
||||
if "playground" == username { // reserved user for Playground
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
|
||||
if "" != conf.Wide.Context {
|
||||
httpSession.Options.Path = conf.Wide.Context
|
||||
}
|
||||
httpSession.Save(r, w)
|
||||
|
||||
user := conf.GetUser(username)
|
||||
if nil == user {
|
||||
logger.Warnf("Not found user [%s]", username)
|
||||
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
locale := user.Locale
|
||||
|
||||
wideSessions := session.WideSessions.GetByUsername(username)
|
||||
|
||||
model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(locale), "locale": locale,
|
||||
"username": username, "sid": session.WideSessions.GenId(), "latestSessionContent": user.LatestSessionContent,
|
||||
"pathSeparator": conf.PathSeparator, "codeMirrorVer": conf.CodeMirrorVer,
|
||||
"user": user, "editorThemes": conf.GetEditorThemes(), "crossPlatforms": util.Go.GetCrossPlatforms()}
|
||||
|
||||
logger.Debugf("User [%s] has [%d] sessions", username, len(wideSessions))
|
||||
|
||||
t, err := template.ParseFiles("views/index.html")
|
||||
if nil != err {
|
||||
logger.Error(err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Execute(w, model)
|
||||
}
|
||||
|
||||
// handleSignal handles system signal for graceful shutdown.
|
||||
func handleSignal() {
|
||||
go func() {
|
||||
c := make(chan os.Signal)
|
||||
|
||||
signal.Notify(c, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)
|
||||
s := <-c
|
||||
logger.Tracef("Got signal [%s]", s)
|
||||
|
||||
session.SaveOnlineUsers()
|
||||
logger.Tracef("Saved all online user, exit")
|
||||
|
||||
os.Exit(0)
|
||||
}()
|
||||
}
|
||||
|
||||
// serveSingle registers the handler function for the given pattern and filename.
//
// Used for assets that must live at a fixed root path (e.g. /favicon.ico)
// regardless of the configured context path.
func serveSingle(pattern string, filename string) {
	http.HandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, filename)
	})
}
|
||||
|
||||
// startHandler handles request of start page.
|
||||
func startHandler(w http.ResponseWriter, r *http.Request) {
|
||||
httpSession, _ := session.HTTPSession.Get(r, "wide-session")
|
||||
if httpSession.IsNew {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
|
||||
if "" != conf.Wide.Context {
|
||||
httpSession.Options.Path = conf.Wide.Context
|
||||
}
|
||||
httpSession.Save(r, w)
|
||||
|
||||
username := httpSession.Values["username"].(string)
|
||||
locale := conf.GetUser(username).Locale
|
||||
userWorkspace := conf.GetUserWorkspace(username)
|
||||
|
||||
sid := r.URL.Query()["sid"][0]
|
||||
wSession := session.WideSessions.Get(sid)
|
||||
if nil == wSession {
|
||||
logger.Errorf("Session [%s] not found", sid)
|
||||
}
|
||||
|
||||
model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(locale), "locale": locale,
|
||||
"username": username, "workspace": userWorkspace, "ver": conf.WideVersion, "sid": sid}
|
||||
|
||||
t, err := template.ParseFiles("views/start.html")
|
||||
|
||||
if nil != err {
|
||||
logger.Error(err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Execute(w, model)
|
||||
}
|
||||
|
||||
// keyboardShortcutsHandler handles request of keyboard shortcuts page.
|
||||
func keyboardShortcutsHandler(w http.ResponseWriter, r *http.Request) {
|
||||
httpSession, _ := session.HTTPSession.Get(r, "wide-session")
|
||||
if httpSession.IsNew {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
|
||||
if "" != conf.Wide.Context {
|
||||
httpSession.Options.Path = conf.Wide.Context
|
||||
}
|
||||
httpSession.Save(r, w)
|
||||
|
||||
username := httpSession.Values["username"].(string)
|
||||
locale := conf.GetUser(username).Locale
|
||||
|
||||
model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(locale), "locale": locale}
|
||||
|
||||
t, err := template.ParseFiles("views/keyboard_shortcuts.html")
|
||||
|
||||
if nil != err {
|
||||
logger.Error(err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Execute(w, model)
|
||||
}
|
||||
|
||||
// aboutHandle handles request of about page.
|
||||
func aboutHandler(w http.ResponseWriter, r *http.Request) {
|
||||
httpSession, _ := session.HTTPSession.Get(r, "wide-session")
|
||||
if httpSession.IsNew {
|
||||
http.Redirect(w, r, conf.Wide.Context+"/login", http.StatusFound)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
httpSession.Options.MaxAge = conf.Wide.HTTPSessionMaxAge
|
||||
if "" != conf.Wide.Context {
|
||||
httpSession.Options.Path = conf.Wide.Context
|
||||
}
|
||||
httpSession.Save(r, w)
|
||||
|
||||
username := httpSession.Values["username"].(string)
|
||||
locale := conf.GetUser(username).Locale
|
||||
|
||||
model := map[string]interface{}{"conf": conf.Wide, "i18n": i18n.GetAll(locale), "locale": locale,
|
||||
"ver": conf.WideVersion, "goos": runtime.GOOS, "goarch": runtime.GOARCH, "gover": runtime.Version()}
|
||||
|
||||
t, err := template.ParseFiles("views/about.html")
|
||||
|
||||
if nil != err {
|
||||
logger.Error(err)
|
||||
http.Error(w, err.Error(), 500)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
t.Execute(w, model)
|
||||
}
|
||||
|
||||
// handlerWrapper wraps the HTTP Handler for some common processes.
|
||||
//
|
||||
// 1. panic recover
|
||||
// 2. request stopwatch
|
||||
// 3. i18n
|
||||
func handlerWrapper(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
handler := panicRecover(f)
|
||||
handler = stopwatch(handler)
|
||||
handler = i18nLoad(handler)
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// handlerGzWrapper wraps the HTTP Handler for some common processes.
|
||||
//
|
||||
// 1. panic recover
|
||||
// 2. gzip response
|
||||
// 3. request stopwatch
|
||||
// 4. i18n
|
||||
func handlerGzWrapper(f func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
handler := panicRecover(f)
|
||||
handler = gzipWrapper(handler)
|
||||
handler = stopwatch(handler)
|
||||
handler = i18nLoad(handler)
|
||||
|
||||
return handler
|
||||
}
|
||||
|
||||
// gzipWrapper wraps the process with response gzip.
|
||||
func gzipWrapper(f func(http.ResponseWriter, *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||
f(w, r)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Encoding", "gzip")
|
||||
gz := gzip.NewWriter(w)
|
||||
defer gz.Close()
|
||||
gzr := gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
||||
|
||||
f(gzr, r)
|
||||
}
|
||||
}
|
||||
|
||||
// i18nLoad wraps the i18n process.
//
// Locales are reloaded on every request so translation edits take effect
// without a restart; presumably i18n.Load is cheap or internally cached —
// TODO confirm against the i18n package.
func i18nLoad(handler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		i18n.Load()

		handler(w, r)
	}
}
|
||||
|
||||
// stopwatch wraps the request stopwatch process.
|
||||
func stopwatch(handler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
|
||||
defer func() {
|
||||
logger.Tracef("[%s, %s, %s]", r.Method, r.RequestURI, time.Since(start))
|
||||
}()
|
||||
|
||||
handler(w, r)
|
||||
}
|
||||
}
|
||||
|
||||
// panicRecover wraps the panic recover process.
//
// Any panic escaping the wrapped handler is caught by util.Recover, so a
// single bad request cannot bring down the whole server.
func panicRecover(handler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		defer util.Recover()

		handler(w, r)
	}
}
|
||||
|
||||
// initMime initializes mime types.
|
||||
//
|
||||
// We can't get the mime types on some OS (such as Windows XP) by default, so initializes them here.
|
||||
func initMime() {
|
||||
mime.AddExtensionType(".css", "text/css")
|
||||
mime.AddExtensionType(".js", "application/x-javascript")
|
||||
mime.AddExtensionType(".json", "application/json")
|
||||
}
|
||||
|
||||
// gzipResponseWriter represents a gzip response writer.
//
// The embedded io.Writer is the gzip stream; the embedded
// http.ResponseWriter supplies headers and status. Both embed a Write
// method, so the explicit Write below resolves the promotion ambiguity.
type gzipResponseWriter struct {
	io.Writer
	http.ResponseWriter
}

// Write writes response with appropriate 'Content-Type'.
func (w gzipResponseWriter) Write(b []byte) (int, error) {
	if "" == w.Header().Get("Content-Type") {
		// If no content type, apply sniffing algorithm to un-gzipped body.
		// (Sniffing the compressed bytes would always detect gzip data.)
		w.Header().Set("Content-Type", http.DetectContentType(b))
	}

	return w.Writer.Write(b)
}
|
|
@ -0,0 +1,33 @@
|
|||
{
|
||||
"name": "wide",
|
||||
"version": "1.4.0",
|
||||
"description": "A Web-based IDE for Teams using Go programming language/Golang.",
|
||||
"homepage": "https://wide.b3log.org",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git://github.com/b3log/wide.git"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/b3log/wide/issues"
|
||||
},
|
||||
"license": "Apache License",
|
||||
"private": true,
|
||||
"author": "Daniel <d@b3log.org> (http://88250.b3log.org) & Vanessa <v@b3log.org> (http://vanessa.b3log.org)",
|
||||
"maintainers": [
|
||||
{
|
||||
"name": "Daniel",
|
||||
"email": "d@b3log.org"
|
||||
},
|
||||
{
|
||||
"name": "Vanessa",
|
||||
"email": "v@b3log.org"
|
||||
}
|
||||
],
|
||||
"devDependencies": {
|
||||
"gulp": "^3.9.1",
|
||||
"gulp-concat": "^2.6.1",
|
||||
"gulp-minify-css": "^1.2.4",
|
||||
"gulp-sourcemaps": "^2.6.0",
|
||||
"gulp-uglify": "^2.1.2"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
#!/bin/bash

# Wide package tool.
#
# Cross-compiles Wide plus its helper binaries (gotools, gocode) for every
# supported OS/arch pair and writes one archive per pair into ${target}.
#
# Command:
#  ./pkg.sh ${version} ${target}
# Example:
#  ./pkg.sh 1.0.0 /home/daniel/1.0.0/

ver=$1
target=$2
# ${list} is intentionally unquoted below: word-splitting expands it into
# the individual paths to archive.
list="conf doc i18n static views README.md TERMS.md LICENSE"

mkdir -p "${target}"

echo version=${ver}
echo target=${target}

# pkg_unix GOOS GOARCH — build and pack a .tar.gz for a Unix-like target.
pkg_unix() {
    export GOOS=$1
    export GOARCH=$2
    echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
    go build
    go build github.com/visualfc/gotools
    go build github.com/nsf/gocode
    tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
    rm -f wide gotools gocode
}

# pkg_windows GOARCH — build and pack a .zip for Windows (binaries get .exe).
pkg_windows() {
    export GOOS=windows
    export GOARCH=$1
    echo wide-${ver}-${GOOS}-${GOARCH}.zip
    go build
    go build github.com/visualfc/gotools
    go build github.com/nsf/gocode
    zip -r -q ${target}/wide-${ver}-${GOOS}-${GOARCH}.zip ${list} gotools.exe gocode.exe wide.exe --exclude=conf/*.go --exclude=i18n/*.go
    rm -f wide.exe gotools.exe gocode.exe
}

## darwin
pkg_unix darwin amd64
pkg_unix darwin 386

## linux
pkg_unix linux amd64
pkg_unix linux 386

## windows
pkg_windows amd64
pkg_windows 386
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2013 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,17 @@
|
|||
This tool updates your Go import lines, adding missing ones and
|
||||
removing unreferenced ones.
|
||||
|
||||
$ go get golang.org/x/tools/cmd/goimports
|
||||
|
||||
Note the new location. This project has moved to the official
|
||||
go.tools repo. Pull requests here will no longer be accepted.
|
||||
Please use the Go process: http://golang.org/doc/contribute.html
|
||||
|
||||
It acts the same as gofmt (same flags, etc) but in addition to code
|
||||
formatting, also fixes imports.
|
||||
|
||||
See usage and editor integration notes, now moved elsewhere:
|
||||
|
||||
http://godoc.org/golang.org/x/tools/cmd/goimports
|
||||
|
||||
Happy hacking!
|
|
@ -0,0 +1,195 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/scanner"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/imports"
|
||||
)
|
||||
|
||||
var (
|
||||
// main operation modes
|
||||
list = flag.Bool("l", false, "list files whose formatting differs from goimport's")
|
||||
write = flag.Bool("w", false, "write result to (source) file instead of stdout")
|
||||
doDiff = flag.Bool("d", false, "display diffs instead of rewriting files")
|
||||
|
||||
options = &imports.Options{
|
||||
TabWidth: 8,
|
||||
TabIndent: true,
|
||||
Comments: true,
|
||||
Fragment: true,
|
||||
}
|
||||
exitCode = 0
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)")
|
||||
}
|
||||
|
||||
func report(err error) {
|
||||
scanner.PrintError(os.Stderr, err)
|
||||
exitCode = 2
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
func isGoFile(f os.FileInfo) bool {
|
||||
// ignore non-Go files
|
||||
name := f.Name()
|
||||
return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
|
||||
}
|
||||
|
||||
func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error {
|
||||
opt := options
|
||||
if stdin {
|
||||
nopt := *options
|
||||
nopt.Fragment = true
|
||||
opt = &nopt
|
||||
}
|
||||
|
||||
if in == nil {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
in = f
|
||||
}
|
||||
|
||||
src, err := ioutil.ReadAll(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := imports.Process(filename, src, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(src, res) {
|
||||
// formatting has changed
|
||||
if *list {
|
||||
fmt.Fprintln(out, filename)
|
||||
}
|
||||
if *write {
|
||||
err = ioutil.WriteFile(filename, res, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if *doDiff {
|
||||
data, err := diff(src, res)
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing diff: %s", err)
|
||||
}
|
||||
fmt.Printf("diff %s gofmt/%s\n", filename, filename)
|
||||
out.Write(data)
|
||||
}
|
||||
}
|
||||
|
||||
if !*list && !*write && !*doDiff {
|
||||
_, err = out.Write(res)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func visitFile(path string, f os.FileInfo, err error) error {
|
||||
if err == nil && isGoFile(f) {
|
||||
err = processFile(path, nil, os.Stdout, false)
|
||||
}
|
||||
if err != nil {
|
||||
report(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func walkDir(path string) {
|
||||
filepath.Walk(path, visitFile)
|
||||
}
|
||||
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
||||
|
||||
// call gofmtMain in a separate function
|
||||
// so that it can use defer and have them
|
||||
// run before the exit.
|
||||
gofmtMain()
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
func gofmtMain() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
if options.TabWidth < 0 {
|
||||
fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth)
|
||||
exitCode = 2
|
||||
return
|
||||
}
|
||||
|
||||
if flag.NArg() == 0 {
|
||||
if err := processFile("<standard input>", os.Stdin, os.Stdout, true); err != nil {
|
||||
report(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
for i := 0; i < flag.NArg(); i++ {
|
||||
path := flag.Arg(i)
|
||||
switch dir, err := os.Stat(path); {
|
||||
case err != nil:
|
||||
report(err)
|
||||
case dir.IsDir():
|
||||
walkDir(path)
|
||||
default:
|
||||
if err := processFile(path, nil, os.Stdout, false); err != nil {
|
||||
report(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func diff(b1, b2 []byte) (data []byte, err error) {
|
||||
f1, err := ioutil.TempFile("", "gofmt")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.Remove(f1.Name())
|
||||
defer f1.Close()
|
||||
|
||||
f2, err := ioutil.TempFile("", "gofmt")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer os.Remove(f2.Name())
|
||||
defer f2.Close()
|
||||
|
||||
f1.Write(b1)
|
||||
f2.Write(b2)
|
||||
|
||||
data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
|
||||
if len(data) > 0 {
|
||||
// diff exits with a non-zero status when the files don't match.
|
||||
// Ignore that failure as long as we get output.
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,34 @@
|
|||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <howeyc@gmail.com> <chris@howey.me>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Hari haran <hariharan.uno@gmail.com>
|
||||
John C Barstow
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Matt Layher <mdlayher@gmail.com>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pieter Droogendijk <pieter@binky.org.uk>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Soge Zhang <zhssoge@gmail.com>
|
||||
Tilak Sharma <tilaks@google.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
Yukang <moorekang@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
|
@ -0,0 +1,263 @@
|
|||
# Changelog
|
||||
|
||||
## v1.2.0 / 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/go-fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/go-fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/go-fsnotify/fsnotify/issues/59)
|
||||
|
||||
## v1.1.1 / 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/go-fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## v1.1.0 / 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/go-fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/go-fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/go-fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/go-fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v1.0.4 / 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/go-fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## v1.0.3 / 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/go-fsnotify/fsnotify/issues/36)
|
||||
|
||||
## v1.0.2 / 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## v1.0.0 / 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/go-fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/go-fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## v0.9.3 / 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/go-fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v0.9.2 / 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on OS X. [#14](https://github.com/go-fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## v0.9.1 / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## v0.9.0 / 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## v0.8.12 / 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## v0.8.11 / 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
|
||||
|
||||
## v0.8.10 / 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## v0.8.9 / 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## v0.8.8 / 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## v0.8.7 / 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## v0.8.6 / 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## v0.8.5 / 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## v0.8.4 / 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## v0.8.3 / 2013-03-13
|
||||
|
||||
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## v0.8.2 / 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## v0.8.1 / 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## v0.8.0 / 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## v0.7.4 / 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## v0.7.3 / 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## v0.7.2 / 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## v0.7.1 / 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## v0.7.0 / 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## v0.6.0 / 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## v0.5.1 / 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## v0.5.0 / 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## v0.4.0 / 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## v0.3.0 / 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## v0.2.0 / 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## v0.1.0 / 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
# Contributing
|
||||
|
||||
## Issues
|
||||
|
||||
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/go-fsnotify/fsnotify/issues).
|
||||
* Please indicate the platform you are using fsnotify on.
|
||||
* A code example to reproduce the problem is appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/go-fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/go-fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||
|
||||
Please indicate that you have signed the CLA in your pull request.
|
||||
|
||||
### How fsnotify is Developed
|
||||
|
||||
* Development is done on feature branches.
|
||||
* Tests are run on BSD, Linux, OS X and Windows.
|
||||
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||
* To issue a new release, the maintainers will:
|
||||
* Update the CHANGELOG
|
||||
* Tag a version, which will become available through gopkg.in.
|
||||
|
||||
### How to Fork
|
||||
|
||||
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||
|
||||
1. Install from GitHub (`go get -u github.com/go-fsnotify/fsnotify`)
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Ensure everything works and the tests pass (see below)
|
||||
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||
|
||||
Contribute upstream:
|
||||
|
||||
1. Fork fsnotify on GitHub
|
||||
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||
3. Push to the branch (`git push fork my-new-feature`)
|
||||
4. Create a new Pull Request on GitHub
|
||||
|
||||
This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/).
|
||||
|
||||
### Testing
|
||||
|
||||
fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||
|
||||
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd go-fsnotify/fsnotify; go test'`.
|
||||
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||
|
||||
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||
|
||||
Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
|
||||
[hub]: https://github.com/github/hub
|
||||
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
|
@ -0,0 +1,28 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012 fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,64 @@
|
|||
# File system notifications for Go
|
||||
|
||||
[![Coverage](http://gocover.io/_badge/github.com/go-fsnotify/fsnotify)](http://gocover.io/github.com/go-fsnotify/fsnotify) [![GoDoc](https://godoc.org/gopkg.in/fsnotify.v1?status.svg)](https://godoc.org/gopkg.in/fsnotify.v1)
|
||||
|
||||
Go 1.3+ required.
|
||||
|
||||
Cross platform: Windows, Linux, BSD and OS X.
|
||||
|
||||
|Adapter |OS |Status |
|
||||
|----------|----------|----------|
|
||||
|inotify |Linux, Android\*|Supported [![Build Status](https://travis-ci.org/go-fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/go-fsnotify/fsnotify)|
|
||||
|kqueue |BSD, OS X, iOS\*|Supported [![Circle CI](https://circleci.com/gh/go-fsnotify/fsnotify.svg?style=svg)](https://circleci.com/gh/go-fsnotify/fsnotify)|
|
||||
|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|
||||
|FSEvents |OS X |[Planned](https://github.com/go-fsnotify/fsnotify/issues/11)|
|
||||
|FEN |Solaris 11 |[Planned](https://github.com/go-fsnotify/fsnotify/issues/12)|
|
||||
|fanotify |Linux 2.6.37+ | |
|
||||
|USN Journals |Windows |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/53)|
|
||||
|Polling |*All* |[Maybe](https://github.com/go-fsnotify/fsnotify/issues/9)|
|
||||
|
||||
\* Android and iOS are untested.
|
||||
|
||||
Please see [the documentation](https://godoc.org/gopkg.in/fsnotify.v1) for usage. Consult the [Wiki](https://github.com/go-fsnotify/fsnotify/wiki) for the FAQ and further information.
|
||||
|
||||
## API stability
|
||||
|
||||
Two major versions of fsnotify exist.
|
||||
|
||||
**[fsnotify.v0](https://gopkg.in/fsnotify.v0)** is API-compatible with [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify). Bugfixes *may* be backported, but I recommend upgrading to v1.
|
||||
|
||||
```go
|
||||
import "gopkg.in/fsnotify.v0"
|
||||
```
|
||||
|
||||
\* Refer to the package as fsnotify (without the .v0 suffix).
|
||||
|
||||
**[fsnotify.v1](https://gopkg.in/fsnotify.v1)** provides [a new API](https://godoc.org/gopkg.in/fsnotify.v1) based on [this design document](http://goo.gl/MrYxyA). You can import v1 with:
|
||||
|
||||
```go
|
||||
import "gopkg.in/fsnotify.v1"
|
||||
```
|
||||
|
||||
Further API changes are [planned](https://github.com/go-fsnotify/fsnotify/milestones), but a new major revision will be tagged, so you can depend on the v1 API.
|
||||
|
||||
**Master** may have unreleased changes. Use it to test the very latest code or when [contributing][], but don't expect it to remain API-compatible:
|
||||
|
||||
```go
|
||||
import "github.com/go-fsnotify/fsnotify"
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||
|
||||
## Example
|
||||
|
||||
See [example_test.go](https://github.com/go-fsnotify/fsnotify/blob/master/example_test.go).
|
||||
|
||||
[contributing]: https://github.com/go-fsnotify/fsnotify/blob/master/CONTRIBUTING.md
|
||||
|
||||
## Related Projects
|
||||
|
||||
* [notify](https://github.com/rjeczalik/notify)
|
||||
* [fsevents](https://github.com/go-fsnotify/fsevents)
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
## OS X build (CircleCI iOS beta)
|
||||
|
||||
# Pretend like it's an Xcode project, at least to get it running.
|
||||
machine:
|
||||
environment:
|
||||
XCODE_WORKSPACE: NotUsed.xcworkspace
|
||||
XCODE_SCHEME: NotUsed
|
||||
# This is where the go project is actually checked out to:
|
||||
CIRCLE_BUILD_DIR: $HOME/.go_project/src/github.com/go-fsnotify/fsnotify
|
||||
|
||||
dependencies:
|
||||
pre:
|
||||
- brew upgrade go
|
||||
|
||||
test:
|
||||
override:
|
||||
- go test ./...
|
||||
|
||||
# Idealized future config, eventually with cross-platform build matrix :-)
|
||||
|
||||
# machine:
|
||||
# go:
|
||||
# version: 1.4
|
||||
# os:
|
||||
# - osx
|
||||
# - linux
|
|
@ -0,0 +1,62 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !plan9,!solaris
|
||||
|
||||
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Event represents a single file system notification.
type Event struct {
	Name string // Relative path to the file or directory.
	Op   Op     // File operation that triggered the event (bit mask; see Op).
}
|
||||
|
||||
// Op describes a set of file operations.
// It is a bit mask: several operations may be OR'd together in one Event.
type Op uint32

// These are the generalized file operations that can trigger a notification.
const (
	Create Op = 1 << iota // File or directory was created.
	Write                 // File was written to.
	Remove                // File or directory was deleted.
	Rename                // File or directory was renamed/moved.
	Chmod                 // File attributes changed.
)
|
||||
|
||||
// String returns a string representation of the event in the form
|
||||
// "file: REMOVE|WRITE|..."
|
||||
func (e Event) String() string {
|
||||
// Use a buffer for efficient string concatenation
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if e.Op&Create == Create {
|
||||
buffer.WriteString("|CREATE")
|
||||
}
|
||||
if e.Op&Remove == Remove {
|
||||
buffer.WriteString("|REMOVE")
|
||||
}
|
||||
if e.Op&Write == Write {
|
||||
buffer.WriteString("|WRITE")
|
||||
}
|
||||
if e.Op&Rename == Rename {
|
||||
buffer.WriteString("|RENAME")
|
||||
}
|
||||
if e.Op&Chmod == Chmod {
|
||||
buffer.WriteString("|CHMOD")
|
||||
}
|
||||
|
||||
// If buffer remains empty, return no event names
|
||||
if buffer.Len() == 0 {
|
||||
return fmt.Sprintf("%q: ", e.Name)
|
||||
}
|
||||
|
||||
// Return a list of event names, with leading pipe character stripped
|
||||
return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
|
||||
}
|
|
@ -0,0 +1,306 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
type Watcher struct {
	Events chan Event // Notifications are delivered here.
	Errors chan error // Errors encountered while watching are delivered here.
	mu     sync.Mutex // Protects the watches and paths maps.
	fd     int        // inotify file descriptor (from inotify_init).
	poller *fdPoller  // epoll-based poller used to block in and wake up readEvents.
	watches map[string]*watch // Map of inotify watches (key: path)
	paths   map[int]string    // Map of watched paths (key: watch descriptor)
	done    chan struct{}     // Channel for sending a "quit message" to the reader goroutine
	doneResp chan struct{}    // Channel to respond to Close
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
// It returns an error if the inotify or epoll descriptors cannot be created.
func NewWatcher() (*Watcher, error) {
	// Create inotify fd
	fd, errno := syscall.InotifyInit()
	if fd == -1 {
		return nil, errno
	}
	// Create epoll. If that fails, close the inotify fd here so the
	// error path does not leak a descriptor.
	poller, err := newFdPoller(fd)
	if err != nil {
		syscall.Close(fd)
		return nil, err
	}
	w := &Watcher{
		fd:       fd,
		poller:   poller,
		watches:  make(map[string]*watch),
		paths:    make(map[int]string),
		Events:   make(chan Event),
		Errors:   make(chan error),
		done:     make(chan struct{}),
		doneResp: make(chan struct{}),
	}

	// The reader goroutine owns fd and poller from here on: it closes
	// both when it exits (see readEvents' defers).
	go w.readEvents()
	return w, nil
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
// It is safe to call more than once; subsequent calls are no-ops.
func (w *Watcher) Close() error {
	if w.isClosed() {
		return nil
	}

	// Send 'close' signal to goroutine, and set the Watcher to closed.
	close(w.done)

	// Wake up goroutine
	w.poller.wake()

	// Wait for goroutine to close
	// (readEvents closes doneResp last, after releasing its resources).
	<-w.doneResp

	return nil
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
// Adding the same path twice merges the new flags into the existing watch.
// Returns an error if the watcher is closed or inotify_add_watch fails.
func (w *Watcher) Add(name string) error {
	name = filepath.Clean(name)
	if w.isClosed() {
		return errors.New("inotify instance already closed")
	}

	// The inotify events that map onto the platform-independent Op bits.
	const agnosticEvents = syscall.IN_MOVED_TO | syscall.IN_MOVED_FROM |
		syscall.IN_CREATE | syscall.IN_ATTRIB | syscall.IN_MODIFY |
		syscall.IN_MOVE_SELF | syscall.IN_DELETE | syscall.IN_DELETE_SELF

	var flags uint32 = agnosticEvents

	w.mu.Lock()
	watchEntry, found := w.watches[name]
	w.mu.Unlock()
	if found {
		// Path already watched: OR the flags into the existing entry and
		// ask the kernel to merge rather than replace (IN_MASK_ADD).
		watchEntry.flags |= flags
		flags |= syscall.IN_MASK_ADD
	}
	wd, errno := syscall.InotifyAddWatch(w.fd, name, flags)
	if wd == -1 {
		return errno
	}

	// Record the watch in both directions: path -> watch and wd -> path.
	w.mu.Lock()
	w.watches[name] = &watch{wd: uint32(wd), flags: flags}
	w.paths[wd] = name
	w.mu.Unlock()

	return nil
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
// Returns an error if the path was not being watched or if
// inotify_rm_watch fails.
func (w *Watcher) Remove(name string) error {
	name = filepath.Clean(name)

	// Fetch the watch.
	w.mu.Lock()
	defer w.mu.Unlock()
	watch, ok := w.watches[name]

	// Remove it from inotify.
	if !ok {
		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
	}
	// inotify_rm_watch will return EINVAL if the file has been deleted;
	// the inotify will already have been removed.
	// That means we can safely delete it from our watches, whatever inotify_rm_watch does.
	// NOTE(review): the w.paths entry for this wd is not removed here —
	// presumably it is harmless to leave it, but verify it is cleaned up
	// elsewhere (possible stale map entry).
	delete(w.watches, name)
	success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
	if success == -1 {
		// TODO: Perhaps it's not helpful to return an error here in every case.
		// the only two possible errors are:
		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
		return errno
	}
	return nil
}
|
||||
|
||||
// watch is the bookkeeping record for a single inotify watch.
type watch struct {
	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel.
// It runs as a dedicated goroutine (started by NewWatcher) and exits when
// Close is called; its defers release the fd and poller and finally close
// doneResp to unblock Close.
func (w *Watcher) readEvents() {
	var (
		buf   [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
		n     int                                     // Number of bytes read with read()
		errno error                                   // Syscall errno
		ok    bool                                    // For poller.wait
	)

	// Defers run LIFO: poller and fd are closed first, then the public
	// channels, and doneResp last so Close only returns after full teardown.
	defer close(w.doneResp)
	defer close(w.Errors)
	defer close(w.Events)
	defer syscall.Close(w.fd)
	defer w.poller.close()

	for {
		// See if we have been closed.
		if w.isClosed() {
			return
		}

		// Block until the fd is readable (or we are woken up by Close).
		ok, errno = w.poller.wait()
		if errno != nil {
			select {
			case w.Errors <- errno:
			case <-w.done:
				return
			}
			continue
		}

		if !ok {
			continue
		}

		n, errno = syscall.Read(w.fd, buf[:])
		// If a signal interrupted execution, see if we've been asked to close, and try again.
		// http://man7.org/linux/man-pages/man7/signal.7.html :
		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
		if errno == syscall.EINTR {
			continue
		}

		// syscall.Read might have been woken up by Close. If so, we're done.
		if w.isClosed() {
			return
		}

		if n < syscall.SizeofInotifyEvent {
			var err error
			if n == 0 {
				// If EOF is received. This should really never happen.
				err = io.EOF
			} else if n < 0 {
				// If an error occurred while reading.
				err = errno
			} else {
				// Read was too short.
				err = errors.New("notify: short read in readEvents()")
			}
			select {
			case w.Errors <- err:
			case <-w.done:
				return
			}
			continue
		}

		var offset uint32
		// We don't know how many events we just read into the buffer
		// While the offset points to at least one whole event...
		for offset <= uint32(n-syscall.SizeofInotifyEvent) {
			// Point "raw" to the event in the buffer
			raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))

			mask := uint32(raw.Mask)
			nameLen := uint32(raw.Len)
			// If the event happened to the watched directory or the watched file, the kernel
			// doesn't append the filename to the event, but we would like to always fill
			// the "Name" field with a valid filename. We retrieve the path of the watch from
			// the "paths" map.
			w.mu.Lock()
			name := w.paths[int(raw.Wd)]
			w.mu.Unlock()
			if nameLen > 0 {
				// Point "bytes" at the first byte of the filename
				bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
			}

			event := newEvent(name, mask)

			// Send the events that are not ignored on the events channel
			if !event.ignoreLinux(mask) {
				select {
				case w.Events <- event:
				case <-w.done:
					return
				}
			}

			// Move to the next event in the buffer
			offset += syscall.SizeofInotifyEvent + nameLen
		}
	}
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Events
|
||||
// channel. Such as events marked ignore by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *Event) ignoreLinux(mask uint32) bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if mask&syscall.IN_IGNORED == syscall.IN_IGNORED {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newEvent returns an platform-independent Event based on an inotify mask.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&syscall.IN_CREATE == syscall.IN_CREATE || mask&syscall.IN_MOVED_TO == syscall.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&syscall.IN_DELETE_SELF == syscall.IN_DELETE_SELF || mask&syscall.IN_DELETE == syscall.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&syscall.IN_MODIFY == syscall.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&syscall.IN_MOVE_SELF == syscall.IN_MOVE_SELF || mask&syscall.IN_MOVED_FROM == syscall.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&syscall.IN_ATTRIB == syscall.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
|
@ -0,0 +1,186 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// fdPoller multiplexes the inotify file descriptor and an internal wake-up
// pipe through a single epoll instance, so that a blocking wait on inotify
// data can be interrupted (e.g. when the watcher is being closed).
type fdPoller struct {
	fd   int    // File descriptor (as returned by the inotify_init() syscall)
	epfd int    // Epoll file descriptor
	pipe [2]int // Pipe for waking up
}
|
||||
|
||||
func emptyPoller(fd int) *fdPoller {
|
||||
poller := new(fdPoller)
|
||||
poller.fd = fd
|
||||
poller.epfd = -1
|
||||
poller.pipe[0] = -1
|
||||
poller.pipe[1] = -1
|
||||
return poller
|
||||
}
|
||||
|
||||
// Create a new inotify poller.
// This creates an inotify handler, and an epoll handler.
// On any failure the deferred cleanup closes whatever descriptors were
// already created; the caller gets (nil, error) and owns nothing.
func newFdPoller(fd int) (*fdPoller, error) {
	var errno error
	poller := emptyPoller(fd)
	// Cleanup on error: close() skips descriptors still at the -1 sentinel.
	defer func() {
		if errno != nil {
			poller.close()
		}
	}()
	poller.fd = fd

	// Create epoll fd
	poller.epfd, errno = syscall.EpollCreate(1)
	if poller.epfd == -1 {
		return nil, errno
	}
	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
	// Non-blocking so wake()/clearWake() never stall on a full/empty pipe.
	errno = syscall.Pipe2(poller.pipe[:], syscall.O_NONBLOCK)
	if errno != nil {
		return nil, errno
	}

	// Register inotify fd with epoll
	event := syscall.EpollEvent{
		Fd:     int32(poller.fd),
		Events: syscall.EPOLLIN,
	}
	errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.fd, &event)
	if errno != nil {
		return nil, errno
	}

	// Register pipe fd with epoll
	event = syscall.EpollEvent{
		Fd:     int32(poller.pipe[0]),
		Events: syscall.EPOLLIN,
	}
	errno = syscall.EpollCtl(poller.epfd, syscall.EPOLL_CTL_ADD, poller.pipe[0], &event)
	if errno != nil {
		return nil, errno
	}

	return poller, nil
}
|
||||
|
||||
// Wait using epoll.
// Returns true if something is ready to be read,
// false if there is not.
// Blocks until either the inotify fd has data/errors or the wake-up pipe
// is written to (a false return means it was only a wake-up).
func (poller *fdPoller) wait() (bool, error) {
	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
	// I don't know whether epoll_wait returns the number of events returned,
	// or the total number of events ready.
	// I decided to catch both by making the buffer one larger than the maximum.
	events := make([]syscall.EpollEvent, 7)
	for {
		n, errno := syscall.EpollWait(poller.epfd, events, -1)
		if n == -1 {
			if errno == syscall.EINTR {
				continue
			}
			return false, errno
		}
		if n == 0 {
			// If there are no events, try again.
			continue
		}
		if n > 6 {
			// This should never happen. More events were returned than should be possible.
			return false, errors.New("epoll_wait returned more events than I know what to do with")
		}
		ready := events[:n]
		epollhup := false
		epollerr := false
		epollin := false
		for _, event := range ready {
			if event.Fd == int32(poller.fd) {
				if event.Events&syscall.EPOLLHUP != 0 {
					// This should not happen, but if it does, treat it as a wakeup.
					epollhup = true
				}
				if event.Events&syscall.EPOLLERR != 0 {
					// If an error is waiting on the file descriptor, we should pretend
					// something is ready to read, and let syscall.Read pick up the error.
					epollerr = true
				}
				if event.Events&syscall.EPOLLIN != 0 {
					// There is data to read.
					epollin = true
				}
			}
			if event.Fd == int32(poller.pipe[0]) {
				if event.Events&syscall.EPOLLHUP != 0 {
					// Write pipe descriptor was closed, by us. This means we're closing down the
					// watcher, and we should wake up.
				}
				if event.Events&syscall.EPOLLERR != 0 {
					// If an error is waiting on the pipe file descriptor.
					// This is an absolute mystery, and should never ever happen.
					return false, errors.New("Error on the pipe descriptor.")
				}
				if event.Events&syscall.EPOLLIN != 0 {
					// This is a regular wakeup, so we have to clear the buffer.
					err := poller.clearWake()
					if err != nil {
						return false, err
					}
				}
			}
		}

		// Only inotify-fd conditions produce a true result; a bare pipe
		// wakeup returns false so the caller can re-check its state.
		if epollhup || epollerr || epollin {
			return true, nil
		}
		return false, nil
	}
}
|
||||
|
||||
// wake interrupts a blocked wait() by writing a single byte to the write end
// of the wake-up pipe. (The previous comment said this closes the write end;
// it does not — it only writes to it.)
func (poller *fdPoller) wake() error {
	buf := make([]byte, 1)
	n, errno := syscall.Write(poller.pipe[1], buf)
	if n == -1 {
		if errno == syscall.EAGAIN {
			// Buffer is full, poller will wake.
			return nil
		}
		return errno
	}
	return nil
}
|
||||
|
||||
func (poller *fdPoller) clearWake() error {
|
||||
// You have to be woken up a LOT in order to get to 100!
|
||||
buf := make([]byte, 100)
|
||||
n, errno := syscall.Read(poller.pipe[0], buf)
|
||||
if n == -1 {
|
||||
if errno == syscall.EAGAIN {
|
||||
// Buffer is empty, someone else cleared our wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close all poller file descriptors, but not the one passed to it.
|
||||
func (poller *fdPoller) close() {
|
||||
if poller.pipe[1] != -1 {
|
||||
syscall.Close(poller.pipe[1])
|
||||
}
|
||||
if poller.pipe[0] != -1 {
|
||||
syscall.Close(poller.pipe[0])
|
||||
}
|
||||
if poller.epfd != -1 {
|
||||
syscall.Close(poller.epfd)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,463 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
// This is the kqueue (BSD/darwin) implementation.
type Watcher struct {
	Events chan Event
	Errors chan error
	done   chan bool // Channel for sending a "quit message" to the reader goroutine

	kq int // File descriptor (as returned by the kqueue() syscall).

	mu              sync.Mutex        // Protects access to watcher data
	watches         map[string]int    // Map of watched file descriptors (key: path).
	externalWatches map[string]bool   // Map of watches added by user of the library.
	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
	isClosed        bool              // Set to true when Close() is first called
}
|
||||
|
||||
// pathInfo records, for a watched kqueue file descriptor, the path it was
// opened from and whether that path is a directory.
type pathInfo struct {
	name  string
	isDir bool
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
kq, err := kqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
kq: kq,
|
||||
watches: make(map[string]int),
|
||||
dirFlags: make(map[string]uint32),
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan bool),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
// It is idempotent: only the first call does any work. The Events/Errors
// channels are closed by the reader goroutine once it receives the quit
// message, not here.
func (w *Watcher) Close() error {
	w.mu.Lock()
	if w.isClosed {
		w.mu.Unlock()
		return nil
	}
	w.isClosed = true
	w.mu.Unlock()

	w.mu.Lock()
	ws := w.watches
	w.mu.Unlock()

	// NOTE(review): the first Remove error is collected here but never
	// returned — Close always returns nil. Confirm whether that is
	// intentional before changing it.
	var err error
	for name := range ws {
		if e := w.Remove(name); e != nil && err == nil {
			err = e
		}
	}

	// Send "quit" message to the reader goroutine:
	w.done <- true

	return nil
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
func (w *Watcher) Add(name string) error {
	// Record that this watch was requested by the user (not created
	// internally by a directory scan), so Remove keeps it when pruning a
	// removed directory's children.
	w.mu.Lock()
	w.externalWatches[name] = true
	w.mu.Unlock()
	return w.addWatch(name, noteAllEvents)
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
// Removing a directory also removes the internal (non-user-added) watches
// on its direct children that watchDirectoryFiles created.
func (w *Watcher) Remove(name string) error {
	name = filepath.Clean(name)
	w.mu.Lock()
	watchfd, ok := w.watches[name]
	w.mu.Unlock()
	if !ok {
		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
	}

	// Deregister from kqueue before closing the fd.
	const registerRemove = syscall.EV_DELETE
	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
		return err
	}

	syscall.Close(watchfd)

	w.mu.Lock()
	isDir := w.paths[watchfd].isDir
	delete(w.watches, name)
	delete(w.paths, watchfd)
	delete(w.dirFlags, name)
	w.mu.Unlock()

	// Find all watched paths that are in this directory that are not external.
	if isDir {
		var pathsToRemove []string
		w.mu.Lock()
		for _, path := range w.paths {
			wdir, _ := filepath.Split(path.name)
			if filepath.Clean(wdir) == name {
				if !w.externalWatches[path.name] {
					pathsToRemove = append(pathsToRemove, path.name)
				}
			}
		}
		w.mu.Unlock()
		for _, name := range pathsToRemove {
			// Since these are internal, not much sense in propagating error
			// to the user, as that will just confuse them with an error about
			// a path they did not explicitly watch themselves.
			w.Remove(name)
		}
	}

	return nil
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = syscall.NOTE_DELETE | syscall.NOTE_WRITE | syscall.NOTE_ATTRIB | syscall.NOTE_RENAME

// keventWaitTime to block on each read from kevent
// (a finite timeout so the reader loop can periodically check the done channel)
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||
|
||||
// addWatch adds name to the watched file set.
// The flags are interpreted as described in kevent(2).
// Safe for an already-watched name: the existing fd is re-registered with
// the new flags. For directories it additionally watches the directory's
// current children (via watchDirectoryFiles) so create events can be
// synthesized, mimicking inotify.
func (w *Watcher) addWatch(name string, flags uint32) error {
	var isDir bool
	// Make ./name and name equivalent
	name = filepath.Clean(name)

	w.mu.Lock()
	if w.isClosed {
		w.mu.Unlock()
		return errors.New("kevent instance already closed")
	}
	watchfd, alreadyWatching := w.watches[name]
	// We already have a watch, but we can still override flags.
	if alreadyWatching {
		isDir = w.paths[watchfd].isDir
	}
	w.mu.Unlock()

	if !alreadyWatching {
		fi, err := os.Lstat(name)
		if err != nil {
			return err
		}

		// Don't watch sockets.
		if fi.Mode()&os.ModeSocket == os.ModeSocket {
			return nil
		}

		// Follow Symlinks
		// Unfortunately, Linux can add bogus symlinks to watch list without
		// issue, and Windows can't do symlinks period (AFAIK). To maintain
		// consistency, we will act like everything is fine. There will simply
		// be no file events for broken symlinks.
		// Hence the returns of nil on errors.
		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
			name, err = filepath.EvalSymlinks(name)
			if err != nil {
				return nil
			}

			fi, err = os.Lstat(name)
			if err != nil {
				return nil
			}
		}

		// openMode is platform-specific (O_EVTONLY on darwin).
		watchfd, err = syscall.Open(name, openMode, 0700)
		if watchfd == -1 {
			return err
		}

		isDir = fi.IsDir()
	}

	const registerAdd = syscall.EV_ADD | syscall.EV_CLEAR | syscall.EV_ENABLE
	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
		syscall.Close(watchfd)
		return err
	}

	if !alreadyWatching {
		w.mu.Lock()
		w.watches[name] = watchfd
		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
		w.mu.Unlock()
	}

	if isDir {
		// Watch the directory if it has not been watched before,
		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
		w.mu.Lock()
		watchDir := (flags&syscall.NOTE_WRITE) == syscall.NOTE_WRITE &&
			(!alreadyWatching || (w.dirFlags[name]&syscall.NOTE_WRITE) != syscall.NOTE_WRITE)
		// Store flags so this watch can be updated later
		w.dirFlags[name] = flags
		w.mu.Unlock()

		if watchDir {
			if err := w.watchDirectoryFiles(name); err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
eventBuffer := make([]syscall.Kevent_t, 10)
|
||||
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
err := syscall.Close(w.kq)
|
||||
if err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Get new events
|
||||
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != syscall.EINTR {
|
||||
w.Errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
// Flush the events we received to the Events channel
|
||||
for len(kevents) > 0 {
|
||||
kevent := &kevents[0]
|
||||
watchfd := int(kevent.Ident)
|
||||
mask := uint32(kevent.Fflags)
|
||||
w.mu.Lock()
|
||||
path := w.paths[watchfd]
|
||||
w.mu.Unlock()
|
||||
event := newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !(event.Op&Remove == Remove) {
|
||||
// Double check to make sure the directory exists. This can happen when
|
||||
// we do a rm -fr on a recursively watched folders and we receive a
|
||||
// modification event first but the folder has been deleted and later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
// mark is as delete event
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||
w.Remove(event.Name)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
w.sendDirectoryChangeEvents(event.Name)
|
||||
} else {
|
||||
// Send the event on the Events channel
|
||||
w.Events <- event
|
||||
}
|
||||
|
||||
if event.Op&Remove == Remove {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
fileDir, _ := filepath.Split(event.Name)
|
||||
fileDir = filepath.Clean(fileDir)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); os.IsExist(err) {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
// FIXME: should this be for events on files or just isDir?
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
kevents = kevents[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns an platform-independent Event based on kqueue Fflags.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&syscall.NOTE_DELETE == syscall.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&syscall.NOTE_WRITE == syscall.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&syscall.NOTE_RENAME == syscall.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&syscall.NOTE_ATTRIB == syscall.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func newCreateEvent(name string) Event {
|
||||
return Event{Name: name, Op: Create}
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
if err := w.internalWatch(filePath, fileInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match Linux inotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
if !doesExist {
|
||||
// Send create event
|
||||
w.Events <- newCreateEvent(filePath)
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
if err := w.internalWatch(filePath, fileInfo); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) error {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
flags |= syscall.NOTE_DELETE
|
||||
return w.addWatch(name, flags)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||
func kqueue() (kq int, err error) {
|
||||
kq, err = syscall.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, err
|
||||
}
|
||||
return kq, nil
|
||||
}
|
||||
|
||||
// register events with the queue
|
||||
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]syscall.Kevent_t, len(fds))
|
||||
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types:
|
||||
syscall.SetKevent(&changes[i], fd, syscall.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// register the events
|
||||
success, err := syscall.Kevent(kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||
func read(kq int, events []syscall.Kevent_t, timeout *syscall.Timespec) ([]syscall.Kevent_t, error) {
|
||||
n, err := syscall.Kevent(kq, nil, events, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
// durationToTimespec prepares a timeout value
|
||||
func durationToTimespec(d time.Duration) syscall.Timespec {
|
||||
return syscall.NsecToTimespec(d.Nanoseconds())
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "syscall"
|
||||
|
||||
// openMode is passed to syscall.Open when opening a path to watch:
// read-only, and non-blocking so opening e.g. a FIFO cannot stall the watcher.
const openMode = syscall.O_NONBLOCK | syscall.O_RDONLY
|
|
@ -0,0 +1,12 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "syscall"
|
||||
|
||||
// openMode is passed to syscall.Open when opening a path to watch.
// O_EVTONLY opens for event notifications only, without preventing the
// volume from being unmounted.
// note: this constant is not defined on BSD
const openMode = syscall.O_EVTONLY
|
|
@ -0,0 +1,561 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
// This is the Windows implementation, built on ReadDirectoryChangesW and an
// I/O completion port serviced by a single reader goroutine.
type Watcher struct {
	Events   chan Event
	Errors   chan error
	isClosed bool           // Set to true when Close() is first called
	mu       sync.Mutex     // Map access
	port     syscall.Handle // Handle to completion port
	watches  watchMap       // Map of watches (key: i-number)
	input    chan *input    // Inputs to the reader are sent on this channel
	quit     chan chan<- error // Close() sends a reply channel here to stop the reader
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sys_FS_ALL_EVENTS,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops watching the the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
const (
	// Options for AddWatch
	sys_FS_ONESHOT = 0x80000000
	sys_FS_ONLYDIR = 0x1000000

	// Events
	// These values mirror the Linux inotify IN_* flag values, so the
	// cross-platform newEvent mapping stays uniform.
	sys_FS_ACCESS      = 0x1
	sys_FS_ALL_EVENTS  = 0xfff
	sys_FS_ATTRIB      = 0x4
	sys_FS_CLOSE       = 0x18
	sys_FS_CREATE      = 0x100
	sys_FS_DELETE      = 0x200
	sys_FS_DELETE_SELF = 0x400
	sys_FS_MODIFY      = 0x2
	sys_FS_MOVE        = 0xc0
	sys_FS_MOVED_FROM  = 0x40
	sys_FS_MOVED_TO    = 0x80
	sys_FS_MOVE_SELF   = 0x800

	// Special events
	sys_FS_IGNORED    = 0x8000
	sys_FS_Q_OVERFLOW = 0x4000
)
|
||||
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sys_FS_CREATE == sys_FS_CREATE || mask&sys_FS_MOVED_TO == sys_FS_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sys_FS_DELETE == sys_FS_DELETE || mask&sys_FS_DELETE_SELF == sys_FS_DELETE_SELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sys_FS_MODIFY == sys_FS_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sys_FS_MOVE == sys_FS_MOVE || mask&sys_FS_MOVE_SELF == sys_FS_MOVE_SELF || mask&sys_FS_MOVED_FROM == sys_FS_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sys_FS_ATTRIB == sys_FS_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Operations carried by an input request to the I/O thread.
const (
	opAddWatch = iota
	opRemoveWatch
)

const (
	// provisional marks a watch (in the upper 32 bits of its mask) that is
	// still being set up; deleteWatch suppresses IGNORED events for it.
	provisional uint64 = 1 << (32 + iota)
)
|
||||
|
||||
// input is a request (add/remove watch) sent from the public API to the
// I/O thread; the outcome is delivered on reply.
type input struct {
	op    int
	path  string
	flags uint32
	reply chan error
}

// inode identifies a directory by volume serial number and file index,
// together with an open handle to it.
type inode struct {
	handle syscall.Handle
	volume uint32
	index  uint64
}

// watch is the per-directory state driving ReadDirectoryChangesW. Its ov
// field must be first-equivalent in layout use: the completion packet's
// *Overlapped is cast back to *watch in readEvents.
type watch struct {
	ov     syscall.Overlapped
	ino    *inode            // i-number
	path   string            // Directory path
	mask   uint64            // Directory itself is being watched with these notify flags
	names  map[string]uint64 // Map of names being watched and their notify flags
	rename string            // Remembers the old name while renaming a file
	buf    [4096]byte
}

// indexMap maps a file index to its watch; watchMap groups those per volume.
type indexMap map[uint64]*watch
type watchMap map[uint32]indexMap
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if e != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDir(pathname string) (dir string, err error) {
|
||||
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||
if e != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||
}
|
||||
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getIno opens path (a directory) and returns its identity: an open handle
// plus the volume serial number and 64-bit file index that uniquely name it.
// On success the caller owns ino.handle and must close it (or hand it to a
// watch that will).
func getIno(path string) (ino *inode, err error) {
	// FILE_FLAG_BACKUP_SEMANTICS is required to open a directory handle;
	// FILE_FLAG_OVERLAPPED enables async ReadDirectoryChangesW.
	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
		syscall.FILE_LIST_DIRECTORY,
		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
		nil, syscall.OPEN_EXISTING,
		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
	if e != nil {
		return nil, os.NewSyscallError("CreateFile", e)
	}
	var fi syscall.ByHandleFileInformation
	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
		syscall.CloseHandle(h)
		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
	}
	ino = &inode{
		handle: h,
		volume: fi.VolumeSerialNumber,
		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
	}
	return ino, nil
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// addWatch creates or updates the watch covering pathname's directory and
// (when pathname is a file inside it) the per-name flag entry, then arms
// ReadDirectoryChangesW via startRead. While the first read is being armed
// the flags carry the provisional bit so a failing setup does not emit
// IGNORED events.
// Must run within the I/O thread.
func (w *Watcher) addWatch(pathname string, flags uint64) error {
	dir, err := getDir(pathname)
	if err != nil {
		return err
	}
	if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
		return nil
	}
	ino, err := getIno(dir)
	if err != nil {
		return err
	}
	w.mu.Lock()
	watchEntry := w.watches.get(ino)
	w.mu.Unlock()
	if watchEntry == nil {
		// First watch on this directory: bind its handle to the completion
		// port; the handle is now owned by the new watch entry.
		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
			syscall.CloseHandle(ino.handle)
			return os.NewSyscallError("CreateIoCompletionPort", e)
		}
		watchEntry = &watch{
			ino:   ino,
			path:  dir,
			names: make(map[string]uint64),
		}
		w.mu.Lock()
		w.watches.set(ino, watchEntry)
		w.mu.Unlock()
		flags |= provisional
	} else {
		// Already watching this directory; the duplicate handle from
		// getIno is not needed.
		syscall.CloseHandle(ino.handle)
	}
	if pathname == dir {
		watchEntry.mask |= flags
	} else {
		watchEntry.names[filepath.Base(pathname)] |= flags
	}
	if err = w.startRead(watchEntry); err != nil {
		return err
	}
	// Setup succeeded: clear the provisional bit.
	if pathname == dir {
		watchEntry.mask &= ^provisional
	} else {
		watchEntry.names[filepath.Base(pathname)] &= ^provisional
	}
	return nil
}
|
||||
|
||||
// remWatch removes the watch (or the per-name entry) for pathname, emits the
// corresponding IGNORED event, and re-arms the directory read with the
// remaining flags (startRead tears the watch down fully when none remain).
// Must run within the I/O thread.
func (w *Watcher) remWatch(pathname string) error {
	dir, err := getDir(pathname)
	if err != nil {
		return err
	}
	// NOTE(review): getIno opens a fresh handle that is only used for the
	// map lookup here and does not appear to be closed on this path —
	// confirm whether ino.handle leaks.
	ino, err := getIno(dir)
	if err != nil {
		return err
	}
	w.mu.Lock()
	watch := w.watches.get(ino)
	w.mu.Unlock()
	if watch == nil {
		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
	}
	if pathname == dir {
		w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
		watch.mask = 0
	} else {
		name := filepath.Base(pathname)
		w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
		delete(watch.names, name)
	}
	return w.startRead(watch)
}
|
||||
|
||||
// deleteWatch clears all flags on the watch — both per-name entries and the
// directory mask — emitting an IGNORED event for each fully-established
// (non-provisional) entry. The watch struct itself stays in the map;
// startRead removes it once its combined mask is zero.
// Must run within the I/O thread.
func (w *Watcher) deleteWatch(watch *watch) {
	for name, mask := range watch.names {
		if mask&provisional == 0 {
			w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
		}
		delete(watch.names, name)
	}
	if watch.mask != 0 {
		if watch.mask&provisional == 0 {
			w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
		}
		watch.mask = 0
	}
}
|
||||
|
||||
// Must run within the I/O thread.
// startRead (re-)arms the asynchronous ReadDirectoryChanges call for a
// watch. When the combined flag mask is empty the handle is closed and
// the watch removed from the registry instead.
func (w *Watcher) startRead(watch *watch) error {
	// Cancel any outstanding read before re-issuing one on the same handle.
	if e := syscall.CancelIo(watch.ino.handle); e != nil {
		w.Errors <- os.NewSyscallError("CancelIo", e)
		w.deleteWatch(watch)
	}
	// The Windows notification mask is the union of the directory mask and
	// every per-name mask.
	mask := toWindowsFlags(watch.mask)
	for _, m := range watch.names {
		mask |= toWindowsFlags(m)
	}
	if mask == 0 {
		// Nothing left to watch: release the handle and forget the entry.
		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
			w.Errors <- os.NewSyscallError("CloseHandle", e)
		}
		w.mu.Lock()
		delete(w.watches[watch.ino.volume], watch.ino.index)
		w.mu.Unlock()
		return nil
	}
	// Asynchronous read; completion is delivered through the I/O completion
	// port and picked up in readEvents via the overlapped pointer.
	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
	if e != nil {
		err := os.NewSyscallError("ReadDirectoryChanges", e)
		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
			// Watched directory was probably removed
			if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
				if watch.mask&sys_FS_ONESHOT != 0 {
					watch.mask = 0
				}
			}
			// The deletion was reported as an event, so it is not an error.
			err = nil
		}
		w.deleteWatch(watch)
		w.startRead(watch)
		return err
	}
	return nil
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case syscall.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.Events <- newEvent("", sys_FS_Q_OVERFLOW)
|
||||
w.Errors <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
fullname := watch.path + "\\" + name
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sys_FS_DELETE_SELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sys_FS_MODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sys_FS_MOVE_SELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sys_FS_ONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sys_FS_ONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = watch.path + "\\" + watch.rename
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendEvent delivers an event with the given name and flag mask on the
// Events channel. It reports whether an event was actually sent; a zero
// mask sends nothing and returns false.
func (w *Watcher) sendEvent(name string, mask uint64) bool {
	if mask == 0 {
		return false
	}
	event := newEvent(name, uint32(mask))
	select {
	case ch := <-w.quit:
		// Shutdown in progress: put the quit token back for readEvents to
		// consume instead of blocking on a closed/abandoned Events channel.
		w.quit <- ch
	case w.Events <- event:
	}
	return true
}
|
||||
|
||||
// toWindowsFlags maps the internal sys_FS_* flag mask onto the
// FILE_NOTIFY_CHANGE_* mask understood by ReadDirectoryChanges.
func toWindowsFlags(mask uint64) uint32 {
	var m uint32
	if mask&sys_FS_ACCESS != 0 {
		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
	}
	if mask&sys_FS_MODIFY != 0 {
		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
	}
	if mask&sys_FS_ATTRIB != 0 {
		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
	}
	// Moves, creates and deletes all surface as name changes on Windows.
	if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
	}
	return m
}
|
||||
|
||||
// toFSnotifyFlags maps a Windows FILE_ACTION_* code onto the internal
// sys_FS_* flag; unknown actions map to 0 (no event).
func toFSnotifyFlags(action uint32) uint64 {
	switch action {
	case syscall.FILE_ACTION_ADDED:
		return sys_FS_CREATE
	case syscall.FILE_ACTION_REMOVED:
		return sys_FS_DELETE
	case syscall.FILE_ACTION_MODIFIED:
		return sys_FS_MODIFY
	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
		return sys_FS_MOVED_FROM
	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
		return sys_FS_MOVED_TO
	}
	return 0
}
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,7 @@
|
|||
context
|
||||
=======
|
||||
[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
|
||||
|
||||
gorilla/context is a general purpose registry for global request variables.
|
||||
|
||||
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
|
|
@ -0,0 +1,143 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package context
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// mutex guards both registries below.
	mutex sync.RWMutex
	// data maps a request to its key/value store.
	data = make(map[*http.Request]map[interface{}]interface{})
	// datat records the Unix time a request was first registered,
	// used by Purge to expire stale entries.
	datat = make(map[*http.Request]int64)
)
|
||||
|
||||
// Set stores a value for a given key in a given request.
// The first Set for a request also records its registration time
// (used by Purge).
func Set(r *http.Request, key, val interface{}) {
	mutex.Lock()
	if data[r] == nil {
		data[r] = make(map[interface{}]interface{})
		datat[r] = time.Now().Unix()
	}
	data[r][key] = val
	mutex.Unlock()
}
|
||||
|
||||
// Get returns a value stored for a given key in a given request.
|
||||
func Get(r *http.Request, key interface{}) interface{} {
|
||||
mutex.RLock()
|
||||
if ctx := data[r]; ctx != nil {
|
||||
value := ctx[key]
|
||||
mutex.RUnlock()
|
||||
return value
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOk returns stored value and presence state like multi-value return of map access.
|
||||
func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
|
||||
mutex.RLock()
|
||||
if _, ok := data[r]; ok {
|
||||
value, ok := data[r][key]
|
||||
mutex.RUnlock()
|
||||
return value, ok
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
|
||||
func GetAll(r *http.Request) map[interface{}]interface{} {
|
||||
mutex.RLock()
|
||||
if context, ok := data[r]; ok {
|
||||
result := make(map[interface{}]interface{}, len(context))
|
||||
for k, v := range context {
|
||||
result[k] = v
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return result
|
||||
}
|
||||
mutex.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
// the request was registered.
// Note: unlike GetAll, an unregistered request yields a non-nil empty map
// (with ok == false), because the copy is built unconditionally.
func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
	mutex.RLock()
	context, ok := data[r]
	// Ranging over a nil map is a no-op, so this is safe when !ok.
	result := make(map[interface{}]interface{}, len(context))
	for k, v := range context {
		result[k] = v
	}
	mutex.RUnlock()
	return result, ok
}
|
||||
|
||||
// Delete removes a value stored for a given key in a given request.
|
||||
func Delete(r *http.Request, key interface{}) {
|
||||
mutex.Lock()
|
||||
if data[r] != nil {
|
||||
delete(data[r], key)
|
||||
}
|
||||
mutex.Unlock()
|
||||
}
|
||||
|
||||
// Clear removes all values stored for a given request.
//
// This is usually called by a handler wrapper to clean up request
// variables at the end of a request lifetime. See ClearHandler().
func Clear(r *http.Request) {
	mutex.Lock()
	clear(r)
	mutex.Unlock()
}
|
||||
|
||||
// clear is Clear without the lock.
// Caller must hold mutex for writing.
func clear(r *http.Request) {
	delete(data, r)
	delete(datat, r)
}
|
||||
|
||||
// Purge removes request data stored for longer than maxAge, in seconds.
|
||||
// It returns the amount of requests removed.
|
||||
//
|
||||
// If maxAge <= 0, all request data is removed.
|
||||
//
|
||||
// This is only used for sanity check: in case context cleaning was not
|
||||
// properly set some request data can be kept forever, consuming an increasing
|
||||
// amount of memory. In case this is detected, Purge() must be called
|
||||
// periodically until the problem is fixed.
|
||||
func Purge(maxAge int) int {
|
||||
mutex.Lock()
|
||||
count := 0
|
||||
if maxAge <= 0 {
|
||||
count = len(data)
|
||||
data = make(map[*http.Request]map[interface{}]interface{})
|
||||
datat = make(map[*http.Request]int64)
|
||||
} else {
|
||||
min := time.Now().Unix() - int64(maxAge)
|
||||
for r := range data {
|
||||
if datat[r] < min {
|
||||
clear(r)
|
||||
count++
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex.Unlock()
|
||||
return count
|
||||
}
|
||||
|
||||
// ClearHandler wraps an http.Handler and clears request values at the end
// of a request lifetime.
// The deferred Clear runs even if the wrapped handler panics.
func ClearHandler(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer Clear(r)
		h.ServeHTTP(w, r)
	})
}
|
|
@ -0,0 +1,82 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package context stores values shared during a request lifetime.
|
||||
|
||||
For example, a router can set variables extracted from the URL and later
|
||||
application handlers can access those values, or it can be used to store
|
||||
sessions values to be saved at the end of a request. There are several
|
||||
others common uses.
|
||||
|
||||
The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
|
||||
|
||||
http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
|
||||
|
||||
Here's the basic usage: first define the keys that you will need. The key
|
||||
type is interface{} so a key can be of any type that supports equality.
|
||||
Here we define a key using a custom int type to avoid name collisions:
|
||||
|
||||
package foo
|
||||
|
||||
import (
|
||||
"github.com/gorilla/context"
|
||||
)
|
||||
|
||||
type key int
|
||||
|
||||
const MyKey key = 0
|
||||
|
||||
Then set a variable. Variables are bound to an http.Request object, so you
|
||||
need a request instance to set a value:
|
||||
|
||||
context.Set(r, MyKey, "bar")
|
||||
|
||||
The application can later access the variable using the same key you provided:
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// val is "bar".
|
||||
val := context.Get(r, foo.MyKey)
|
||||
|
||||
// returns ("bar", true)
|
||||
val, ok := context.GetOk(r, foo.MyKey)
|
||||
// ...
|
||||
}
|
||||
|
||||
And that's all about the basic usage. We discuss some other ideas below.
|
||||
|
||||
Any type can be stored in the context. To enforce a given type, make the key
|
||||
private and wrap Get() and Set() to accept and return values of a specific
|
||||
type:
|
||||
|
||||
type key int
|
||||
|
||||
const mykey key = 0
|
||||
|
||||
// GetMyKey returns a value for this package from the request values.
|
||||
func GetMyKey(r *http.Request) SomeType {
|
||||
if rv := context.Get(r, mykey); rv != nil {
|
||||
return rv.(SomeType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetMyKey sets a value for this package in the request values.
|
||||
func SetMyKey(r *http.Request, val SomeType) {
|
||||
context.Set(r, mykey, val)
|
||||
}
|
||||
|
||||
Variables must be cleared at the end of a request, to remove all values
|
||||
that were stored. This can be done in an http.Handler, after a request was
|
||||
served. Just call Clear() passing the request:
|
||||
|
||||
context.Clear(r)
|
||||
|
||||
...or use ClearHandler(), which conveniently wraps an http.Handler to clear
|
||||
variables at the end of a request lifetime.
|
||||
|
||||
The Routers from the packages gorilla/mux and gorilla/pat call Clear()
|
||||
so if you are using either of them you don't need to clear the context manually.
|
||||
*/
|
||||
package context
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,3 @@
|
|||
securecookie
|
||||
============
|
||||
[![Build Status](https://travis-ci.org/gorilla/securecookie.png?branch=master)](https://travis-ci.org/gorilla/securecookie)
|
|
@ -0,0 +1,61 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package gorilla/securecookie encodes and decodes authenticated and optionally
|
||||
encrypted cookie values.
|
||||
|
||||
Secure cookies can't be forged, because their values are validated using HMAC.
|
||||
When encrypted, the content is also inaccessible to malicious eyes.
|
||||
|
||||
To use it, first create a new SecureCookie instance:
|
||||
|
||||
var hashKey = []byte("very-secret")
|
||||
var blockKey = []byte("a-lot-secret")
|
||||
var s = securecookie.New(hashKey, blockKey)
|
||||
|
||||
The hashKey is required, used to authenticate the cookie value using HMAC.
|
||||
It is recommended to use a key with 32 or 64 bytes.
|
||||
|
||||
The blockKey is optional, used to encrypt the cookie value -- set it to nil
|
||||
to not use encryption. If set, the length must correspond to the block size
|
||||
of the encryption algorithm. For AES, used by default, valid lengths are
|
||||
16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.
|
||||
|
||||
Strong keys can be created using the convenience function GenerateRandomKey().
|
||||
|
||||
Once a SecureCookie instance is set, use it to encode a cookie value:
|
||||
|
||||
func SetCookieHandler(w http.ResponseWriter, r *http.Request) {
|
||||
value := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
if encoded, err := s.Encode("cookie-name", value); err == nil {
|
||||
cookie := &http.Cookie{
|
||||
Name: "cookie-name",
|
||||
Value: encoded,
|
||||
Path: "/",
|
||||
}
|
||||
http.SetCookie(w, cookie)
|
||||
}
|
||||
}
|
||||
|
||||
Later, use the same SecureCookie instance to decode and validate a cookie
|
||||
value:
|
||||
|
||||
func ReadCookieHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if cookie, err := r.Cookie("cookie-name"); err == nil {
|
||||
value := make(map[string]string)
|
||||
        if err = s.Decode("cookie-name", cookie.Value, &value); err == nil {
|
||||
fmt.Fprintf(w, "The value of foo is %q", value["foo"])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
We stored a map[string]string, but secure cookies can hold any value that
|
||||
can be encoded using encoding/gob. To store custom types, they must be
|
||||
registered first using gob.Register(). For basic types this is not needed;
|
||||
it works out of the box.
|
||||
*/
|
||||
package securecookie
|
|
@ -0,0 +1,429 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package securecookie
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// errNoCodecs is returned by the *Multi helpers when called with an
	// empty codec list.
	errNoCodecs      = errors.New("securecookie: no codecs provided")
	// errHashKeyNotSet marks a SecureCookie constructed without a hash key.
	errHashKeyNotSet = errors.New("securecookie: hash key is not set")

	// ErrMacInvalid is returned when a cookie's HMAC does not verify.
	ErrMacInvalid = errors.New("securecookie: the value is not valid")
)
|
||||
|
||||
// Codec defines an interface to encode and decode cookie values.
// SecureCookie implements it; slices of Codec support key rotation
// (see CodecsFromPairs).
type Codec interface {
	// Encode serializes, signs and encodes value under the cookie name.
	Encode(name string, value interface{}) (string, error)
	// Decode verifies and decodes value into dst (a pointer).
	Decode(name, value string, dst interface{}) error
}
|
||||
|
||||
// New returns a new SecureCookie.
//
// hashKey is required, used to authenticate values using HMAC. Create it using
// GenerateRandomKey(). It is recommended to use a key with 32 or 64 bytes.
//
// blockKey is optional, used to encrypt values. Create it using
// GenerateRandomKey(). The key length must correspond to the block size
// of the encryption algorithm. For AES, used by default, valid lengths are
// 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256.
func New(hashKey, blockKey []byte) *SecureCookie {
	s := &SecureCookie{
		hashKey:   hashKey,
		blockKey:  blockKey,
		hashFunc:  sha256.New,
		maxAge:    86400 * 30, // 30 days, in seconds
		maxLength: 4096,
	}
	// A missing hash key is recorded as a sticky error; Encode/Decode will
	// surface it on every call rather than panicking here.
	if hashKey == nil {
		s.err = errHashKeyNotSet
	}
	// AES is the default cipher when a block key is supplied.
	if blockKey != nil {
		s.BlockFunc(aes.NewCipher)
	}
	return s
}
|
||||
|
||||
// SecureCookie encodes and decodes authenticated and optionally encrypted
// cookie values.
type SecureCookie struct {
	hashKey   []byte           // HMAC key; required
	hashFunc  func() hash.Hash // hash constructor for HMAC (default sha256.New)
	blockKey  []byte           // optional encryption key
	block     cipher.Block     // cipher built from blockKey, nil when unencrypted
	maxLength int              // max encoded cookie length; 0 disables the check
	maxAge    int64            // max value age in seconds; 0 disables the check
	minAge    int64            // min value age in seconds; 0 disables the check
	err       error            // sticky configuration error returned by Encode/Decode
	// For testing purposes, the function that returns the current timestamp.
	// If not set, it will use time.Now().UTC().Unix().
	timeFunc func() int64
}
|
||||
|
||||
// MaxLength restricts the maximum length, in bytes, for the cookie value.
//
// Default is 4096, which is the maximum value accepted by Internet Explorer.
// A value of 0 disables the length check. Returns s for chaining.
func (s *SecureCookie) MaxLength(value int) *SecureCookie {
	s.maxLength = value
	return s
}
|
||||
|
||||
// MaxAge restricts the maximum age, in seconds, for the cookie value.
//
// Default is 86400 * 30. Set it to 0 for no restriction.
// Returns s for chaining.
func (s *SecureCookie) MaxAge(value int) *SecureCookie {
	s.maxAge = int64(value)
	return s
}
|
||||
|
||||
// MinAge restricts the minimum age, in seconds, for the cookie value.
//
// Default is 0 (no restriction).
// Returns s for chaining.
func (s *SecureCookie) MinAge(value int) *SecureCookie {
	s.minAge = int64(value)
	return s
}
|
||||
|
||||
// HashFunc sets the hash function used to create HMAC.
//
// Default is crypto/sha256.New.
// Returns s for chaining.
func (s *SecureCookie) HashFunc(f func() hash.Hash) *SecureCookie {
	s.hashFunc = f
	return s
}
|
||||
|
||||
// BlockFunc sets the encryption function used to create a cipher.Block.
//
// Default is crypto/aes.New.
// Failures are recorded in s.err and surfaced by Encode/Decode.
// Returns s for chaining.
func (s *SecureCookie) BlockFunc(f func([]byte) (cipher.Block, error)) *SecureCookie {
	if s.blockKey == nil {
		s.err = errors.New("securecookie: block key is not set")
	} else if block, err := f(s.blockKey); err == nil {
		s.block = block
	} else {
		s.err = err
	}
	return s
}
|
||||
|
||||
// Encode encodes a cookie value.
//
// It serializes, optionally encrypts, signs with a message authentication code, and
// finally encodes the value.
//
// The name argument is the cookie name. It is stored with the encoded value.
// The value argument is the value to be encoded. It can be any value that can
// be encoded using encoding/gob. To store special structures, they must be
// registered first using gob.Register().
//
// The resulting wire format (before the final base64 pass) is
// "date|value|mac", where the MAC covers "name|date|value".
func (s *SecureCookie) Encode(name string, value interface{}) (string, error) {
	// A sticky configuration error (bad key, bad cipher) fails every call.
	if s.err != nil {
		return "", s.err
	}
	if s.hashKey == nil {
		s.err = errHashKeyNotSet
		return "", s.err
	}
	var err error
	var b []byte
	// 1. Serialize.
	if b, err = serialize(value); err != nil {
		return "", err
	}
	// 2. Encrypt (optional).
	if s.block != nil {
		if b, err = encrypt(s.block, b); err != nil {
			return "", err
		}
	}
	b = encode(b)
	// 3. Create MAC for "name|date|value". Extra pipe to be used later.
	b = []byte(fmt.Sprintf("%s|%d|%s|", name, s.timestamp(), b))
	// The MAC covers everything except the trailing pipe.
	mac := createMac(hmac.New(s.hashFunc, s.hashKey), b[:len(b)-1])
	// Append mac, remove name.
	b = append(b, mac...)[len(name)+1:]
	// 4. Encode to base64.
	b = encode(b)
	// 5. Check length.
	if s.maxLength != 0 && len(b) > s.maxLength {
		return "", errors.New("securecookie: the value is too long")
	}
	// Done.
	return string(b), nil
}
|
||||
|
||||
// Decode decodes a cookie value.
//
// It decodes, verifies a message authentication code, optionally decrypts and
// finally deserializes the value.
//
// The name argument is the cookie name. It must be the same name used when
// it was stored. The value argument is the encoded cookie value. The dst
// argument is where the cookie will be decoded. It must be a pointer.
func (s *SecureCookie) Decode(name, value string, dst interface{}) error {
	// A sticky configuration error (bad key, bad cipher) fails every call.
	if s.err != nil {
		return s.err
	}
	if s.hashKey == nil {
		s.err = errHashKeyNotSet
		return s.err
	}
	// 1. Check length.
	if s.maxLength != 0 && len(value) > s.maxLength {
		return errors.New("securecookie: the value is too long")
	}
	// 2. Decode from base64.
	b, err := decode([]byte(value))
	if err != nil {
		return err
	}
	// 3. Verify MAC. Value is "date|value|mac".
	parts := bytes.SplitN(b, []byte("|"), 3)
	if len(parts) != 3 {
		return ErrMacInvalid
	}
	h := hmac.New(s.hashFunc, s.hashKey)
	// Reconstruct "name|date|value" — the exact bytes the MAC was
	// computed over in Encode.
	b = append([]byte(name+"|"), b[:len(b)-len(parts[2])-1]...)
	if err = verifyMac(h, b, parts[2]); err != nil {
		return err
	}
	// 4. Verify date ranges.
	var t1 int64
	if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil {
		return errors.New("securecookie: invalid timestamp")
	}
	t2 := s.timestamp()
	if s.minAge != 0 && t1 > t2-s.minAge {
		return errors.New("securecookie: timestamp is too new")
	}
	if s.maxAge != 0 && t1 < t2-s.maxAge {
		return errors.New("securecookie: expired timestamp")
	}
	// 5. Decrypt (optional).
	b, err = decode(parts[1])
	if err != nil {
		return err
	}
	if s.block != nil {
		if b, err = decrypt(s.block, b); err != nil {
			return err
		}
	}
	// 6. Deserialize.
	if err = deserialize(b, dst); err != nil {
		return err
	}
	// Done.
	return nil
}
|
||||
|
||||
// timestamp returns the current timestamp, in seconds.
|
||||
//
|
||||
// For testing purposes, the function that generates the timestamp can be
|
||||
// overridden. If not set, it will return time.Now().UTC().Unix().
|
||||
func (s *SecureCookie) timestamp() int64 {
|
||||
if s.timeFunc == nil {
|
||||
return time.Now().UTC().Unix()
|
||||
}
|
||||
return s.timeFunc()
|
||||
}
|
||||
|
||||
// Authentication -------------------------------------------------------------
|
||||
|
||||
// createMac creates a message authentication code (MAC).
// h is consumed: value is written into it and its sum returned.
func createMac(h hash.Hash, value []byte) []byte {
	h.Write(value)
	return h.Sum(nil)
}
|
||||
|
||||
// verifyMac verifies that a message authentication code (MAC) is valid.
// The comparison is constant-time to avoid leaking how many leading
// bytes of the MAC matched.
func verifyMac(h hash.Hash, value []byte, mac []byte) error {
	mac2 := createMac(h, value)
	if subtle.ConstantTimeCompare(mac, mac2) == 1 {
		return nil
	}
	return ErrMacInvalid
}
|
||||
|
||||
// Encryption -----------------------------------------------------------------
|
||||
|
||||
// encrypt encrypts a value using the given block in counter mode.
//
// A random initialization vector (http://goo.gl/zF67k) with the length of the
// block size is prepended to the resulting ciphertext.
//
// Note: the input slice is encrypted in place (XORKeyStream writes over
// value); callers must not reuse it as plaintext afterwards.
func encrypt(block cipher.Block, value []byte) ([]byte, error) {
	iv := GenerateRandomKey(block.BlockSize())
	if iv == nil {
		return nil, errors.New("securecookie: failed to generate random iv")
	}
	// Encrypt it.
	stream := cipher.NewCTR(block, iv)
	stream.XORKeyStream(value, value)
	// Return iv + ciphertext.
	return append(iv, value...), nil
}
|
||||
|
||||
// decrypt decrypts a value using the given block in counter mode.
//
// The value to be decrypted must be prepended by a initialization vector
// (http://goo.gl/zF67k) with the length of the block size.
//
// The ciphertext portion is decrypted in place; the returned slice
// aliases the input.
func decrypt(block cipher.Block, value []byte) ([]byte, error) {
	size := block.BlockSize()
	// Input must hold the IV plus at least one ciphertext byte.
	if len(value) > size {
		// Extract iv.
		iv := value[:size]
		// Extract ciphertext.
		value = value[size:]
		// Decrypt it.
		stream := cipher.NewCTR(block, iv)
		stream.XORKeyStream(value, value)
		return value, nil
	}
	return nil, errors.New("securecookie: the value could not be decrypted")
}
|
||||
|
||||
// Serialization --------------------------------------------------------------
|
||||
|
||||
// serialize encodes a value using gob.
|
||||
func serialize(src interface{}) ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
enc := gob.NewEncoder(buf)
|
||||
if err := enc.Encode(src); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// deserialize decodes a value using gob.
|
||||
func deserialize(src []byte, dst interface{}) error {
|
||||
dec := gob.NewDecoder(bytes.NewBuffer(src))
|
||||
if err := dec.Decode(dst); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Encoding -------------------------------------------------------------------
|
||||
|
||||
// encode encodes a value using URL-safe base64.
func encode(value []byte) []byte {
	return []byte(base64.URLEncoding.EncodeToString(value))
}
|
||||
|
||||
// decode decodes a URL-safe base64 cookie value.
func decode(value []byte) ([]byte, error) {
	buf := make([]byte, base64.URLEncoding.DecodedLen(len(value)))
	n, err := base64.URLEncoding.Decode(buf, value)
	if err != nil {
		return nil, err
	}
	// Only n bytes were written; trim the padding slack.
	return buf[:n], nil
}
|
||||
|
||||
// Helpers --------------------------------------------------------------------
|
||||
|
||||
// GenerateRandomKey creates a random key with the given length in bytes.
// It returns nil if the system's secure random source cannot be read.
func GenerateRandomKey(length int) []byte {
	buf := make([]byte, length)
	_, err := io.ReadFull(rand.Reader, buf)
	if err != nil {
		// Without a working entropy source no safe key can be produced.
		return nil
	}
	return buf
}
|
||||
|
||||
// CodecsFromPairs returns a slice of SecureCookie instances.
|
||||
//
|
||||
// It is a convenience function to create a list of codecs for key rotation.
|
||||
func CodecsFromPairs(keyPairs ...[]byte) []Codec {
|
||||
codecs := make([]Codec, len(keyPairs)/2+len(keyPairs)%2)
|
||||
for i := 0; i < len(keyPairs); i += 2 {
|
||||
var blockKey []byte
|
||||
if i+1 < len(keyPairs) {
|
||||
blockKey = keyPairs[i+1]
|
||||
}
|
||||
codecs[i/2] = New(keyPairs[i], blockKey)
|
||||
}
|
||||
return codecs
|
||||
}
|
||||
|
||||
// EncodeMulti encodes a cookie value using a group of codecs.
|
||||
//
|
||||
// The codecs are tried in order. Multiple codecs are accepted to allow
|
||||
// key rotation.
|
||||
func EncodeMulti(name string, value interface{}, codecs ...Codec) (string, error) {
|
||||
if len(codecs) == 0 {
|
||||
return "", errNoCodecs
|
||||
}
|
||||
|
||||
var errors MultiError
|
||||
for _, codec := range codecs {
|
||||
encoded, err := codec.Encode(name, value)
|
||||
if err == nil {
|
||||
return encoded, nil
|
||||
}
|
||||
errors = append(errors, err)
|
||||
}
|
||||
return "", errors
|
||||
}
|
||||
|
||||
// DecodeMulti decodes a cookie value using a group of codecs.
|
||||
//
|
||||
// The codecs are tried in order. Multiple codecs are accepted to allow
|
||||
// key rotation.
|
||||
func DecodeMulti(name string, value string, dst interface{}, codecs ...Codec) error {
|
||||
if len(codecs) == 0 {
|
||||
return errNoCodecs
|
||||
}
|
||||
|
||||
var errors MultiError
|
||||
for _, codec := range codecs {
|
||||
err := codec.Decode(name, value, dst)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
errors = append(errors, err)
|
||||
}
|
||||
return errors
|
||||
}
|
||||
|
||||
// MultiError groups multiple errors.
|
||||
type MultiError []error
|
||||
|
||||
func (m MultiError) Error() string {
|
||||
s, n := "", 0
|
||||
for _, e := range m {
|
||||
if e != nil {
|
||||
if n == 0 {
|
||||
s = e.Error()
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
switch n {
|
||||
case 0:
|
||||
return "(0 errors)"
|
||||
case 1:
|
||||
return s
|
||||
case 2:
|
||||
return s + " (and 1 other error)"
|
||||
}
|
||||
return fmt.Sprintf("%s (and %d other errors)", s, n-1)
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,20 @@
|
|||
sessions
|
||||
========
|
||||
|
||||
Store Implementations
|
||||
---------------------
|
||||
Other implementations of the sessions.Store interface:
|
||||
|
||||
* [github.com/starJammer/gorilla-sessions-arangodb](https://github.com/starJammer/gorilla-sessions-arangodb) - ArangoDB
|
||||
* [github.com/yosssi/boltstore](https://github.com/yosssi/boltstore) - Bolt
|
||||
* [github.com/srinathgs/couchbasestore](https://github.com/srinathgs/couchbasestore) - Couchbase
|
||||
* [github.com/denizeren/dynamostore](https://github.com/denizeren/dynamostore) - Dynamodb on AWS
|
||||
* [github.com/bradleypeabody/gorilla-sessions-memcache](https://github.com/bradleypeabody/gorilla-sessions-memcache) - Memcache
|
||||
* [github.com/hnakamur/gaesessions](https://github.com/hnakamur/gaesessions) - Memcache on GAE
|
||||
* [github.com/kidstuff/mongostore](https://github.com/kidstuff/mongostore) - MongoDB
|
||||
* [github.com/srinathgs/mysqlstore](https://github.com/srinathgs/mysqlstore) - MySQL
|
||||
* [github.com/antonlindstrom/pgstore](https://github.com/antonlindstrom/pgstore) - PostgreSQL
|
||||
* [github.com/boj/redistore](https://github.com/boj/redistore) - Redis
|
||||
* [github.com/boj/rethinkstore](https://github.com/boj/rethinkstore) - RethinkDB
|
||||
* [github.com/boj/riakstore](https://github.com/boj/riakstore) - Riak
|
||||
* [github.com/michaeljs1990/sqlitestore](https://github.com/michaeljs1990/sqlitestore) - SQLite
|
|
@ -0,0 +1,168 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package gorilla/sessions provides cookie and filesystem sessions and
|
||||
infrastructure for custom session backends.
|
||||
|
||||
The key features are:
|
||||
|
||||
* Simple API: use it as an easy way to set signed (and optionally
|
||||
encrypted) cookies.
|
||||
* Built-in backends to store sessions in cookies or the filesystem.
|
||||
* Flash messages: session values that last until read.
|
||||
* Convenient way to switch session persistency (aka "remember me") and set
|
||||
other attributes.
|
||||
* Mechanism to rotate authentication and encryption keys.
|
||||
* Multiple sessions per request, even using different backends.
|
||||
* Interfaces and infrastructure for custom session backends: sessions from
|
||||
different stores can be retrieved and batch-saved using a common API.
|
||||
|
||||
Let's start with an example that shows the sessions API in a nutshell:
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"github.com/gorilla/sessions"
|
||||
)
|
||||
|
||||
var store = sessions.NewCookieStore([]byte("something-very-secret"))
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Get a session. We're ignoring the error resulted from decoding an
|
||||
// existing session: Get() always returns a session, even if empty.
|
||||
session, _ := store.Get(r, "session-name")
|
||||
// Set some session values.
|
||||
session.Values["foo"] = "bar"
|
||||
session.Values[42] = 43
|
||||
// Save it.
|
||||
session.Save(r, w)
|
||||
}
|
||||
|
||||
First we initialize a session store calling NewCookieStore() and passing a
|
||||
secret key used to authenticate the session. Inside the handler, we call
|
||||
store.Get() to retrieve an existing session or a new one. Then we set some
|
||||
session values in session.Values, which is a map[interface{}]interface{}.
|
||||
And finally we call session.Save() to save the session in the response.
|
||||
|
||||
Note that in production code, we should check for errors when calling
|
||||
session.Save(r, w), and either display an error message or otherwise handle it.
|
||||
|
||||
Important Note: If you aren't using gorilla/mux, you need to wrap your handlers
|
||||
with context.ClearHandler, or else you will leak memory! An easy way to do this
|
||||
is to wrap the top-level mux when calling http.ListenAndServe:
|
||||
|
||||
http.ListenAndServe(":8080", context.ClearHandler(http.DefaultServeMux))
|
||||
|
||||
The ClearHandler function is provided by the gorilla/context package.
|
||||
|
||||
That's all you need to know for the basic usage. Let's take a look at other
|
||||
options, starting with flash messages.
|
||||
|
||||
Flash messages are session values that last until read. The term appeared with
|
||||
Ruby On Rails a few years back. When we request a flash message, it is removed
|
||||
from the session. To add a flash, call session.AddFlash(), and to get all
|
||||
flashes, call session.Flashes(). Here is an example:
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Get a session.
|
||||
session, _ := store.Get(r, "session-name")
|
||||
	    // Get the previously set flashes, if any.
|
||||
if flashes := session.Flashes(); len(flashes) > 0 {
|
||||
// Just print the flash values.
|
||||
	        fmt.Fprintf(w, "%v", flashes)
|
||||
} else {
|
||||
// Set a new flash.
|
||||
session.AddFlash("Hello, flash messages world!")
|
||||
fmt.Fprint(w, "No flashes found.")
|
||||
}
|
||||
session.Save(r, w)
|
||||
}
|
||||
|
||||
Flash messages are useful to set information to be read after a redirection,
|
||||
like after form submissions.
|
||||
|
||||
There may also be cases where you want to store a complex datatype within a
|
||||
session, such as a struct. Sessions are serialised using the encoding/gob package,
|
||||
so it is easy to register new datatypes for storage in sessions:
|
||||
|
||||
import(
|
||||
"encoding/gob"
|
||||
"github.com/gorilla/sessions"
|
||||
)
|
||||
|
||||
type Person struct {
|
||||
FirstName string
|
||||
LastName string
|
||||
Email string
|
||||
Age int
|
||||
}
|
||||
|
||||
type M map[string]interface{}
|
||||
|
||||
func init() {
|
||||
|
||||
gob.Register(&Person{})
|
||||
gob.Register(&M{})
|
||||
}
|
||||
|
||||
As it's not possible to pass a raw type as a parameter to a function, gob.Register()
|
||||
relies on us passing it an empty pointer to the type as a parameter. In the example
|
||||
above we've passed it a pointer to a struct and a pointer to a custom type
|
||||
representing a map[string]interface. This will then allow us to serialise/deserialise
|
||||
values of those types to and from our sessions.
|
||||
|
||||
By default, session cookies last for a month. This is probably too long for
|
||||
some cases, but it is easy to change this and other attributes during
|
||||
runtime. Sessions can be configured individually or the store can be
|
||||
configured and then all sessions saved using it will use that configuration.
|
||||
We access session.Options or store.Options to set a new configuration. The
|
||||
fields are basically a subset of http.Cookie fields. Let's change the
|
||||
maximum age of a session to one week:
|
||||
|
||||
session.Options = &sessions.Options{
|
||||
Path: "/",
|
||||
MaxAge: 86400 * 7,
|
||||
HttpOnly: true,
|
||||
}
|
||||
|
||||
Sometimes we may want to change authentication and/or encryption keys without
|
||||
breaking existing sessions. The CookieStore supports key rotation, and to use
|
||||
it you just need to set multiple authentication and encryption keys, in pairs,
|
||||
to be tested in order:
|
||||
|
||||
var store = sessions.NewCookieStore(
|
||||
[]byte("new-authentication-key"),
|
||||
[]byte("new-encryption-key"),
|
||||
[]byte("old-authentication-key"),
|
||||
[]byte("old-encryption-key"),
|
||||
)
|
||||
|
||||
New sessions will be saved using the first pair. Old sessions can still be
|
||||
read because the first pair will fail, and the second will be tested. This
|
||||
makes it easy to "rotate" secret keys and still be able to validate existing
|
||||
sessions. Note: for all pairs the encryption key is optional; set it to nil
|
||||
or omit it and encryption won't be used.
|
||||
|
||||
Multiple sessions can be used in the same request, even with different
|
||||
session backends. When this happens, calling Save() on each session
|
||||
individually would be cumbersome, so we have a way to save all sessions
|
||||
at once: it's sessions.Save(). Here's an example:
|
||||
|
||||
var store = sessions.NewCookieStore([]byte("something-very-secret"))
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// Get a session and set a value.
|
||||
session1, _ := store.Get(r, "session-one")
|
||||
session1.Values["foo"] = "bar"
|
||||
// Get another session and set another value.
|
||||
session2, _ := store.Get(r, "session-two")
|
||||
session2.Values[42] = 43
|
||||
// Save all sessions.
|
||||
sessions.Save(r, w)
|
||||
}
|
||||
|
||||
This is possible because when we call Get() from a session store, it adds the
|
||||
session to a common registry. Save() uses it to save all registered sessions.
|
||||
*/
|
||||
package sessions
|
|
@ -0,0 +1,234 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sessions
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/context"
|
||||
)
|
||||
|
||||
// Default flashes key.
|
||||
const flashesKey = "_flash"
|
||||
|
||||
// Options --------------------------------------------------------------------
|
||||
|
||||
// Options stores configuration for a session or session store.
//
// Fields are a subset of http.Cookie fields.
type Options struct {
	// Path is the cookie path (commonly "/").
	Path string
	// Domain is the cookie domain.
	Domain string
	// MaxAge=0 means no 'Max-Age' attribute specified.
	// MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'.
	// MaxAge>0 means Max-Age attribute present and given in seconds.
	MaxAge int
	// Secure restricts the cookie to HTTPS connections.
	Secure bool
	// HttpOnly hides the cookie from client-side scripts.
	HttpOnly bool
}
|
||||
|
||||
// Session --------------------------------------------------------------------
|
||||
|
||||
// NewSession is called by session stores to create a new session instance.
|
||||
func NewSession(store Store, name string) *Session {
|
||||
return &Session{
|
||||
Values: make(map[interface{}]interface{}),
|
||||
store: store,
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// Session stores the values and optional configuration for a session.
type Session struct {
	// ID is used by stores that persist session data outside the cookie
	// (e.g. FilesystemStore uses it as part of the filename).
	ID string
	// Values holds arbitrary session data keyed by arbitrary values.
	Values map[interface{}]interface{}
	// Options is the per-session cookie configuration.
	Options *Options
	// IsNew is true when the session was not decoded from an existing cookie.
	IsNew bool
	store Store  // the store that created this session
	name  string // the name the session was registered under
}
|
||||
|
||||
// Flashes returns a slice of flash messages from the session.
|
||||
//
|
||||
// A single variadic argument is accepted, and it is optional: it defines
|
||||
// the flash key. If not defined "_flash" is used by default.
|
||||
func (s *Session) Flashes(vars ...string) []interface{} {
|
||||
var flashes []interface{}
|
||||
key := flashesKey
|
||||
if len(vars) > 0 {
|
||||
key = vars[0]
|
||||
}
|
||||
if v, ok := s.Values[key]; ok {
|
||||
// Drop the flashes and return it.
|
||||
delete(s.Values, key)
|
||||
flashes = v.([]interface{})
|
||||
}
|
||||
return flashes
|
||||
}
|
||||
|
||||
// AddFlash adds a flash message to the session.
|
||||
//
|
||||
// A single variadic argument is accepted, and it is optional: it defines
|
||||
// the flash key. If not defined "_flash" is used by default.
|
||||
func (s *Session) AddFlash(value interface{}, vars ...string) {
|
||||
key := flashesKey
|
||||
if len(vars) > 0 {
|
||||
key = vars[0]
|
||||
}
|
||||
var flashes []interface{}
|
||||
if v, ok := s.Values[key]; ok {
|
||||
flashes = v.([]interface{})
|
||||
}
|
||||
s.Values[key] = append(flashes, value)
|
||||
}
|
||||
|
||||
// Save is a convenience method to save this session. It is the same as calling
// store.Save(request, response, session) on the store that created it.
func (s *Session) Save(r *http.Request, w http.ResponseWriter) error {
	return s.store.Save(r, w, s)
}

// Name returns the name used to register the session.
func (s *Session) Name() string {
	return s.name
}

// Store returns the session store used to register the session.
func (s *Session) Store() Store {
	return s.store
}
|
||||
|
||||
// Registry -------------------------------------------------------------------
|
||||
|
||||
// sessionInfo stores a session tracked by the registry.
type sessionInfo struct {
	s *Session // the cached session
	e error    // the error returned by the store when the session was created
}

// contextKey is the type used to store the registry in the context.
// A private type prevents collisions with keys set by other packages.
type contextKey int

// registryKey is the key used to store the registry in the context.
const registryKey contextKey = 0
|
||||
|
||||
// GetRegistry returns a registry instance for the current request.
|
||||
func GetRegistry(r *http.Request) *Registry {
|
||||
registry := context.Get(r, registryKey)
|
||||
if registry != nil {
|
||||
return registry.(*Registry)
|
||||
}
|
||||
newRegistry := &Registry{
|
||||
request: r,
|
||||
sessions: make(map[string]sessionInfo),
|
||||
}
|
||||
context.Set(r, registryKey, newRegistry)
|
||||
return newRegistry
|
||||
}
|
||||
|
||||
// Registry stores sessions used during a request.
type Registry struct {
	request  *http.Request
	sessions map[string]sessionInfo // sessions keyed by registration name
}
|
||||
|
||||
// Get registers and returns a session for the given name and session store.
|
||||
//
|
||||
// It returns a new session if there are no sessions registered for the name.
|
||||
func (s *Registry) Get(store Store, name string) (session *Session, err error) {
|
||||
if info, ok := s.sessions[name]; ok {
|
||||
session, err = info.s, info.e
|
||||
} else {
|
||||
session, err = store.New(s.request, name)
|
||||
session.name = name
|
||||
s.sessions[name] = sessionInfo{s: session, e: err}
|
||||
}
|
||||
session.store = store
|
||||
return
|
||||
}
|
||||
|
||||
// Save saves all sessions registered for the current request.
|
||||
func (s *Registry) Save(w http.ResponseWriter) error {
|
||||
var errMulti MultiError
|
||||
for name, info := range s.sessions {
|
||||
session := info.s
|
||||
if session.store == nil {
|
||||
errMulti = append(errMulti, fmt.Errorf(
|
||||
"sessions: missing store for session %q", name))
|
||||
} else if err := session.store.Save(s.request, w, session); err != nil {
|
||||
errMulti = append(errMulti, fmt.Errorf(
|
||||
"sessions: error saving session %q -- %v", name, err))
|
||||
}
|
||||
}
|
||||
if errMulti != nil {
|
||||
return errMulti
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helpers --------------------------------------------------------------------

func init() {
	// Flash messages are stored as []interface{}; register the type so
	// gob-based codecs can serialize session values containing flashes.
	gob.Register([]interface{}{})
}
|
||||
|
||||
// Save saves all sessions used during the current request.
//
// It is a convenience wrapper around GetRegistry(r).Save(w) for requests
// that use multiple sessions, possibly from different stores.
func Save(r *http.Request, w http.ResponseWriter) error {
	return GetRegistry(r).Save(w)
}
|
||||
|
||||
// NewCookie returns an http.Cookie with the options set. It also sets
|
||||
// the Expires field calculated based on the MaxAge value, for Internet
|
||||
// Explorer compatibility.
|
||||
func NewCookie(name, value string, options *Options) *http.Cookie {
|
||||
cookie := &http.Cookie{
|
||||
Name: name,
|
||||
Value: value,
|
||||
Path: options.Path,
|
||||
Domain: options.Domain,
|
||||
MaxAge: options.MaxAge,
|
||||
Secure: options.Secure,
|
||||
HttpOnly: options.HttpOnly,
|
||||
}
|
||||
if options.MaxAge > 0 {
|
||||
d := time.Duration(options.MaxAge) * time.Second
|
||||
cookie.Expires = time.Now().Add(d)
|
||||
} else if options.MaxAge < 0 {
|
||||
// Set it to the past to expire now.
|
||||
cookie.Expires = time.Unix(1, 0)
|
||||
}
|
||||
return cookie
|
||||
}
|
||||
|
||||
// Error ----------------------------------------------------------------------
|
||||
|
||||
// MultiError stores multiple errors.
|
||||
//
|
||||
// Borrowed from the App Engine SDK.
|
||||
type MultiError []error
|
||||
|
||||
func (m MultiError) Error() string {
|
||||
s, n := "", 0
|
||||
for _, e := range m {
|
||||
if e != nil {
|
||||
if n == 0 {
|
||||
s = e.Error()
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
switch n {
|
||||
case 0:
|
||||
return "(0 errors)"
|
||||
case 1:
|
||||
return s
|
||||
case 2:
|
||||
return s + " (and 1 other error)"
|
||||
}
|
||||
return fmt.Sprintf("%s (and %d other errors)", s, n-1)
|
||||
}
|
|
@ -0,0 +1,236 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package sessions
|
||||
|
||||
import (
|
||||
"encoding/base32"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/gorilla/securecookie"
|
||||
)
|
||||
|
||||
// Store is an interface for custom session stores.
//
// See CookieStore and FilesystemStore for examples.
type Store interface {
	// Get should return a cached session.
	Get(r *http.Request, name string) (*Session, error)

	// New should create and return a new session.
	//
	// Note that New should never return a nil session, even in the case of
	// an error if using the Registry infrastructure to cache the session.
	New(r *http.Request, name string) (*Session, error)

	// Save should persist session to the underlying store implementation,
	// writing whatever cookie is needed to the response.
	Save(r *http.Request, w http.ResponseWriter, s *Session) error
}
|
||||
|
||||
// CookieStore ----------------------------------------------------------------
|
||||
|
||||
// NewCookieStore returns a new CookieStore.
|
||||
//
|
||||
// Keys are defined in pairs to allow key rotation, but the common case is
|
||||
// to set a single authentication key and optionally an encryption key.
|
||||
//
|
||||
// The first key in a pair is used for authentication and the second for
|
||||
// encryption. The encryption key can be set to nil or omitted in the last
|
||||
// pair, but the authentication key is required in all pairs.
|
||||
//
|
||||
// It is recommended to use an authentication key with 32 or 64 bytes.
|
||||
// The encryption key, if set, must be either 16, 24, or 32 bytes to select
|
||||
// AES-128, AES-192, or AES-256 modes.
|
||||
//
|
||||
// Use the convenience function securecookie.GenerateRandomKey() to create
|
||||
// strong keys.
|
||||
func NewCookieStore(keyPairs ...[]byte) *CookieStore {
|
||||
return &CookieStore{
|
||||
Codecs: securecookie.CodecsFromPairs(keyPairs...),
|
||||
Options: &Options{
|
||||
Path: "/",
|
||||
MaxAge: 86400 * 30,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CookieStore stores sessions using secure cookies.
type CookieStore struct {
	// Codecs are tried in order, enabling key rotation.
	Codecs []securecookie.Codec
	// Options is the default configuration applied to new sessions.
	Options *Options // default configuration
}

// Get returns a session for the given name after adding it to the registry.
//
// It returns a new session if the sessions doesn't exist. Access IsNew on
// the session to check if it is an existing session or a new one.
//
// It returns a new session and an error if the session exists but could
// not be decoded.
func (s *CookieStore) Get(r *http.Request, name string) (*Session, error) {
	return GetRegistry(r).Get(s, name)
}
|
||||
|
||||
// New returns a session for the given name without adding it to the registry.
|
||||
//
|
||||
// The difference between New() and Get() is that calling New() twice will
|
||||
// decode the session data twice, while Get() registers and reuses the same
|
||||
// decoded session after the first call.
|
||||
func (s *CookieStore) New(r *http.Request, name string) (*Session, error) {
|
||||
session := NewSession(s, name)
|
||||
opts := *s.Options
|
||||
session.Options = &opts
|
||||
session.IsNew = true
|
||||
var err error
|
||||
if c, errCookie := r.Cookie(name); errCookie == nil {
|
||||
err = securecookie.DecodeMulti(name, c.Value, &session.Values,
|
||||
s.Codecs...)
|
||||
if err == nil {
|
||||
session.IsNew = false
|
||||
}
|
||||
}
|
||||
return session, err
|
||||
}
|
||||
|
||||
// Save adds a single session to the response.
|
||||
func (s *CookieStore) Save(r *http.Request, w http.ResponseWriter,
|
||||
session *Session) error {
|
||||
encoded, err := securecookie.EncodeMulti(session.Name(), session.Values,
|
||||
s.Codecs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
http.SetCookie(w, NewCookie(session.Name(), encoded, session.Options))
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilesystemStore ------------------------------------------------------------

// fileMutex guards concurrent reads and writes of session files on disk.
var fileMutex sync.RWMutex
|
||||
|
||||
// NewFilesystemStore returns a new FilesystemStore.
|
||||
//
|
||||
// The path argument is the directory where sessions will be saved. If empty
|
||||
// it will use os.TempDir().
|
||||
//
|
||||
// See NewCookieStore() for a description of the other parameters.
|
||||
func NewFilesystemStore(path string, keyPairs ...[]byte) *FilesystemStore {
|
||||
if path == "" {
|
||||
path = os.TempDir()
|
||||
}
|
||||
return &FilesystemStore{
|
||||
Codecs: securecookie.CodecsFromPairs(keyPairs...),
|
||||
Options: &Options{
|
||||
Path: "/",
|
||||
MaxAge: 86400 * 30,
|
||||
},
|
||||
path: path,
|
||||
}
|
||||
}
|
||||
|
||||
// FilesystemStore stores sessions in the filesystem.
//
// It also serves as a reference for custom stores.
//
// This store is still experimental and not well tested. Feedback is welcome.
type FilesystemStore struct {
	// Codecs are tried in order, enabling key rotation.
	Codecs []securecookie.Codec
	// Options is the default configuration applied to new sessions.
	Options *Options // default configuration
	// path is the directory where session files are written.
	path string
}
|
||||
|
||||
// MaxLength restricts the maximum length of new sessions to l.
|
||||
// If l is 0 there is no limit to the size of a session, use with caution.
|
||||
// The default for a new FilesystemStore is 4096.
|
||||
func (s *FilesystemStore) MaxLength(l int) {
|
||||
for _, c := range s.Codecs {
|
||||
if codec, ok := c.(*securecookie.SecureCookie); ok {
|
||||
codec.MaxLength(l)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a session for the given name after adding it to the registry.
//
// See CookieStore.Get().
func (s *FilesystemStore) Get(r *http.Request, name string) (*Session, error) {
	return GetRegistry(r).Get(s, name)
}
|
||||
|
||||
// New returns a session for the given name without adding it to the registry.
|
||||
//
|
||||
// See CookieStore.New().
|
||||
func (s *FilesystemStore) New(r *http.Request, name string) (*Session, error) {
|
||||
session := NewSession(s, name)
|
||||
opts := *s.Options
|
||||
session.Options = &opts
|
||||
session.IsNew = true
|
||||
var err error
|
||||
if c, errCookie := r.Cookie(name); errCookie == nil {
|
||||
err = securecookie.DecodeMulti(name, c.Value, &session.ID, s.Codecs...)
|
||||
if err == nil {
|
||||
err = s.load(session)
|
||||
if err == nil {
|
||||
session.IsNew = false
|
||||
}
|
||||
}
|
||||
}
|
||||
return session, err
|
||||
}
|
||||
|
||||
// Save adds a single session to the response.
|
||||
func (s *FilesystemStore) Save(r *http.Request, w http.ResponseWriter,
|
||||
session *Session) error {
|
||||
if session.ID == "" {
|
||||
// Because the ID is used in the filename, encode it to
|
||||
// use alphanumeric characters only.
|
||||
session.ID = strings.TrimRight(
|
||||
base32.StdEncoding.EncodeToString(
|
||||
securecookie.GenerateRandomKey(32)), "=")
|
||||
}
|
||||
if err := s.save(session); err != nil {
|
||||
return err
|
||||
}
|
||||
encoded, err := securecookie.EncodeMulti(session.Name(), session.ID,
|
||||
s.Codecs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
http.SetCookie(w, NewCookie(session.Name(), encoded, session.Options))
|
||||
return nil
|
||||
}
|
||||
|
||||
// save writes encoded session.Values to a file.
|
||||
func (s *FilesystemStore) save(session *Session) error {
|
||||
encoded, err := securecookie.EncodeMulti(session.Name(), session.Values,
|
||||
s.Codecs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filename := filepath.Join(s.path, "session_"+session.ID)
|
||||
fileMutex.Lock()
|
||||
defer fileMutex.Unlock()
|
||||
return ioutil.WriteFile(filename, []byte(encoded), 0600)
|
||||
}
|
||||
|
||||
// load reads a file and decodes its content into session.Values.
|
||||
func (s *FilesystemStore) load(session *Session) error {
|
||||
filename := filepath.Join(s.path, "session_"+session.ID)
|
||||
fileMutex.RLock()
|
||||
defer fileMutex.RUnlock()
|
||||
fdata, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err = securecookie.DecodeMulti(session.Name(), string(fdata),
|
||||
&session.Values, s.Codecs...); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
# This is the official list of Gorilla WebSocket authors for copyright
|
||||
# purposes.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Gary Burd <gary@beagledreams.com>
|
||||
Joachim Bauch <mail@joachim-bauch.de>
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,59 @@
|
|||
# Gorilla WebSocket
|
||||
|
||||
Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
|
||||
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.
|
||||
|
||||
### Documentation
|
||||
|
||||
* [API Reference](http://godoc.org/github.com/gorilla/websocket)
|
||||
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
|
||||
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)
|
||||
|
||||
### Status
|
||||
|
||||
The Gorilla WebSocket package provides a complete and tested implementation of
|
||||
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
|
||||
package API is stable.
|
||||
|
||||
### Installation
|
||||
|
||||
go get github.com/gorilla/websocket
|
||||
|
||||
### Protocol Compliance
|
||||
|
||||
The Gorilla WebSocket package passes the server tests in the [Autobahn Test
|
||||
Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
|
||||
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
|
||||
|
||||
### Gorilla WebSocket compared with other packages
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<th></th>
|
||||
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
|
||||
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
|
||||
</tr>
|
||||
<tr>
|
||||
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
|
||||
<tr><td>Passes <a href="http://autobahn.ws/testsuite/">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message<td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
|
||||
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
|
||||
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
|
||||
<tr><td colspan="3">Other Features</td></tr>
|
||||
<tr><td>Limit size of received message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.SetReadLimit">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=5082">No</a></td></tr>
|
||||
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
|
||||
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
|
||||
</table>
|
||||
|
||||
Notes:
|
||||
|
||||
1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
|
||||
2. The application can get the type of a received data message by implementing
|
||||
a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
|
||||
function.
|
||||
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
|
||||
Read returns when the input buffer is full or a frame boundary is
|
||||
encountered. Each call to Write sends a single frame message. The Gorilla
|
||||
io.Reader and io.WriteCloser operate on a single WebSocket message.
|
||||
|
|
@ -0,0 +1,269 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
// invalid.
var ErrBadHandshake = errors.New("websocket: bad handshake")

// NewClient creates a new client connection using the given net connection.
// The URL u specifies the host and request URI. Use requestHeader to specify
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
// (Cookie). Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etc.
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
	challengeKey, err := generateChallengeKey()
	if err != nil {
		return nil, nil, err
	}
	// The server must echo this value back in Sec-WebSocket-Accept
	// (RFC 6455 section 4.1).
	acceptKey := computeAcceptKey(challengeKey)

	c = newConn(netConn, false, readBufSize, writeBufSize)
	// Build the handshake request by hand in the connection's write buffer
	// (the buffer is otherwise unused until the handshake completes), so the
	// request URI is sent exactly as given rather than re-encoded by net/http.
	p := c.writeBuf[:0]
	p = append(p, "GET "...)
	p = append(p, u.RequestURI()...)
	p = append(p, " HTTP/1.1\r\nHost: "...)
	p = append(p, u.Host...)
	// "Upgrade" is capitalized for servers that do not use case insensitive
	// comparisons on header tokens.
	p = append(p, "\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...)
	p = append(p, challengeKey...)
	p = append(p, "\r\n"...)
	for k, vs := range requestHeader {
		for _, v := range vs {
			p = append(p, k...)
			p = append(p, ": "...)
			p = append(p, v...)
			p = append(p, "\r\n"...)
		}
	}
	// Blank line terminates the request headers.
	p = append(p, "\r\n"...)

	if _, err := netConn.Write(p); err != nil {
		return nil, nil, err
	}

	// Read the response through the connection's buffered reader so any
	// frame bytes that arrive immediately after the response are not lost.
	resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u})
	if err != nil {
		return nil, nil, err
	}
	// Validate the handshake per RFC 6455 section 4.1: 101 status, Upgrade
	// and Connection tokens, and the expected Sec-WebSocket-Accept value.
	if resp.StatusCode != 101 ||
		!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
		!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
		resp.Header.Get("Sec-Websocket-Accept") != acceptKey {
		return nil, resp, ErrBadHandshake
	}
	c.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
	return c, resp, nil
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
type Dialer struct {
	// NetDial specifies the dial function for creating TCP connections. If
	// NetDial is nil, net.Dial is used.
	NetDial func(network, addr string) (net.Conn, error)

	// TLSClientConfig specifies the TLS configuration to use with tls.Client.
	// If nil, the default configuration is used.
	TLSClientConfig *tls.Config

	// HandshakeTimeout specifies the duration for the handshake to complete.
	// Zero means no timeout.
	HandshakeTimeout time.Duration

	// Input and output buffer sizes. If the buffer size is zero, then a
	// default value of 4096 is used.
	ReadBufferSize, WriteBufferSize int

	// Subprotocols specifies the client's requested subprotocols. They are
	// sent joined with ", " in a single Sec-Websocket-Protocol header.
	Subprotocols []string
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")

// parseURL parses a ws/wss URL without using url.Parse, because url.Parse
// percent-decodes the path and the dialer interface offers applications no
// way to work around that.
//
// From the RFC:
//
//	ws-URI  = "ws:" "//" host [ ":" port ] path [ "?" query ]
//	wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
func parseURL(s string) (*url.URL, error) {
	var u url.URL
	if strings.HasPrefix(s, "ws://") {
		u.Scheme = "ws"
		s = s[len("ws://"):]
	} else if strings.HasPrefix(s, "wss://") {
		u.Scheme = "wss"
		s = s[len("wss://"):]
	} else {
		return nil, errMalformedURL
	}

	// Split the remainder into host and request-URI at the first slash.
	// The request-URI is stored in u.Opaque (not u.Path) so it is never
	// percent-decoded.
	if slash := strings.IndexByte(s, '/'); slash >= 0 {
		u.Host, u.Opaque = s[:slash], s[slash:]
	} else {
		u.Host, u.Opaque = s, "/"
	}

	// WebSocket URIs do not contain user information.
	if strings.Contains(u.Host, "@") {
		return nil, errMalformedURL
	}

	return &u, nil
}
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
if u.Scheme == "wss" {
|
||||
hostPort += ":443"
|
||||
} else {
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default zero values.
// It is deliberately left nil: Dial checks for a nil receiver and substitutes
// &Dialer{}, so DefaultDialer.Dial(...) uses all defaults.
var DefaultDialer *Dialer
|
||||
|
||||
// Dial creates a new client connection. Use requestHeader to specify the
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
// Use the response.Header to get the selected subprotocol
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
//
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
// non-nil *http.Response so that callers can handle redirects, authentication,
// etcetera. The response body may not contain the entire response and does not
// need to be closed by the application.
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
	u, err := parseURL(urlStr)
	if err != nil {
		return nil, nil, err
	}

	hostPort, hostNoPort := hostPortNoPort(u)

	// A nil *Dialer (e.g. DefaultDialer) behaves like a zero-value Dialer.
	if d == nil {
		d = &Dialer{}
	}

	// deadline stays zero (no timeout) unless HandshakeTimeout is set.
	var deadline time.Time
	if d.HandshakeTimeout != 0 {
		deadline = time.Now().Add(d.HandshakeTimeout)
	}

	netDial := d.NetDial
	if netDial == nil {
		netDialer := &net.Dialer{Deadline: deadline}
		netDial = netDialer.Dial
	}

	netConn, err := netDial("tcp", hostPort)
	if err != nil {
		return nil, nil, err
	}

	// Close the connection on every error path below; on success netConn is
	// set to nil to disarm this deferred close.
	defer func() {
		if netConn != nil {
			netConn.Close()
		}
	}()

	if err := netConn.SetDeadline(deadline); err != nil {
		return nil, nil, err
	}

	if u.Scheme == "wss" {
		cfg := d.TLSClientConfig
		if cfg == nil {
			cfg = &tls.Config{ServerName: hostNoPort}
		} else if cfg.ServerName == "" {
			// Copy before mutating so the caller's config is not modified.
			shallowCopy := *cfg
			cfg = &shallowCopy
			cfg.ServerName = hostNoPort
		}
		tlsConn := tls.Client(netConn, cfg)
		netConn = tlsConn
		if err := tlsConn.Handshake(); err != nil {
			return nil, nil, err
		}
		if !cfg.InsecureSkipVerify {
			if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
				return nil, nil, err
			}
		}
	}

	if len(d.Subprotocols) > 0 {
		// Copy the header before adding the subprotocol list so the caller's
		// map is not modified.
		h := http.Header{}
		for k, v := range requestHeader {
			h[k] = v
		}
		h.Set("Sec-Websocket-Protocol", strings.Join(d.Subprotocols, ", "))
		requestHeader = h
	}

	if len(requestHeader["Host"]) > 0 {
		// This can be used to supply a Host: header which is different from
		// the dial address.
		u.Host = requestHeader.Get("Host")

		// Drop "Host" header: NewClient writes the Host line from u.Host,
		// so leaving it in requestHeader would send it twice.
		h := http.Header{}
		for k, v := range requestHeader {
			if k == "Host" {
				continue
			}
			h[k] = v
		}
		requestHeader = h
	}

	conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize)

	if err != nil {
		if err == ErrBadHandshake {
			// Before closing the network connection on return from this
			// function, slurp up some of the response to aid application
			// debugging.
			buf := make([]byte, 1024)
			n, _ := io.ReadFull(resp.Body, buf)
			resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
		}
		return nil, resp, err
	}

	// Clear the handshake deadline; the application controls deadlines from
	// here on.
	netConn.SetDeadline(time.Time{})
	netConn = nil // to avoid close in defer.
	return conn, resp, nil
}
|
|
@ -0,0 +1,825 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// Worst-case frame header: 2 fixed bytes + 8-byte extended length +
	// 4-byte mask key.
	maxFrameHeaderSize         = 2 + 8 + 4
	maxControlFramePayloadSize = 125         // RFC 6455 section 5.5
	finalBit                   = 1 << 7      // FIN flag in the first header byte
	maskBit                    = 1 << 7      // mask flag in the second header byte
	writeWait                  = time.Second // deadline for protocol-generated control writes

	defaultReadBufferSize  = 4096
	defaultWriteBufferSize = 4096

	continuationFrame = 0
	noFrame           = -1 // sentinel: no frame currently in progress
)
||||
|
||||
// Close codes defined in RFC 6455, section 11.7.
const (
	CloseNormalClosure           = 1000
	CloseGoingAway               = 1001
	CloseProtocolError           = 1002
	CloseUnsupportedData         = 1003
	CloseNoStatusReceived        = 1005 // reserved; never sent on the wire
	CloseAbnormalClosure         = 1006 // reserved; never sent on the wire
	CloseInvalidFramePayloadData = 1007
	ClosePolicyViolation         = 1008
	CloseMessageTooBig           = 1009
	CloseMandatoryExtension      = 1010
	CloseInternalServerErr       = 1011
	CloseTLSHandshake            = 1015 // reserved; never sent on the wire
)
|
||||
|
||||
// The message types are defined in RFC 6455, section 11.8.
const (
	// TextMessage denotes a text data message. The text message payload is
	// interpreted as UTF-8 encoded text data.
	TextMessage = 1

	// BinaryMessage denotes a binary data message.
	BinaryMessage = 2

	// CloseMessage denotes a close control message. The optional message
	// payload contains a numeric code and text. Use the FormatCloseMessage
	// function to format a close message payload.
	CloseMessage = 8

	// PingMessage denotes a ping control message. The optional message payload
	// is UTF-8 encoded text.
	PingMessage = 9

	// PongMessage denotes a pong control message. The optional message payload
	// is UTF-8 encoded text.
	PongMessage = 10
)
|
||||
|
||||
// ErrCloseSent is returned when the application writes a message to the
|
||||
// connection after sending a close message.
|
||||
var ErrCloseSent = errors.New("websocket: close sent")
|
||||
|
||||
// ErrReadLimit is returned when reading a message that is larger than the
|
||||
// read limit set for the connection.
|
||||
var ErrReadLimit = errors.New("websocket: read limit exceeded")
|
||||
|
||||
// netError satisfies the net.Error interface so callers can inspect the
// Temporary and Timeout properties of errors produced by this package.
type netError struct {
	msg       string // text returned by Error
	temporary bool   // reported by Temporary
	timeout   bool   // reported by Timeout
}

func (e *netError) Error() string {
	return e.msg
}

func (e *netError) Temporary() bool {
	return e.temporary
}

func (e *netError) Timeout() bool {
	return e.timeout
}
|
||||
|
||||
// closeError is the error surfaced to the reader when the peer sends a close
// frame whose code is neither "normal closure" nor "going away".
type closeError struct {
	code int    // close code parsed from the frame payload
	text string // optional UTF-8 reason text
}

// Error formats the close code and reason, e.g. "websocket: close 1002 bad".
func (e *closeError) Error() string {
	var b []byte
	b = append(b, "websocket: close "...)
	b = strconv.AppendInt(b, int64(e.code), 10)
	b = append(b, ' ')
	b = append(b, e.text...)
	return string(b)
}
|
||||
|
||||
// Sentinel errors shared by the read and write paths.
var (
	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true}
	errUnexpectedEOF       = &closeError{code: CloseAbnormalClosure, text: io.ErrUnexpectedEOF.Error()}
	errBadWriteOpCode      = errors.New("websocket: bad write message type")
	errWriteClosed         = errors.New("websocket: write closed")
	errInvalidControlFrame = errors.New("websocket: invalid control frame")
)
|
||||
|
||||
func hideTempErr(err error) error {
|
||||
if e, ok := err.(net.Error); ok && e.Temporary() {
|
||||
err = &netError{msg: e.Error(), timeout: e.Timeout()}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func isControl(frameType int) bool {
|
||||
return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
|
||||
}
|
||||
|
||||
func isData(frameType int) bool {
|
||||
return frameType == TextMessage || frameType == BinaryMessage
|
||||
}
|
||||
|
||||
// maskBytes XORs b in place with the 4-byte mask key, starting at key offset
// pos, and returns the key offset (0..3) at which to continue masking.
func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := 0; i < len(b); i++ {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
|
||||
|
||||
// newMaskKey returns a fresh 4-byte client masking key, derived from a single
// 32-bit value produced by math/rand.
func newMaskKey() [4]byte {
	v := rand.Uint32()
	var k [4]byte
	k[0] = byte(v)
	k[1] = byte(v >> 8)
	k[2] = byte(v >> 16)
	k[3] = byte(v >> 24)
	return k
}
|
||||
|
||||
// Conn represents a WebSocket connection.
//
// Writes are serialized through the mu channel; the read-side fields are
// only safe for use by a single reading goroutine.
type Conn struct {
	conn        net.Conn
	isServer    bool   // servers read masked frames and write unmasked ones
	subprotocol string // negotiated Sec-Websocket-Protocol value

	// Write fields
	mu        chan bool // used as mutex to protect write to conn and closeSent
	closeSent bool      // true if close message was sent

	// Message writer fields.
	writeErr       error  // sticky write error; once set all writes fail
	writeBuf       []byte // frame is constructed in this buffer.
	writePos       int    // end of data in writeBuf.
	writeFrameType int    // type of the current frame.
	writeSeq       int    // incremented to invalidate message writers.
	writeDeadline  time.Time

	// Read fields
	readErr       error
	br            *bufio.Reader
	readRemaining int64 // bytes remaining in current frame.
	readFinal     bool  // true if the current message's final frame was read (no continuation pending).
	readSeq       int   // incremented to invalidate message readers.
	readLength    int64 // Message size.
	readLimit     int64 // Maximum message size.
	readMaskPos   int
	readMaskKey   [4]byte
	handlePong    func(string) error
	handlePing    func(string) error
}
||||
|
||||
// newConn allocates a Conn over an existing network connection. isServer
// selects the masking direction; zero buffer sizes fall back to the 4096
// defaults.
func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn {
	// A buffered channel of one acts as the write mutex: a token in the
	// channel means the lock is free.
	mu := make(chan bool, 1)
	mu <- true

	if readBufferSize == 0 {
		readBufferSize = defaultReadBufferSize
	}
	if writeBufferSize == 0 {
		writeBufferSize = defaultWriteBufferSize
	}

	c := &Conn{
		isServer: isServer,
		br:       bufio.NewReaderSize(conn, readBufferSize),
		conn:     conn,
		mu:       mu,
		// readFinal starts true: no fragmented message is in progress.
		readFinal: true,
		// Reserve room for the largest possible frame header ahead of the
		// payload area.
		writeBuf:       make([]byte, writeBufferSize+maxFrameHeaderSize),
		writeFrameType: noFrame,
		writePos:       maxFrameHeaderSize,
	}
	// SetPingHandler/SetPongHandler are defined elsewhere in this file;
	// passing nil presumably installs their default handlers — confirm there.
	c.SetPingHandler(nil)
	c.SetPongHandler(nil)
	return c
}
|
||||
|
||||
// Subprotocol returns the negotiated protocol for the connection.
func (c *Conn) Subprotocol() string {
	return c.subprotocol
}

// Close closes the underlying network connection without sending or waiting
// for a close frame.
func (c *Conn) Close() error {
	return c.conn.Close()
}

// LocalAddr returns the local network address.
func (c *Conn) LocalAddr() net.Addr {
	return c.conn.LocalAddr()
}

// RemoteAddr returns the remote network address.
func (c *Conn) RemoteAddr() net.Addr {
	return c.conn.RemoteAddr()
}
|
||||
|
||||
// Write methods

// write acquires the write lock, then writes bufs to the connection under the
// given deadline. frameType is used only to track close state: once a close
// frame has been sent, all further writes fail with ErrCloseSent.
func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
	// Acquire the channel-based write mutex.
	<-c.mu
	defer func() { c.mu <- true }()

	if c.closeSent {
		return ErrCloseSent
	} else if frameType == CloseMessage {
		c.closeSent = true
	}

	c.conn.SetWriteDeadline(deadline)
	for _, buf := range bufs {
		if len(buf) > 0 {
			n, err := c.conn.Write(buf)
			if n != len(buf) {
				// Close on partial write: the stream is no longer a valid
				// sequence of frames.
				c.conn.Close()
			}
			if err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||
|
||||
// WriteControl writes a control message with the given deadline. The allowed
// message types are CloseMessage, PingMessage and PongMessage.
func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error {
	if !isControl(messageType) {
		return errBadWriteOpCode
	}
	// Control frame payloads are limited to 125 bytes (RFC 6455 5.5).
	if len(data) > maxControlFramePayloadSize {
		return errInvalidControlFrame
	}

	b0 := byte(messageType) | finalBit // control frames are never fragmented
	b1 := byte(len(data))
	if !c.isServer {
		b1 |= maskBit // clients must mask their frames
	}

	// Build the complete frame in a local buffer so it is sent in one call
	// and does not disturb the in-progress message in c.writeBuf.
	buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize)
	buf = append(buf, b0, b1)

	if c.isServer {
		buf = append(buf, data...)
	} else {
		key := newMaskKey()
		buf = append(buf, key[:]...)
		buf = append(buf, data...)
		// Payload starts after 2 header bytes + 4 key bytes.
		maskBytes(key, 0, buf[6:])
	}

	// Translate the absolute deadline into a wait budget for acquiring the
	// write lock below; a zero deadline means effectively forever.
	d := time.Hour * 1000
	if !deadline.IsZero() {
		d = deadline.Sub(time.Now())
		if d < 0 {
			return errWriteTimeout
		}
	}

	// Acquire the write mutex, but give up when the deadline passes.
	timer := time.NewTimer(d)
	select {
	case <-c.mu:
		timer.Stop()
	case <-timer.C:
		return errWriteTimeout
	}
	defer func() { c.mu <- true }()

	if c.closeSent {
		return ErrCloseSent
	} else if messageType == CloseMessage {
		c.closeSent = true
	}

	c.conn.SetWriteDeadline(deadline)
	n, err := c.conn.Write(buf)
	if n != 0 && n != len(buf) {
		// A partial write leaves the stream mid-frame; tear it down.
		c.conn.Close()
	}
	return err
}
|
||||
|
||||
// NextWriter returns a writer for the next message to send. The writer's
// Close method flushes the complete message to the network.
//
// There can be at most one open writer on a connection. NextWriter closes the
// previous writer if the application has not already done so.
//
// The NextWriter method and the writers returned from the method cannot be
// accessed by more than one goroutine at a time.
func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
	// Write errors are sticky: once a write fails the connection is unusable.
	if c.writeErr != nil {
		return nil, c.writeErr
	}

	// Finish (flush as final) any message the application left open.
	if c.writeFrameType != noFrame {
		if err := c.flushFrame(true, nil); err != nil {
			return nil, err
		}
	}

	if !isControl(messageType) && !isData(messageType) {
		return nil, errBadWriteOpCode
	}

	c.writeFrameType = messageType
	// The writer captures writeSeq so stale writers fail after invalidation.
	return messageWriter{c, c.writeSeq}, nil
}
|
||||
|
||||
// flushFrame writes the buffered frame (plus the optional extra payload) to
// the connection. final sets the FIN bit; after a final frame the current
// message writer is invalidated via writeSeq.
func (c *Conn) flushFrame(final bool, extra []byte) error {
	length := c.writePos - maxFrameHeaderSize + len(extra)

	// Check for invalid control frames.
	if isControl(c.writeFrameType) &&
		(!final || length > maxControlFramePayloadSize) {
		// Reset writer state before reporting the error.
		c.writeSeq++
		c.writeFrameType = noFrame
		c.writePos = maxFrameHeaderSize
		return errInvalidControlFrame
	}

	b0 := byte(c.writeFrameType)
	if final {
		b0 |= finalBit
	}
	b1 := byte(0)
	if !c.isServer {
		b1 |= maskBit
	}

	// Assume that the frame starts at beginning of c.writeBuf.
	framePos := 0
	if c.isServer {
		// Adjust up if mask not included in the header.
		framePos = 4
	}

	// Advance framePos for the shorter length encodings so that the header
	// ends exactly at maxFrameHeaderSize, contiguous with the payload.
	switch {
	case length >= 65536:
		c.writeBuf[framePos] = b0
		c.writeBuf[framePos+1] = b1 | 127
		binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
	case length > 125:
		framePos += 6
		c.writeBuf[framePos] = b0
		c.writeBuf[framePos+1] = b1 | 126
		binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
	default:
		framePos += 8
		c.writeBuf[framePos] = b0
		c.writeBuf[framePos+1] = b1 | byte(length)
	}

	if !c.isServer {
		// Client frames are masked; the key occupies the last 4 header bytes.
		key := newMaskKey()
		copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
		maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos])
		if len(extra) > 0 {
			// extra would need masking too; only the server path supports it.
			c.writeErr = errors.New("websocket: internal error, extra used in client mode")
			return c.writeErr
		}
	}

	// Write the buffers to the connection.
	c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra)

	// Setup for next frame.
	c.writePos = maxFrameHeaderSize
	c.writeFrameType = continuationFrame
	if final {
		c.writeSeq++
		c.writeFrameType = noFrame
	}
	return c.writeErr
}
|
||||
|
||||
// messageWriter is the io.WriteCloser returned by NextWriter. It is valid
// only while the connection's writeSeq still equals seq.
type messageWriter struct {
	c   *Conn
	seq int // writeSeq captured at creation; mismatch means writer closed
}
|
||||
|
||||
func (w messageWriter) err() error {
|
||||
c := w.c
|
||||
if c.writeSeq != w.seq {
|
||||
return errWriteClosed
|
||||
}
|
||||
if c.writeErr != nil {
|
||||
return c.writeErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w messageWriter) ncopy(max int) (int, error) {
|
||||
n := len(w.c.writeBuf) - w.c.writePos
|
||||
if n <= 0 {
|
||||
if err := w.c.flushFrame(false, nil); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n = len(w.c.writeBuf) - w.c.writePos
|
||||
}
|
||||
if n > max {
|
||||
n = max
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// write appends p to the message, flushing intermediate frames as the buffer
// fills. final marks p as the last chunk of the message.
func (w messageWriter) write(final bool, p []byte) (int, error) {
	if err := w.err(); err != nil {
		return 0, err
	}

	if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
		// Don't buffer large messages: pass p directly as the frame payload.
		// Server frames are unmasked, so no copy into writeBuf is needed.
		err := w.c.flushFrame(final, p)
		if err != nil {
			return 0, err
		}
		return len(p), nil
	}

	nn := len(p)
	for len(p) > 0 {
		n, err := w.ncopy(len(p))
		if err != nil {
			return 0, err
		}
		copy(w.c.writeBuf[w.c.writePos:], p[:n])
		w.c.writePos += n
		p = p[n:]
	}
	return nn, nil
}
|
||||
|
||||
// Write appends p to the current message as a non-final chunk; data is not
// necessarily sent until the writer is closed or the buffer fills.
func (w messageWriter) Write(p []byte) (int, error) {
	return w.write(false, p)
}
|
||||
|
||||
// WriteString appends p to the current message. It mirrors write's copy loop
// but copies directly from the string, avoiding a []byte conversion.
func (w messageWriter) WriteString(p string) (int, error) {
	if err := w.err(); err != nil {
		return 0, err
	}

	nn := len(p)
	for len(p) > 0 {
		n, err := w.ncopy(len(p))
		if err != nil {
			return 0, err
		}
		copy(w.c.writeBuf[w.c.writePos:], p[:n])
		w.c.writePos += n
		p = p[n:]
	}
	return nn, nil
}
|
||||
|
||||
// ReadFrom appends everything read from r to the message, flushing
// intermediate frames whenever the write buffer fills. io.EOF from r is
// treated as normal end of input, not an error.
func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) {
	if err := w.err(); err != nil {
		return 0, err
	}
	for {
		if w.c.writePos == len(w.c.writeBuf) {
			err = w.c.flushFrame(false, nil)
			if err != nil {
				break
			}
		}
		var n int
		n, err = r.Read(w.c.writeBuf[w.c.writePos:])
		w.c.writePos += n
		nn += int64(n)
		if err != nil {
			if err == io.EOF {
				err = nil
			}
			break
		}
	}
	return nn, err
}
|
||||
|
||||
func (w messageWriter) Close() error {
|
||||
if err := w.err(); err != nil {
|
||||
return err
|
||||
}
|
||||
return w.c.flushFrame(true, nil)
|
||||
}
|
||||
|
||||
// WriteMessage is a helper method for getting a writer using NextWriter,
// writing the message and closing the writer.
func (c *Conn) WriteMessage(messageType int, data []byte) error {
	wr, err := c.NextWriter(messageType)
	if err != nil {
		return err
	}
	w := wr.(messageWriter)
	if _, err := w.write(true, data); err != nil {
		return err
	}
	// write(true, ...) may already have flushed the final frame (the
	// large-message path); only flush here if the writer is still current.
	if c.writeSeq == w.seq {
		if err := c.flushFrame(true, nil); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// SetWriteDeadline sets the write deadline on the underlying network
// connection. After a write has timed out, the websocket state is corrupt and
// all future writes will return an error. A zero value for t means writes will
// not time out.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	// The deadline is applied to the net.Conn at each frame write.
	c.writeDeadline = t
	return nil
}
|
||||
|
||||
// Read methods
|
||||
|
||||
// readFull is like io.ReadFull except that io.EOF is never returned: a short
// read ending in io.EOF is reported as errUnexpectedEOF instead, and an error
// delivered together with the final bytes of a complete read is discarded.
func (c *Conn) readFull(p []byte) (err error) {
	var n int
	for n < len(p) && err == nil {
		var nn int
		nn, err = c.br.Read(p[n:])
		n += nn
	}
	if n == len(p) {
		// Got everything requested; ignore any error returned alongside it.
		err = nil
	} else if err == io.EOF {
		err = errUnexpectedEOF
	}
	return
}
|
||||
|
||||
// advanceFrame reads the next frame header (discarding any unread remainder
// of the previous frame), handles control frames inline, and returns the
// opcode of the next data/continuation frame for the reader to consume.
func (c *Conn) advanceFrame() (int, error) {

	// 1. Skip remainder of previous frame.

	if c.readRemaining > 0 {
		if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil {
			return noFrame, err
		}
	}

	// 2. Read and parse first two bytes of frame header.

	var b [8]byte
	if err := c.readFull(b[:2]); err != nil {
		return noFrame, err
	}

	final := b[0]&finalBit != 0
	frameType := int(b[0] & 0xf)
	reserved := int((b[0] >> 4) & 0x7)
	mask := b[1]&maskBit != 0
	c.readRemaining = int64(b[1] & 0x7f)

	// No extensions are negotiated, so all RSV bits must be zero.
	if reserved != 0 {
		return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved))
	}

	switch frameType {
	case CloseMessage, PingMessage, PongMessage:
		if c.readRemaining > maxControlFramePayloadSize {
			return noFrame, c.handleProtocolError("control frame length > 125")
		}
		if !final {
			return noFrame, c.handleProtocolError("control frame not final")
		}
	case TextMessage, BinaryMessage:
		if !c.readFinal {
			return noFrame, c.handleProtocolError("message start before final message frame")
		}
		c.readFinal = final
	case continuationFrame:
		if c.readFinal {
			return noFrame, c.handleProtocolError("continuation after final message frame")
		}
		c.readFinal = final
	default:
		return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType))
	}

	// 3. Read and parse frame length. Values 126 and 127 select the 16- and
	// 64-bit extended length encodings respectively.

	switch c.readRemaining {
	case 126:
		if err := c.readFull(b[:2]); err != nil {
			return noFrame, err
		}
		c.readRemaining = int64(binary.BigEndian.Uint16(b[:2]))
	case 127:
		if err := c.readFull(b[:8]); err != nil {
			return noFrame, err
		}
		c.readRemaining = int64(binary.BigEndian.Uint64(b[:8]))
	}

	// 4. Handle frame masking. Clients must mask, servers must not.

	if mask != c.isServer {
		return noFrame, c.handleProtocolError("incorrect mask flag")
	}

	if mask {
		c.readMaskPos = 0
		if err := c.readFull(c.readMaskKey[:]); err != nil {
			return noFrame, err
		}
	}

	// 5. For text and binary messages, enforce read limit and return.

	if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage {

		c.readLength += c.readRemaining
		if c.readLimit > 0 && c.readLength > c.readLimit {
			// Tell the peer why we are closing, then fail the read.
			c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait))
			return noFrame, ErrReadLimit
		}

		return frameType, nil
	}

	// 6. Read control frame payload.

	var payload []byte
	if c.readRemaining > 0 {
		payload = make([]byte, c.readRemaining)
		c.readRemaining = 0
		if err := c.readFull(payload); err != nil {
			return noFrame, err
		}
		if c.isServer {
			maskBytes(c.readMaskKey, 0, payload)
		}
	}

	// 7. Process control frame payload.

	switch frameType {
	case PongMessage:
		if err := c.handlePong(string(payload)); err != nil {
			return noFrame, err
		}
	case PingMessage:
		if err := c.handlePing(string(payload)); err != nil {
			return noFrame, err
		}
	case CloseMessage:
		// Echo a close frame back, then surface the close to the reader:
		// io.EOF for a clean close, *closeError otherwise.
		c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait))
		closeCode := CloseNoStatusReceived
		closeText := ""
		if len(payload) >= 2 {
			closeCode = int(binary.BigEndian.Uint16(payload))
			closeText = string(payload[2:])
		}
		switch closeCode {
		case CloseNormalClosure, CloseGoingAway:
			return noFrame, io.EOF
		default:
			return noFrame, &closeError{code: closeCode, text: closeText}
		}
	}

	return frameType, nil
}
|
||||
|
||||
func (c *Conn) handleProtocolError(message string) error {
|
||||
c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
|
||||
return errors.New("websocket: " + message)
|
||||
}
|
||||
|
||||
// NextReader returns the next data message received from the peer. The
|
||||
// returned messageType is either TextMessage or BinaryMessage.
|
||||
//
|
||||
// There can be at most one open reader on a connection. NextReader discards
|
||||
// the previous message if the application has not already consumed it.
|
||||
//
|
||||
// The NextReader method and the readers returned from the method cannot be
|
||||
// accessed by more than one goroutine at a time.
|
||||
func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
|
||||
|
||||
c.readSeq++
|
||||
c.readLength = 0
|
||||
|
||||
for c.readErr == nil {
|
||||
frameType, err := c.advanceFrame()
|
||||
if err != nil {
|
||||
c.readErr = hideTempErr(err)
|
||||
break
|
||||
}
|
||||
if frameType == TextMessage || frameType == BinaryMessage {
|
||||
return frameType, messageReader{c, c.readSeq}, nil
|
||||
}
|
||||
}
|
||||
return noFrame, nil, c.readErr
|
||||
}
|
||||
|
||||
type messageReader struct {
|
||||
c *Conn
|
||||
seq int
|
||||
}
|
||||
|
||||
func (r messageReader) Read(b []byte) (int, error) {
|
||||
|
||||
if r.seq != r.c.readSeq {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
for r.c.readErr == nil {
|
||||
|
||||
if r.c.readRemaining > 0 {
|
||||
if int64(len(b)) > r.c.readRemaining {
|
||||
b = b[:r.c.readRemaining]
|
||||
}
|
||||
n, err := r.c.br.Read(b)
|
||||
r.c.readErr = hideTempErr(err)
|
||||
if r.c.isServer {
|
||||
r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b[:n])
|
||||
}
|
||||
r.c.readRemaining -= int64(n)
|
||||
return n, r.c.readErr
|
||||
}
|
||||
|
||||
if r.c.readFinal {
|
||||
r.c.readSeq++
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
frameType, err := r.c.advanceFrame()
|
||||
switch {
|
||||
case err != nil:
|
||||
r.c.readErr = hideTempErr(err)
|
||||
case frameType == TextMessage || frameType == BinaryMessage:
|
||||
r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader")
|
||||
}
|
||||
}
|
||||
|
||||
err := r.c.readErr
|
||||
if err == io.EOF && r.seq == r.c.readSeq {
|
||||
err = errUnexpectedEOF
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// ReadMessage is a helper method for getting a reader using NextReader and
|
||||
// reading from that reader to a buffer.
|
||||
func (c *Conn) ReadMessage() (messageType int, p []byte, err error) {
|
||||
var r io.Reader
|
||||
messageType, r, err = c.NextReader()
|
||||
if err != nil {
|
||||
return messageType, nil, err
|
||||
}
|
||||
p, err = ioutil.ReadAll(r)
|
||||
return messageType, p, err
|
||||
}
|
||||
|
||||
// SetReadDeadline sets the read deadline on the underlying network connection.
|
||||
// After a read has timed out, the websocket connection state is corrupt and
|
||||
// all future reads will return an error. A zero value for t means reads will
|
||||
// not time out.
|
||||
func (c *Conn) SetReadDeadline(t time.Time) error {
|
||||
return c.conn.SetReadDeadline(t)
|
||||
}
|
||||
|
||||
// SetReadLimit sets the maximum size for a message read from the peer. If a
|
||||
// message exceeds the limit, the connection sends a close frame to the peer
|
||||
// and returns ErrReadLimit to the application.
|
||||
func (c *Conn) SetReadLimit(limit int64) {
|
||||
c.readLimit = limit
|
||||
}
|
||||
|
||||
// SetPingHandler sets the handler for ping messages received from the peer.
|
||||
// The default ping handler sends a pong to the peer.
|
||||
func (c *Conn) SetPingHandler(h func(string) error) {
|
||||
if h == nil {
|
||||
h = func(message string) error {
|
||||
c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
c.handlePing = h
|
||||
}
|
||||
|
||||
// SetPongHandler sets the handler for pong messages received from the peer.
|
||||
// The default pong handler does nothing.
|
||||
func (c *Conn) SetPongHandler(h func(string) error) {
|
||||
if h == nil {
|
||||
h = func(string) error { return nil }
|
||||
}
|
||||
c.handlePong = h
|
||||
}
|
||||
|
||||
// UnderlyingConn returns the internal net.Conn. This can be used to further
|
||||
// modifications to connection specific flags.
|
||||
func (c *Conn) UnderlyingConn() net.Conn {
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// FormatCloseMessage formats closeCode and text as a WebSocket close message.
|
||||
func FormatCloseMessage(closeCode int, text string) []byte {
|
||||
buf := make([]byte, 2+len(text))
|
||||
binary.BigEndian.PutUint16(buf, uint16(closeCode))
|
||||
copy(buf[2:], text)
|
||||
return buf
|
||||
}
|
|
@ -0,0 +1,148 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application uses
|
||||
// the Upgrade function from an Upgrader object with a HTTP request handler
|
||||
// to get a pointer to a Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// if err = conn.WriteMessage(messageType, p); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In above snippet of code, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// snippet shows how to echo messages using the NextWriter and NextReader
|
||||
// methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received ping and pong messages by invoking a callback
|
||||
// function set with SetPingHandler and SetPongHandler methods. These callback
|
||||
// functions can be invoked from the ReadMessage method, the NextReader method
|
||||
// or from a call to the data message reader returned from NextReader.
|
||||
//
|
||||
// Connections handle received close messages by returning an error from the
|
||||
// ReadMessage method, the NextReader method or from a call to the data message
|
||||
// reader returned from NextReader.
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections do not support concurrent calls to the write methods
|
||||
// (NextWriter, SetWriteDeadline, WriteMessage) or concurrent calls to the read
|
||||
// methods methods (NextReader, SetReadDeadline, ReadMessage). Connections do
|
||||
// support a concurrent reader and writer.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Read is Required
|
||||
//
|
||||
// The application must read the connection to process ping and close messages
|
||||
// sent from the peer. If the application is not otherwise interested in
|
||||
// messages from the peer, then the application should start a goroutine to read
|
||||
// and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and not equal to the
|
||||
// Host request header.
|
||||
//
|
||||
// An application can allow connections from any origin by specifying a
|
||||
// function that always returns true:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// CheckOrigin: func(r *http.Request) bool { return true },
|
||||
// }
|
||||
//
|
||||
// The deprecated Upgrade function does not enforce an origin policy. It's the
|
||||
// application's responsibility to check the Origin header before calling
|
||||
// Upgrade.
|
||||
package websocket
|
|
@ -0,0 +1,57 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// WriteJSON is deprecated, use c.WriteJSON instead.
|
||||
func WriteJSON(c *Conn, v interface{}) error {
|
||||
return c.WriteJSON(v)
|
||||
}
|
||||
|
||||
// WriteJSON writes the JSON encoding of v to the connection.
|
||||
//
|
||||
// See the documentation for encoding/json Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (c *Conn) WriteJSON(v interface{}) error {
|
||||
w, err := c.NextWriter(TextMessage)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err1 := json.NewEncoder(w).Encode(v)
|
||||
err2 := w.Close()
|
||||
if err1 != nil {
|
||||
return err1
|
||||
}
|
||||
return err2
|
||||
}
|
||||
|
||||
// ReadJSON is deprecated, use c.ReadJSON instead.
|
||||
func ReadJSON(c *Conn, v interface{}) error {
|
||||
return c.ReadJSON(v)
|
||||
}
|
||||
|
||||
// ReadJSON reads the next JSON-encoded message from the connection and stores
|
||||
// it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for the encoding/json Unmarshal function for details
|
||||
// about the conversion of JSON to a Go value.
|
||||
func (c *Conn) ReadJSON(v interface{}) error {
|
||||
_, r, err := c.NextReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = json.NewDecoder(r).Decode(v)
|
||||
if err == io.EOF {
|
||||
// Decode returns io.EOF when the message is empty or all whitespace.
|
||||
// Convert to io.ErrUnexpectedEOF so that application can distinguish
|
||||
// between an error reading the JSON value and the connection closing.
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
|
@ -0,0 +1,247 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
|
||||
// size is zero, then a default value of 4096 is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is set, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client.
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, the host in the Origin header must not be set or
|
||||
// must match the host of the request.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return u.Host == r.Host
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
var (
|
||||
netConn net.Conn
|
||||
br *bufio.Reader
|
||||
err error
|
||||
)
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var rw *bufio.ReadWriter
|
||||
netConn, rw, err = h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
br = rw.Reader
|
||||
|
||||
if br.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
p := c.writeBuf[:0]
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-Websocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// This function is deprecated, use websocket.Upgrader instead.
|
||||
//
|
||||
// The application is responsible for checking the request origin before
|
||||
// calling Upgrade. An example implementation of the same origin policy is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", 403)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains token.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
for _, v := range header[name] {
|
||||
for _, s := range strings.Split(v, ",") {
|
||||
if strings.EqualFold(value, strings.TrimSpace(s)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
Copyright (C) 2010 nsf <no.smile.face@gmail.com>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,195 @@
|
|||
## An autocompletion daemon for the Go programming language
|
||||
|
||||
Gocode is a helper tool which is intended to be integrated with your source code editor, like vim, neovim and emacs. It provides several advanced capabilities, which currently includes:
|
||||
|
||||
- Context-sensitive autocompletion
|
||||
|
||||
It is called *daemon*, because it uses client/server architecture for caching purposes. In particular, it makes autocompletions very fast. Typical autocompletion time with warm cache is 30ms, which is barely noticeable.
|
||||
|
||||
Also watch the [demo screencast](http://nosmileface.ru/images/gocode-demo.swf).
|
||||
|
||||
![Gocode in vim](http://nosmileface.ru/images/gocode-screenshot.png)
|
||||
|
||||
![Gocode in emacs](http://nosmileface.ru/images/emacs-gocode.png)
|
||||
|
||||
### Setup
|
||||
|
||||
1. You should have a correctly installed Go compiler environment and your personal workspace ($GOPATH). If you have no idea what **$GOPATH** is, take a look [here](http://golang.org/doc/code.html). Please make sure that your **$GOPATH/bin** is available in your **$PATH**. This is important, because most editors assume that **gocode** binary is available in one of the directories, specified by your **$PATH** environment variable. Otherwise manually copy the **gocode** binary from **$GOPATH/bin** to a location which is part of your **$PATH** after getting it in step 2.
|
||||
|
||||
Do these steps only if you understand why you need to do them:
|
||||
|
||||
`export GOPATH=$HOME/goprojects`
|
||||
|
||||
`export PATH=$PATH:$GOPATH/bin`
|
||||
|
||||
2. Then you need to get the appropriate version of the gocode, for 6g/8g/5g compiler you can do this:
|
||||
|
||||
`go get -u github.com/nsf/gocode` (-u flag for "update")
|
||||
|
||||
Windows users should consider doing this instead:
|
||||
|
||||
`go get -u -ldflags -H=windowsgui github.com/nsf/gocode`
|
||||
|
||||
That way on the Windows OS gocode will be built as a GUI application and doing so solves hanging window issues with some of the editors.
|
||||
|
||||
3. Next steps are editor specific. See below.
|
||||
|
||||
### Vim setup
|
||||
|
||||
#### Vim manual installation
|
||||
|
||||
Note: As of go 1.5 there is no $GOROOT/misc/vim script. Suggested installation is via [vim-go plugin](https://github.com/fatih/vim-go).
|
||||
|
||||
In order to install vim scripts, you need to fulfill the following steps:
|
||||
|
||||
1. Install official Go vim scripts from **$GOROOT/misc/vim**. If you did that already, proceed to the step 2.
|
||||
|
||||
2. Install gocode vim scripts. Usually it's enough to do the following:
|
||||
|
||||
2.1. `vim/update.sh`
|
||||
|
||||
**update.sh** script does the following:
|
||||
|
||||
#!/bin/sh
|
||||
mkdir -p "$HOME/.vim/autoload"
|
||||
mkdir -p "$HOME/.vim/ftplugin/go"
|
||||
cp "${0%/*}/autoload/gocomplete.vim" "$HOME/.vim/autoload"
|
||||
cp "${0%/*}/ftplugin/go/gocomplete.vim" "$HOME/.vim/ftplugin/go"
|
||||
|
||||
2.2. Alternatively, you can create symlinks using symlink.sh script in order to avoid running update.sh after every gocode update.
|
||||
|
||||
**symlink.sh** script does the following:
|
||||
|
||||
#!/bin/sh
|
||||
cd "${0%/*}"
|
||||
ROOTDIR=`pwd`
|
||||
mkdir -p "$HOME/.vim/autoload"
|
||||
mkdir -p "$HOME/.vim/ftplugin/go"
|
||||
ln -s "$ROOTDIR/autoload/gocomplete.vim" "$HOME/.vim/autoload/"
|
||||
ln -s "$ROOTDIR/ftplugin/go/gocomplete.vim" "$HOME/.vim/ftplugin/go/"
|
||||
|
||||
3. Make sure vim has filetype plugin enabled. Simply add that to your **.vimrc**:
|
||||
|
||||
`filetype plugin on`
|
||||
|
||||
4. Autocompletion should work now. Use `<C-x><C-o>` for autocompletion (omnifunc autocompletion).
|
||||
|
||||
#### Using Vundle in Vim
|
||||
|
||||
Add the following line to your **.vimrc**:
|
||||
|
||||
`Plugin 'nsf/gocode', {'rtp': 'vim/'}`
|
||||
|
||||
And then update your packages by running `:PluginInstall`.
|
||||
|
||||
#### Using vim-plug in Vim
|
||||
|
||||
Add the following line to your **.vimrc**:
|
||||
|
||||
`Plug 'nsf/gocode', { 'rtp': 'vim', 'do': '~/.vim/plugged/gocode/vim/symlink.sh' }`
|
||||
|
||||
And then update your packages by running `:PlugInstall`.
|
||||
|
||||
#### Other
|
||||
|
||||
Alternatively take a look at the vundle/pathogen friendly repo: https://github.com/Blackrush/vim-gocode.
|
||||
|
||||
### Neovim setup
|
||||
#### Neovim manual installation
|
||||
|
||||
Neovim users should also follow `Vim manual installation`, except that you should goto `gocode/nvim` in step 2, and remember that, the Neovim configuration file is `~/.config/nvim/init.vim`.
|
||||
|
||||
#### Using Vundle in Neovim
|
||||
|
||||
Add the following line to your **init.vim**:
|
||||
|
||||
`Plugin 'nsf/gocode', {'rtp': 'nvim/'}`
|
||||
|
||||
And then update your packages by running `:PluginInstall`.
|
||||
|
||||
#### Using vim-plug in Neovim
|
||||
|
||||
Add the following line to your **init.vim**:
|
||||
|
||||
`Plug 'nsf/gocode', { 'rtp': 'nvim', 'do': '~/.config/nvim/plugged/gocode/nvim/symlink.sh' }`
|
||||
|
||||
And then update your packages by running `:PlugInstall`.
|
||||
|
||||
### Emacs setup
|
||||
|
||||
In order to install emacs script, you need to fulfill the following steps:
|
||||
|
||||
1. Install [auto-complete-mode](http://www.emacswiki.org/emacs/AutoComplete)
|
||||
|
||||
2. Copy **emacs/go-autocomplete.el** file from the gocode source distribution to a directory which is in your 'load-path' in emacs.
|
||||
|
||||
3. Add these lines to your **.emacs**:
|
||||
|
||||
(require 'go-autocomplete)
|
||||
(require 'auto-complete-config)
|
||||
(ac-config-default)
|
||||
|
||||
Also, there is an alternative plugin for emacs using company-mode. See `emacs-company/README` for installation instructions.
|
||||
|
||||
If you're a MacOSX user, you may find that script useful: https://github.com/purcell/exec-path-from-shell. It helps you with setting up the right environment variables as Go and gocode require it. By default it pulls the PATH, but don't forget to add the GOPATH as well, e.g.:
|
||||
|
||||
```
|
||||
(when (memq window-system '(mac ns))
|
||||
(exec-path-from-shell-initialize)
|
||||
(exec-path-from-shell-copy-env "GOPATH"))
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
You can change all available options using `gocode set` command. The config file uses json format and is usually stored somewhere in **~/.config/gocode** directory. On windows it's stored in the appropriate AppData folder. It's suggested to avoid modifying config file manually, do that using the `gocode set` command.
|
||||
|
||||
`gocode set` lists all options and their values.
|
||||
|
||||
`gocode set <option>` shows the value of that *option*.
|
||||
|
||||
`gocode set <option> <value>` sets the new *value* for that *option*.
|
||||
|
||||
- *propose-builtins*
|
||||
|
||||
A boolean option. If **true**, gocode will add built-in types, functions and constants to an autocompletion proposals. Default: **false**.
|
||||
|
||||
- *lib-path*
|
||||
|
||||
A string option. Allows you to add search paths for packages. By default, gocode only searches **$GOPATH/pkg/$GOOS_$GOARCH** and **$GOROOT/pkg/$GOOS_$GOARCH** in terms of previously existed environment variables. Also you can specify multiple paths using ':' (colon) as a separator (on Windows use semicolon ';'). The paths specified by *lib-path* are prepended to the default ones.
|
||||
|
||||
- *autobuild*
|
||||
|
||||
A boolean option. If **true**, gocode will try to automatically build out-of-date packages when their source files are modified, in order to obtain the freshest autocomplete results for them. This feature is experimental. Default: **false**.
|
||||
|
||||
- *force-debug-output*
|
||||
|
||||
A string option. If is not empty, gocode will forcefully redirect the logging into that file. Also forces enabling of the debug mode on the server side. Default: "" (empty).
|
||||
|
||||
- *package-lookup-mode*
|
||||
|
||||
A string option. If **go**, use standard Go package lookup rules. If **gb**, use gb-specific lookup rules. See https://github.com/constabulary/gb for details. Default: **go**.
|
||||
|
||||
- *close-timeout*
|
||||
|
||||
An integer option. If there have been no completion requests after this number of seconds, the gocode process will terminate. Defaults to 1800 (30 minutes).
|
||||
|
||||
### Debugging
|
||||
|
||||
If something went wrong, the first thing you may want to do is manually start the gocode daemon with a debug mode enabled and in a separate terminal window. It will show you all the stack traces, panics if any and additional info about autocompletion requests. Shutdown the daemon if it was already started and run a new one explicitly with a debug mode enabled:
|
||||
|
||||
`gocode close`
|
||||
|
||||
`gocode -s -debug`
|
||||
|
||||
Please, report bugs, feature suggestions and other rants to the [github issue tracker](http://github.com/nsf/gocode/issues) of this project.
|
||||
|
||||
### Developing
|
||||
|
||||
There is [Guide for IDE/editor plugin developers](docs/IDE_integration.md).
|
||||
|
||||
If you have troubles, please, contact me and I will try to do my best answering your questions. You can contact me via <a href="mailto:no.smile.face@gmail.com">email</a>. Or for short question find me on IRC: #go-nuts @ freenode.
|
||||
|
||||
### Misc
|
||||
|
||||
- It's a good idea to use the latest git version always. I'm trying to keep it in a working state.
|
||||
- Use `go install` (not `go build`) for building a local source tree. The objects in `pkg/` are needed for Gocode to work.
|
|
@ -0,0 +1,689 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// out_buffers
|
||||
//
|
||||
// Temporary structure for writing autocomplete response.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// fields must be exported for RPC
|
||||
// candidate is a single autocompletion proposal sent back to the client.
type candidate struct {
	Name  string     // identifier to insert at the cursor
	Type  string     // pretty-printed type of the declaration (see append_decl)
	Class decl_class // kind of declaration (const, var, type, func, ...)
}
|
||||
|
||||
// out_buffers accumulates completion candidates while a single
// autocomplete request is being answered.
type out_buffers struct {
	tmpbuf     *bytes.Buffer // scratch buffer used to pretty-print each candidate's type
	candidates []candidate   // accumulated proposals (sorted before returning)
	ctx        *auto_complete_context
	tmpns      map[string]bool // temporary name set used by append_embedded to skip shadowed members
	ignorecase bool            // when true, prefix matching is case-insensitive (fallback mode)
}
|
||||
|
||||
func new_out_buffers(ctx *auto_complete_context) *out_buffers {
|
||||
b := new(out_buffers)
|
||||
b.tmpbuf = bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
b.candidates = make([]candidate, 0, 64)
|
||||
b.ctx = ctx
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *out_buffers) Len() int {
|
||||
return len(b.candidates)
|
||||
}
|
||||
|
||||
func (b *out_buffers) Less(i, j int) bool {
|
||||
x := b.candidates[i]
|
||||
y := b.candidates[j]
|
||||
if x.Class == y.Class {
|
||||
return x.Name < y.Name
|
||||
}
|
||||
return x.Class < y.Class
|
||||
}
|
||||
|
||||
func (b *out_buffers) Swap(i, j int) {
|
||||
b.candidates[i], b.candidates[j] = b.candidates[j], b.candidates[i]
|
||||
}
|
||||
|
||||
func (b *out_buffers) append_decl(p, name string, decl *decl, class decl_class) {
|
||||
c1 := !g_config.ProposeBuiltins && decl.scope == g_universe_scope && decl.name != "Error"
|
||||
c2 := class != decl_invalid && decl.class != class
|
||||
c3 := class == decl_invalid && !has_prefix(name, p, b.ignorecase)
|
||||
c4 := !decl.matches()
|
||||
c5 := !check_type_expr(decl.typ)
|
||||
|
||||
if c1 || c2 || c3 || c4 || c5 {
|
||||
return
|
||||
}
|
||||
|
||||
decl.pretty_print_type(b.tmpbuf)
|
||||
b.candidates = append(b.candidates, candidate{
|
||||
Name: name,
|
||||
Type: b.tmpbuf.String(),
|
||||
Class: decl.class,
|
||||
})
|
||||
b.tmpbuf.Reset()
|
||||
}
|
||||
|
||||
// append_embedded proposes candidates from the embedded types of decl,
// recursively. p is the typed prefix; class optionally restricts the
// declaration class. A temporary name set (b.tmpns) is created at the first
// recursion level and used to skip members shadowed by outer declarations.
func (b *out_buffers) append_embedded(p string, decl *decl, class decl_class) {
	if decl.embedded == nil {
		return
	}

	first_level := false
	if b.tmpns == nil {
		// first level, create tmp namespace
		b.tmpns = make(map[string]bool)
		first_level = true

		// add all children of the current decl to the namespace
		for _, c := range decl.children {
			b.tmpns[c.name] = true
		}
	}

	for _, emb := range decl.embedded {
		typedecl := type_to_decl(emb, decl.scope)
		if typedecl == nil {
			continue
		}

		// prevent infinite recursion here
		if typedecl.flags&decl_visited != 0 {
			continue
		}
		typedecl.flags |= decl_visited
		// NOTE: defer inside a loop is intentional — visited flags are only
		// cleared when the outermost call returns, which is what makes the
		// cycle detection hold across the whole recursive walk.
		defer typedecl.clear_visited()

		for _, c := range typedecl.children {
			if _, has := b.tmpns[c.name]; has {
				continue
			}
			b.append_decl(p, c.name, c, class)
			b.tmpns[c.name] = true
		}
		b.append_embedded(p, typedecl, class)
	}

	if first_level {
		// remove tmp namespace
		b.tmpns = nil
	}
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// auto_complete_context
|
||||
//
|
||||
// Context that holds cache structures for autocompletion needs. It
|
||||
// includes cache for packages and for main package files.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// auto_complete_context holds all cached state needed to answer an
// autocompletion request: the current file, its package siblings, the merged
// package scope, and the shared package/declaration caches.
type auto_complete_context struct {
	current *auto_complete_file // currently edited file
	others  []*decl_file_cache  // other files of the current package
	pkg     *scope              // merged package-level scope (rebuilt by merge_decls)

	pcache    package_cache // packages cache
	declcache *decl_cache   // top-level declarations cache
}
|
||||
|
||||
func new_auto_complete_context(pcache package_cache, declcache *decl_cache) *auto_complete_context {
|
||||
c := new(auto_complete_context)
|
||||
c.current = new_auto_complete_file("", declcache.context)
|
||||
c.pcache = pcache
|
||||
c.declcache = declcache
|
||||
return c
|
||||
}
|
||||
|
||||
// update_caches brings all caches in line with the current file: it collects
// every package imported by the current file and its package siblings,
// refreshes those package caches, fixes up per-file import aliases, and
// finally merges all top-level declarations into the package scope.
func (c *auto_complete_context) update_caches() {
	// temporary map for packages that we need to check for a cache expiration
	// map is used as a set of unique items to prevent double checks
	ps := make(map[string]*package_file_cache)

	// collect import information from all of the files
	c.pcache.append_packages(ps, c.current.packages)
	c.others = get_other_package_files(c.current.name, c.current.package_name, c.declcache)
	for _, other := range c.others {
		c.pcache.append_packages(ps, other.packages)
	}

	// refresh all collected packages concurrently (panics if one fails)
	update_packages(ps)

	// fix imports for all files
	fixup_packages(c.current.filescope, c.current.packages, c.pcache)
	for _, f := range c.others {
		fixup_packages(f.filescope, f.packages, c.pcache)
	}

	// At this point we have collected all top level declarations, now we need to
	// merge them in the common package block.
	c.merge_decls()
}
|
||||
|
||||
// merge_decls rebuilds the package scope (c.pkg) from scratch on top of the
// universe scope, merging the top-level declarations of the current file,
// its package siblings, and any dot-imported packages of each file.
func (c *auto_complete_context) merge_decls() {
	c.pkg = new_scope(g_universe_scope)
	merge_decls(c.current.filescope, c.pkg, c.current.decls)
	merge_decls_from_packages(c.pkg, c.current.packages, c.pcache)
	for _, f := range c.others {
		merge_decls(f.filescope, c.pkg, f.decls)
		merge_decls_from_packages(c.pkg, f.packages, c.pcache)
	}
}
|
||||
|
||||
// make_decl_set flattens scope and all of its parent scopes into a single
// name -> declaration map (inner scopes win on collisions). The map is
// pre-sized from the package scope as a capacity hint.
func (c *auto_complete_context) make_decl_set(scope *scope) map[string]*decl {
	set := make(map[string]*decl, len(c.pkg.entities)*2)
	make_decl_set_recursive(set, scope)
	return set
}
|
||||
|
||||
func (c *auto_complete_context) get_candidates_from_set(set map[string]*decl, partial string, class decl_class, b *out_buffers) {
|
||||
for key, value := range set {
|
||||
if value == nil {
|
||||
continue
|
||||
}
|
||||
value.infer_type()
|
||||
b.append_decl(partial, key, value, class)
|
||||
}
|
||||
}
|
||||
|
||||
// get_candidates_from_decl proposes members of the declaration under the
// cursor (e.g. after a "."): its direct children, the fields of its
// underlying struct/interface type, and members of its embedded types.
func (c *auto_complete_context) get_candidates_from_decl(cc cursor_context, class decl_class, b *out_buffers) {
	// propose all children of a subject declaration and
	for _, decl := range cc.decl.children {
		// only exported names are reachable through a package selector
		if cc.decl.class == decl_package && !ast.IsExported(decl.name) {
			continue
		}
		if cc.struct_field {
			// if we're autocompleting struct field init, skip all methods
			if _, ok := decl.typ.(*ast.FuncType); ok {
				continue
			}
		}
		b.append_decl(cc.partial, decl.name, decl, class)
	}
	// propose all children of an underlying struct/interface type
	adecl := advance_to_struct_or_interface(cc.decl)
	if adecl != nil && adecl != cc.decl {
		for _, decl := range adecl.children {
			// only variables (fields) here — methods were proposed above
			if decl.class == decl_var {
				b.append_decl(cc.partial, decl.name, decl, class)
			}
		}
	}
	// propose all children of its embedded types
	b.append_embedded(cc.partial, cc.decl, class)
}
|
||||
|
||||
func (c *auto_complete_context) get_import_candidates(partial string, b *out_buffers) {
|
||||
pkgdirs := g_daemon.context.pkg_dirs()
|
||||
resultSet := map[string]struct{}{}
|
||||
for _, pkgdir := range pkgdirs {
|
||||
// convert srcpath to pkgpath and get candidates
|
||||
get_import_candidates_dir(pkgdir, filepath.FromSlash(partial), b.ignorecase, resultSet)
|
||||
}
|
||||
for k := range resultSet {
|
||||
b.candidates = append(b.candidates, candidate{Name: k, Class: decl_import})
|
||||
}
|
||||
}
|
||||
|
||||
func get_import_candidates_dir(root, partial string, ignorecase bool, r map[string]struct{}) {
|
||||
var fpath string
|
||||
var match bool
|
||||
if strings.HasSuffix(partial, "/") {
|
||||
fpath = filepath.Join(root, partial)
|
||||
} else {
|
||||
fpath = filepath.Join(root, filepath.Dir(partial))
|
||||
match = true
|
||||
}
|
||||
fi := readdir(fpath)
|
||||
for i := range fi {
|
||||
name := fi[i].Name()
|
||||
rel, err := filepath.Rel(root, filepath.Join(fpath, name))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if match && !has_prefix(rel, partial, ignorecase) {
|
||||
continue
|
||||
} else if fi[i].IsDir() {
|
||||
get_import_candidates_dir(root, rel+string(filepath.Separator), ignorecase, r)
|
||||
} else {
|
||||
ext := filepath.Ext(name)
|
||||
if ext != ".a" {
|
||||
continue
|
||||
} else {
|
||||
rel = rel[0 : len(rel)-2]
|
||||
}
|
||||
r[vendorlessImportPath(filepath.ToSlash(rel))] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// apropos computes completion candidates for file contents at byte offset
// cursor. It returns the sorted candidate list and the length of the typed
// partial identifier that the chosen completion should replace, or (nil, 0)
// when nothing can be proposed. (An earlier version returned three parallel
// slices; the candidate struct replaced them.)
func (c *auto_complete_context) apropos(file []byte, filename string, cursor int) ([]candidate, int) {
	c.current.cursor = cursor
	c.current.name = filename

	// Update caches and parse the current file.
	// This process is quite complicated, because I was trying to design it in a
	// concurrent fashion. Apparently I'm not really good at that. Hopefully
	// will be better in future.

	// Ugly hack, but it actually may help in some cases. Insert a
	// semicolon right at the cursor location.
	filesemi := make([]byte, len(file)+1)
	copy(filesemi, file[:cursor])
	filesemi[cursor] = ';'
	copy(filesemi[cursor+1:], file[cursor:])

	// Does full processing of the currently edited file (top-level declarations plus
	// active function).
	c.current.process_data(filesemi)

	// Updates cache of other files and packages. See the function for details of
	// the process. At the end merges all the top-level declarations into the package
	// block.
	c.update_caches()

	// And we're ready to Go. ;)

	b := new_out_buffers(c)

	partial := 0
	cc, ok := c.deduce_cursor_context(file, cursor)
	if !ok {
		// context deduction failed; as a last resort try resolving a known
		// but not-yet-imported package identifier, when that feature is on
		var d *decl
		if ident, ok := cc.expr.(*ast.Ident); ok && g_config.UnimportedPackages {
			d = resolveKnownPackageIdent(ident.Name, c.current.name, c.current.context)
		}
		if d == nil {
			return nil, 0
		}
		cc.decl = d
	}

	// a declaration keyword right before the cursor restricts the proposals
	// to that declaration class
	class := decl_invalid
	switch cc.partial {
	case "const":
		class = decl_const
	case "var":
		class = decl_var
	case "type":
		class = decl_type
	case "func":
		class = decl_func
	case "package":
		class = decl_package
	}

	if cc.decl_import {
		// completing an import path
		c.get_import_candidates(cc.partial, b)
		if cc.partial != "" && len(b.candidates) == 0 {
			// as a fallback, try case insensitive approach
			b.ignorecase = true
			c.get_import_candidates(cc.partial, b)
		}
	} else if cc.decl == nil {
		// In case if no declaration is a subject of completion, propose all:
		set := c.make_decl_set(c.current.scope)
		c.get_candidates_from_set(set, cc.partial, class, b)
		if cc.partial != "" && len(b.candidates) == 0 {
			// as a fallback, try case insensitive approach
			b.ignorecase = true
			c.get_candidates_from_set(set, cc.partial, class, b)
		}
	} else {
		// completing members of a specific declaration (e.g. after a dot)
		c.get_candidates_from_decl(cc, class, b)
		if cc.partial != "" && len(b.candidates) == 0 {
			// as a fallback, try case insensitive approach
			b.ignorecase = true
			c.get_candidates_from_decl(cc, class, b)
		}
	}
	partial = len(cc.partial)

	if len(b.candidates) == 0 {
		return nil, 0
	}

	sort.Sort(b)
	return b.candidates, partial
}
|
||||
|
||||
func update_packages(ps map[string]*package_file_cache) {
|
||||
// initiate package cache update
|
||||
done := make(chan bool)
|
||||
for _, p := range ps {
|
||||
go func(p *package_file_cache) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
print_backtrace(err)
|
||||
done <- false
|
||||
}
|
||||
}()
|
||||
p.update_cache()
|
||||
done <- true
|
||||
}(p)
|
||||
}
|
||||
|
||||
// wait for its completion
|
||||
for _ = range ps {
|
||||
if !<-done {
|
||||
panic("One of the package cache updaters panicked")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// merge_decls merges every top-level declaration of one file into the
// shared package scope and re-parents the file scope under it, so name
// lookup in the file falls through to the package block.
func merge_decls(filescope *scope, pkg *scope, decls map[string]*decl) {
	for _, d := range decls {
		pkg.merge_decl(d)
	}
	filescope.parent = pkg
}
|
||||
|
||||
func merge_decls_from_packages(pkgscope *scope, pkgs []package_import, pcache package_cache) {
|
||||
for _, p := range pkgs {
|
||||
path, alias := p.path, p.alias
|
||||
if alias != "." {
|
||||
continue
|
||||
}
|
||||
p := pcache[path].main
|
||||
if p == nil {
|
||||
continue
|
||||
}
|
||||
for _, d := range p.children {
|
||||
if ast.IsExported(d.name) {
|
||||
pkgscope.merge_decl(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func fixup_packages(filescope *scope, pkgs []package_import, pcache package_cache) {
|
||||
for _, p := range pkgs {
|
||||
path, alias := p.path, p.alias
|
||||
if alias == "" {
|
||||
alias = pcache[path].defalias
|
||||
}
|
||||
// skip packages that will be merged to the package scope
|
||||
if alias == "." {
|
||||
continue
|
||||
}
|
||||
filescope.replace_decl(alias, pcache[path].main)
|
||||
}
|
||||
}
|
||||
|
||||
// get_other_package_files loads (and brings up to date) the declaration
// cache of every other file in the same package, one goroutine per file.
// Panics if any updater panicked (signalled by a nil result).
func get_other_package_files(filename, packageName string, declcache *decl_cache) []*decl_file_cache {
	others := find_other_package_files(filename, packageName)

	ret := make([]*decl_file_cache, len(others))
	done := make(chan *decl_file_cache)

	for _, nm := range others {
		go func(name string) {
			// report nil on panic so the collection loop never blocks
			defer func() {
				if err := recover(); err != nil {
					print_backtrace(err)
					done <- nil
				}
			}()
			done <- declcache.get_and_update(name)
		}(nm)
	}

	// NOTE: results arrive in completion order, so ret is not necessarily
	// ordered like others.
	for i := range others {
		ret[i] = <-done
		if ret[i] == nil {
			panic("One of the decl cache updaters panicked")
		}
	}

	return ret
}
|
||||
|
||||
func find_other_package_files(filename, package_name string) []string {
|
||||
if filename == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
dir, file := filepath.Split(filename)
|
||||
files_in_dir, err := readdir_lstat(dir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
count := 0
|
||||
for _, stat := range files_in_dir {
|
||||
ok, _ := filepath.Match("*.go", stat.Name())
|
||||
if !ok || stat.Name() == file {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
}
|
||||
|
||||
out := make([]string, 0, count)
|
||||
for _, stat := range files_in_dir {
|
||||
const non_regular = os.ModeDir | os.ModeSymlink |
|
||||
os.ModeDevice | os.ModeNamedPipe | os.ModeSocket
|
||||
|
||||
ok, _ := filepath.Match("*.go", stat.Name())
|
||||
if !ok || stat.Name() == file || stat.Mode()&non_regular != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
abspath := filepath.Join(dir, stat.Name())
|
||||
if file_package_name(abspath) == package_name {
|
||||
n := len(out)
|
||||
out = out[:n+1]
|
||||
out[n] = abspath
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func file_package_name(filename string) string {
|
||||
file, _ := parser.ParseFile(token.NewFileSet(), filename, nil, parser.PackageClauseOnly)
|
||||
return file.Name.Name
|
||||
}
|
||||
|
||||
func make_decl_set_recursive(set map[string]*decl, scope *scope) {
|
||||
for name, ent := range scope.entities {
|
||||
if _, ok := set[name]; !ok {
|
||||
set[name] = ent
|
||||
}
|
||||
}
|
||||
if scope.parent != nil {
|
||||
make_decl_set_recursive(set, scope.parent)
|
||||
}
|
||||
}
|
||||
|
||||
func check_func_field_list(f *ast.FieldList) bool {
|
||||
if f == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, field := range f.List {
|
||||
if !check_type_expr(field.Type) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// checks for a type expression correctness, it the type expression has
|
||||
// ast.BadExpr somewhere, returns false, otherwise true
|
||||
func check_type_expr(e ast.Expr) bool {
|
||||
switch t := e.(type) {
|
||||
case *ast.StarExpr:
|
||||
return check_type_expr(t.X)
|
||||
case *ast.ArrayType:
|
||||
return check_type_expr(t.Elt)
|
||||
case *ast.SelectorExpr:
|
||||
return check_type_expr(t.X)
|
||||
case *ast.FuncType:
|
||||
a := check_func_field_list(t.Params)
|
||||
b := check_func_field_list(t.Results)
|
||||
return a && b
|
||||
case *ast.MapType:
|
||||
a := check_type_expr(t.Key)
|
||||
b := check_type_expr(t.Value)
|
||||
return a && b
|
||||
case *ast.Ellipsis:
|
||||
return check_type_expr(t.Elt)
|
||||
case *ast.ChanType:
|
||||
return check_type_expr(t.Value)
|
||||
case *ast.BadExpr:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// Status output
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type decl_slice []*decl
|
||||
|
||||
func (s decl_slice) Less(i, j int) bool {
|
||||
if s[i].class != s[j].class {
|
||||
return s[i].name < s[j].name
|
||||
}
|
||||
return s[i].class < s[j].class
|
||||
}
|
||||
func (s decl_slice) Len() int { return len(s) }
|
||||
func (s decl_slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
// ANSI terminal escape sequences used to colorize the status output
// (see auto_complete_context.status).
const (
	color_red          = "\033[0;31m"
	color_red_bold     = "\033[1;31m"
	color_green        = "\033[0;32m"
	color_green_bold   = "\033[1;32m"
	color_yellow       = "\033[0;33m"
	color_yellow_bold  = "\033[1;33m"
	color_blue         = "\033[0;34m"
	color_blue_bold    = "\033[1;34m"
	color_magenta      = "\033[0;35m"
	color_magenta_bold = "\033[1;35m"
	color_cyan         = "\033[0;36m"
	color_cyan_bold    = "\033[1;36m"
	color_white        = "\033[0;37m"
	color_white_bold   = "\033[1;37m"
	color_none         = "\033[0m" // reset to default
)
|
||||
|
||||
// g_decl_class_to_color maps a decl_class (used as the array index) to the
// ANSI color its entries are printed with in the status listing.
var g_decl_class_to_color = [...]string{
	decl_const:        color_white_bold,
	decl_var:          color_magenta,
	decl_type:         color_cyan,
	decl_func:         color_green,
	decl_package:      color_red,
	decl_methods_stub: color_red,
}
|
||||
|
||||
// g_decl_class_to_string_status maps a decl_class (used as the array index)
// to a fixed-width, right-aligned label for the status listing.
var g_decl_class_to_string_status = [...]string{
	decl_const:        "  const",
	decl_var:          "    var",
	decl_type:         "   type",
	decl_func:         "   func",
	decl_package:      "package",
	decl_methods_stub: "   stub",
}
|
||||
|
||||
func (c *auto_complete_context) status() string {
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 4096))
|
||||
fmt.Fprintf(buf, "Server's GOMAXPROCS == %d\n", runtime.GOMAXPROCS(0))
|
||||
fmt.Fprintf(buf, "\nPackage cache contains %d entries\n", len(c.pcache))
|
||||
fmt.Fprintf(buf, "\nListing these entries:\n")
|
||||
for _, mod := range c.pcache {
|
||||
fmt.Fprintf(buf, "\tname: %s (default alias: %s)\n", mod.name, mod.defalias)
|
||||
fmt.Fprintf(buf, "\timports %d declarations and %d packages\n", len(mod.main.children), len(mod.others))
|
||||
if mod.mtime == -1 {
|
||||
fmt.Fprintf(buf, "\tthis package stays in cache forever (built-in package)\n")
|
||||
} else {
|
||||
mtime := time.Unix(0, mod.mtime)
|
||||
fmt.Fprintf(buf, "\tlast modification time: %s\n", mtime)
|
||||
}
|
||||
fmt.Fprintf(buf, "\n")
|
||||
}
|
||||
if c.current.name != "" {
|
||||
fmt.Fprintf(buf, "Last edited file: %s (package: %s)\n", c.current.name, c.current.package_name)
|
||||
if len(c.others) > 0 {
|
||||
fmt.Fprintf(buf, "\nOther files from the current package:\n")
|
||||
}
|
||||
for _, f := range c.others {
|
||||
fmt.Fprintf(buf, "\t%s\n", f.name)
|
||||
}
|
||||
fmt.Fprintf(buf, "\nListing declarations from files:\n")
|
||||
|
||||
const status_decls = "\t%s%s" + color_none + " " + color_yellow + "%s" + color_none + "\n"
|
||||
const status_decls_children = "\t%s%s" + color_none + " " + color_yellow + "%s" + color_none + " (%d)\n"
|
||||
|
||||
fmt.Fprintf(buf, "\n%s:\n", c.current.name)
|
||||
ds := make(decl_slice, len(c.current.decls))
|
||||
i := 0
|
||||
for _, d := range c.current.decls {
|
||||
ds[i] = d
|
||||
i++
|
||||
}
|
||||
sort.Sort(ds)
|
||||
for _, d := range ds {
|
||||
if len(d.children) > 0 {
|
||||
fmt.Fprintf(buf, status_decls_children,
|
||||
g_decl_class_to_color[d.class],
|
||||
g_decl_class_to_string_status[d.class],
|
||||
d.name, len(d.children))
|
||||
} else {
|
||||
fmt.Fprintf(buf, status_decls,
|
||||
g_decl_class_to_color[d.class],
|
||||
g_decl_class_to_string_status[d.class],
|
||||
d.name)
|
||||
}
|
||||
}
|
||||
|
||||
for _, f := range c.others {
|
||||
fmt.Fprintf(buf, "\n%s:\n", f.name)
|
||||
ds = make(decl_slice, len(f.decls))
|
||||
i = 0
|
||||
for _, d := range f.decls {
|
||||
ds[i] = d
|
||||
i++
|
||||
}
|
||||
sort.Sort(ds)
|
||||
for _, d := range ds {
|
||||
if len(d.children) > 0 {
|
||||
fmt.Fprintf(buf, status_decls_children,
|
||||
g_decl_class_to_color[d.class],
|
||||
g_decl_class_to_string_status[d.class],
|
||||
d.name, len(d.children))
|
||||
} else {
|
||||
fmt.Fprintf(buf, status_decls,
|
||||
g_decl_class_to_color[d.class],
|
||||
g_decl_class_to_string_status[d.class],
|
||||
d.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@ -0,0 +1,418 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"log"
|
||||
)
|
||||
|
||||
func parse_decl_list(fset *token.FileSet, data []byte) ([]ast.Decl, error) {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("package p;")
|
||||
buf.Write(data)
|
||||
file, err := parser.ParseFile(fset, "", buf.Bytes(), parser.AllErrors)
|
||||
if err != nil {
|
||||
return file.Decls, err
|
||||
}
|
||||
return file.Decls, nil
|
||||
}
|
||||
|
||||
// log_parse_error logs a parse failure; a scanner.ErrorList is expanded into
// one log line per error under an intro header, anything else is logged as a
// single line.
func log_parse_error(intro string, err error) {
	el, ok := err.(scanner.ErrorList)
	if !ok {
		log.Printf("%s: %s", intro, err)
		return
	}
	log.Printf("%s:", intro)
	for _, e := range el {
		log.Printf(" %s", e)
	}
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// auto_complete_file
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// auto_complete_file holds the parsed state of the file currently being
// edited: its declarations, imports, and the scope chain built up while
// processing statements around the cursor.
type auto_complete_file struct {
	name         string // path of the edited file
	package_name string // package declared by the file

	decls     map[string]*decl // top-level declarations by name
	packages  []package_import // imports declared by the file
	filescope *scope           // outermost (file-level) scope
	scope     *scope           // current scope, advanced while walking statements

	cursor  int // for current file buffer only; -1 means "no cursor"
	fset    *token.FileSet
	context *package_lookup_context
}
|
||||
|
||||
func new_auto_complete_file(name string, context *package_lookup_context) *auto_complete_file {
|
||||
p := new(auto_complete_file)
|
||||
p.name = name
|
||||
p.cursor = -1
|
||||
p.fset = token.NewFileSet()
|
||||
p.context = context
|
||||
return p
|
||||
}
|
||||
|
||||
// offset converts a token.Pos from f.fset into a byte offset within the
// original buffer by subtracting the length of the synthetic "package p;"
// prefix that parse_decl_list injects before parsing. Positions inside (or
// before) that prefix therefore yield negative offsets.
func (f *auto_complete_file) offset(p token.Pos) int {
	const fixlen = len("package p;")
	return f.fset.Position(p).Offset - fixlen
}
|
||||
|
||||
// this one is used for current file buffer exclusively
//
// process_data parses the (semicolon-hacked) buffer: the declaration under
// the cursor is ripped out, the remainder is parsed for top-level
// declarations and imports, and the ripped-out block — when present — is
// parsed separately and its local declarations processed up to the cursor.
func (f *auto_complete_file) process_data(data []byte) {
	cur, filedata, block := rip_off_decl(data, f.cursor)
	file, err := parser.ParseFile(f.fset, "", filedata, parser.AllErrors)
	if err != nil && *g_debug {
		log_parse_error("Error parsing input file (outer block)", err)
	}
	// NOTE(review): file is used below even when err != nil — this assumes
	// the parser always returns a (possibly partial) AST for in-memory
	// input; confirm against go/parser's documented behavior.
	f.package_name = package_name(file)

	f.decls = make(map[string]*decl)
	f.packages = collect_package_imports(f.name, file.Decls, f.context)
	f.filescope = new_scope(nil)
	f.scope = f.filescope

	// pre-process each declaration (see anonymify_ast) before collection
	for _, d := range file.Decls {
		anonymify_ast(d, 0, f.filescope)
	}

	// process all top-level declarations
	for _, decl := range file.Decls {
		append_to_top_decls(f.decls, decl, f.scope)
	}
	if block != nil {
		// process local function as top-level declaration
		decls, err := parse_decl_list(f.fset, block)
		if err != nil && *g_debug {
			log_parse_error("Error parsing input file (inner block)", err)
		}

		for _, d := range decls {
			anonymify_ast(d, 0, f.filescope)
		}

		for _, decl := range decls {
			append_to_top_decls(f.decls, decl, f.scope)
		}

		// process function internals
		f.cursor = cur
		for _, decl := range decls {
			f.process_decl_locals(decl)
		}
	}

}
|
||||
|
||||
func (f *auto_complete_file) process_decl_locals(decl ast.Decl) {
|
||||
switch t := decl.(type) {
|
||||
case *ast.FuncDecl:
|
||||
if f.cursor_in(t.Body) {
|
||||
s := f.scope
|
||||
f.scope = new_scope(f.scope)
|
||||
|
||||
f.process_field_list(t.Recv, s)
|
||||
f.process_field_list(t.Type.Params, s)
|
||||
f.process_field_list(t.Type.Results, s)
|
||||
f.process_block_stmt(t.Body)
|
||||
}
|
||||
default:
|
||||
v := new(func_lit_visitor)
|
||||
v.ctx = f
|
||||
ast.Walk(v, decl)
|
||||
}
|
||||
}
|
||||
|
||||
// process_decl adds the declarations introduced by decl to the current
// scope. GenDecls that start after the cursor are skipped — they cannot be
// in effect at the completion point.
func (f *auto_complete_file) process_decl(decl ast.Decl) {
	if t, ok := decl.(*ast.GenDecl); ok && f.offset(t.TokPos) > f.cursor {
		return
	}
	prevscope := f.scope
	foreach_decl(decl, func(data *foreach_decl_struct) {
		class := ast_decl_class(data.decl)
		// non-type declarations advance the scope (see advance_scope);
		// the previous scope is kept as the scope the values resolve in
		if class != decl_type {
			f.scope, prevscope = advance_scope(f.scope)
		}
		for i, name := range data.names {
			typ, v, vi := data.type_value_index(i)

			d := new_decl_full(name.Name, class, 0, typ, v, vi, prevscope)
			if d == nil {
				return
			}

			f.scope.add_named_decl(d)
		}
	})
}
|
||||
|
||||
func (f *auto_complete_file) process_block_stmt(block *ast.BlockStmt) {
|
||||
if block != nil && f.cursor_in(block) {
|
||||
f.scope, _ = advance_scope(f.scope)
|
||||
|
||||
for _, stmt := range block.List {
|
||||
f.process_stmt(stmt)
|
||||
}
|
||||
|
||||
// hack to process all func literals
|
||||
v := new(func_lit_visitor)
|
||||
v.ctx = f
|
||||
ast.Walk(v, block)
|
||||
}
|
||||
}
|
||||
|
||||
// func_lit_visitor is an ast.Visitor that finds the function literal
// containing the cursor and processes its body (see Visit).
type func_lit_visitor struct {
	ctx *auto_complete_file // file whose cursor/scope state is updated
}
|
||||
|
||||
func (v *func_lit_visitor) Visit(node ast.Node) ast.Visitor {
|
||||
if t, ok := node.(*ast.FuncLit); ok && v.ctx.cursor_in(t.Body) {
|
||||
s := v.ctx.scope
|
||||
v.ctx.scope = new_scope(v.ctx.scope)
|
||||
|
||||
v.ctx.process_field_list(t.Type.Params, s)
|
||||
v.ctx.process_field_list(t.Type.Results, s)
|
||||
v.ctx.process_block_stmt(t.Body)
|
||||
|
||||
return nil
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// process_stmt dispatches a single statement to the appropriate handler,
// building up the scope chain for everything that can introduce names
// before or around the cursor. Statement kinds that cannot introduce names
// are ignored.
func (f *auto_complete_file) process_stmt(stmt ast.Stmt) {
	switch t := stmt.(type) {
	case *ast.DeclStmt:
		f.process_decl(t.Decl)
	case *ast.AssignStmt:
		f.process_assign_stmt(t)
	case *ast.IfStmt:
		// inside the head: only the init statement is in scope;
		// inside the statement: init, body and else branch all are
		if f.cursor_in_if_head(t) {
			f.process_stmt(t.Init)
		} else if f.cursor_in_if_stmt(t) {
			f.scope, _ = advance_scope(f.scope)
			f.process_stmt(t.Init)
			f.process_block_stmt(t.Body)
			f.process_stmt(t.Else)
		}
	case *ast.BlockStmt:
		f.process_block_stmt(t)
	case *ast.RangeStmt:
		f.process_range_stmt(t)
	case *ast.ForStmt:
		if f.cursor_in_for_head(t) {
			f.process_stmt(t.Init)
		} else if f.cursor_in(t.Body) {
			f.scope, _ = advance_scope(f.scope)

			f.process_stmt(t.Init)
			f.process_block_stmt(t.Body)
		}
	case *ast.SwitchStmt:
		f.process_switch_stmt(t)
	case *ast.TypeSwitchStmt:
		f.process_type_switch_stmt(t)
	case *ast.SelectStmt:
		f.process_select_stmt(t)
	case *ast.LabeledStmt:
		f.process_stmt(t.Stmt)
	}
}
|
||||
|
||||
// process_select_stmt handles a select statement containing the cursor: it
// finds the comm clause the cursor is in (the last clause whose colon
// precedes the cursor), binds a variable defined by its `v := <-ch` comm
// statement if any, and processes the clause body.
func (f *auto_complete_file) process_select_stmt(a *ast.SelectStmt) {
	if !f.cursor_in(a.Body) {
		return
	}
	var prevscope *scope
	f.scope, prevscope = advance_scope(f.scope)

	// the assertion is safe: a select body contains only *ast.CommClause
	var last_cursor_after *ast.CommClause
	for _, s := range a.Body.List {
		if cc := s.(*ast.CommClause); f.cursor > f.offset(cc.Colon) {
			last_cursor_after = cc
		}
	}

	if last_cursor_after != nil {
		if last_cursor_after.Comm != nil {
			//if lastCursorAfter.Lhs != nil && lastCursorAfter.Tok == token.DEFINE {
			if astmt, ok := last_cursor_after.Comm.(*ast.AssignStmt); ok && astmt.Tok == token.DEFINE {
				vname := astmt.Lhs[0].(*ast.Ident).Name
				v := new_decl_var(vname, nil, astmt.Rhs[0], -1, prevscope)
				f.scope.add_named_decl(v)
			}
		}
		for _, s := range last_cursor_after.Body {
			f.process_stmt(s)
		}
	}
}
|
||||
|
||||
// process_type_switch_stmt handles a type switch containing the cursor. The
// `v := x.(type)` variable is tracked specially: inside a single-type case
// clause its type is narrowed to that case's type before being added to the
// scope.
func (f *auto_complete_file) process_type_switch_stmt(a *ast.TypeSwitchStmt) {
	if !f.cursor_in(a.Body) {
		return
	}
	var prevscope *scope
	f.scope, prevscope = advance_scope(f.scope)

	f.process_stmt(a.Init)
	// type var
	var tv *decl
	if a, ok := a.Assign.(*ast.AssignStmt); ok {
		lhs := a.Lhs
		rhs := a.Rhs
		if lhs != nil && len(lhs) == 1 {
			tvname := lhs[0].(*ast.Ident).Name
			tv = new_decl_var(tvname, nil, rhs[0], -1, prevscope)
		}
	}

	// the clause containing the cursor is the last one whose colon
	// precedes the cursor
	var last_cursor_after *ast.CaseClause
	for _, s := range a.Body.List {
		if cc := s.(*ast.CaseClause); f.cursor > f.offset(cc.Colon) {
			last_cursor_after = cc
		}
	}

	if last_cursor_after != nil {
		if tv != nil {
			// a single-type case narrows the switch variable's type
			if last_cursor_after.List != nil && len(last_cursor_after.List) == 1 {
				tv.typ = last_cursor_after.List[0]
				tv.value = nil
			}
			f.scope.add_named_decl(tv)
		}
		for _, s := range last_cursor_after.Body {
			f.process_stmt(s)
		}
	}
}
|
||||
|
||||
func (f *auto_complete_file) process_switch_stmt(a *ast.SwitchStmt) {
|
||||
if !f.cursor_in(a.Body) {
|
||||
return
|
||||
}
|
||||
f.scope, _ = advance_scope(f.scope)
|
||||
|
||||
f.process_stmt(a.Init)
|
||||
var last_cursor_after *ast.CaseClause
|
||||
for _, s := range a.Body.List {
|
||||
if cc := s.(*ast.CaseClause); f.cursor > f.offset(cc.Colon) {
|
||||
last_cursor_after = cc
|
||||
}
|
||||
}
|
||||
if last_cursor_after != nil {
|
||||
for _, s := range last_cursor_after.Body {
|
||||
f.process_stmt(s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// process_range_stmt handles a range statement containing the cursor. When
// the range uses := it binds the key variable (value index 0 of the ranged
// expression) and, if present, the value variable (value index 1), marking
// both as range variables, then descends into the body.
func (f *auto_complete_file) process_range_stmt(a *ast.RangeStmt) {
	if !f.cursor_in(a.Body) {
		return
	}
	var prevscope *scope
	f.scope, prevscope = advance_scope(f.scope)

	if a.Tok == token.DEFINE {
		if t, ok := a.Key.(*ast.Ident); ok {
			d := new_decl_var(t.Name, nil, a.X, 0, prevscope)
			if d != nil {
				d.flags |= decl_rangevar
				f.scope.add_named_decl(d)
			}
		}

		if a.Value != nil {
			if t, ok := a.Value.(*ast.Ident); ok {
				d := new_decl_var(t.Name, nil, a.X, 1, prevscope)
				if d != nil {
					d.flags |= decl_rangevar
					f.scope.add_named_decl(d)
				}
			}
		}
	}

	f.process_block_stmt(a.Body)
}
|
||||
|
||||
// process_assign_stmt binds the variables introduced by a short variable
// declaration (:=). Plain assignments and statements past the cursor are
// ignored; if any left-hand side is not a simple identifier the whole
// statement is skipped.
func (f *auto_complete_file) process_assign_stmt(a *ast.AssignStmt) {
	if a.Tok != token.DEFINE || f.offset(a.TokPos) > f.cursor {
		return
	}

	names := make([]*ast.Ident, len(a.Lhs))
	for i, name := range a.Lhs {
		id, ok := name.(*ast.Ident)
		if !ok {
			// something is wrong, just ignore the whole stmt
			return
		}
		names[i] = id
	}

	var prevscope *scope
	f.scope, prevscope = advance_scope(f.scope)

	// pair each LHS name with its type/value/value-index on the RHS
	pack := decl_pack{names, nil, a.Rhs}
	for i, name := range pack.names {
		typ, v, vi := pack.type_value_index(i)
		d := new_decl_var(name.Name, typ, v, vi, prevscope)
		if d == nil {
			continue
		}

		f.scope.add_named_decl(d)
	}
}
|
||||
|
||||
func (f *auto_complete_file) process_field_list(field_list *ast.FieldList, s *scope) {
|
||||
if field_list != nil {
|
||||
decls := ast_field_list_to_decls(field_list, decl_var, 0, s, false)
|
||||
for _, d := range decls {
|
||||
f.scope.add_named_decl(d)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) cursor_in_if_head(s *ast.IfStmt) bool {
|
||||
if f.cursor > f.offset(s.If) && f.cursor <= f.offset(s.Body.Lbrace) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) cursor_in_if_stmt(s *ast.IfStmt) bool {
|
||||
if f.cursor > f.offset(s.If) {
|
||||
// magic -10 comes from auto_complete_file.offset method, see
|
||||
// len() expr in there
|
||||
if f.offset(s.End()) == -10 || f.cursor < f.offset(s.End()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) cursor_in_for_head(s *ast.ForStmt) bool {
|
||||
if f.cursor > f.offset(s.For) && f.cursor <= f.offset(s.Body.Lbrace) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *auto_complete_file) cursor_in(block *ast.BlockStmt) bool {
|
||||
if f.cursor == -1 || block == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if f.cursor > f.offset(block.Lbrace) && f.cursor <= f.offset(block.Rbrace) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -0,0 +1,182 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"net/rpc"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// do_client implements the gocode client side: it connects to the daemon
// (spawning one and retrying if the first dial fails) and dispatches the
// subcommand from the command line. Returns the process exit code.
func do_client() int {
	addr := *g_addr
	if *g_sock == "unix" {
		addr = get_socket_filename()
	}

	// client
	client, err := rpc.Dial(*g_sock, addr)
	if err != nil {
		// A stale unix socket left by a dead server keeps Dial failing;
		// remove it before spawning a fresh server.
		if *g_sock == "unix" && file_exists(addr) {
			os.Remove(addr)
		}

		err = try_run_server()
		if err != nil {
			fmt.Printf("%s\n", err.Error())
			return 1
		}
		// the freshly started server needs a moment; this retries the dial
		client, err = try_to_connect(*g_sock, addr)
		if err != nil {
			fmt.Printf("%s\n", err.Error())
			return 1
		}
	}
	defer client.Close()

	if flag.NArg() > 0 {
		switch flag.Arg(0) {
		case "autocomplete":
			cmd_auto_complete(client)
		case "close":
			cmd_close(client)
		case "status":
			cmd_status(client)
		case "drop-cache":
			cmd_drop_cache(client)
		case "set":
			cmd_set(client)
		default:
			fmt.Printf("unknown argument: %q, try running \"gocode -h\"\n", flag.Arg(0))
			return 1
		}
	}
	return 0
}
|
||||
|
||||
func try_run_server() error {
|
||||
path := get_executable_filename()
|
||||
args := []string{os.Args[0], "-s", "-sock", *g_sock, "-addr", *g_addr}
|
||||
cwd, _ := os.Getwd()
|
||||
|
||||
var err error
|
||||
stdin, err := os.Open(os.DevNull)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stdout, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stderr, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
procattr := os.ProcAttr{Dir: cwd, Env: os.Environ(), Files: []*os.File{stdin, stdout, stderr}}
|
||||
p, err := os.StartProcess(path, args, &procattr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return p.Release()
|
||||
}
|
||||
|
||||
// try_to_connect dials the gocode server, retrying every 10ms for up to
// one second; returns the last dial result (success or the final error).
func try_to_connect(network, address string) (client *rpc.Client, err error) {
	for elapsed := 0; ; elapsed += 10 {
		client, err = rpc.Dial(network, address)
		if err == nil || elapsed >= 1000 {
			return
		}
		time.Sleep(10 * time.Millisecond)
	}
}
|
||||
|
||||
func prepare_file_filename_cursor() ([]byte, string, int) {
|
||||
var file []byte
|
||||
var err error
|
||||
|
||||
if *g_input != "" {
|
||||
file, err = ioutil.ReadFile(*g_input)
|
||||
} else {
|
||||
file, err = ioutil.ReadAll(os.Stdin)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
var skipped int
|
||||
file, skipped = filter_out_shebang(file)
|
||||
|
||||
filename := *g_input
|
||||
cursor := -1
|
||||
|
||||
offset := ""
|
||||
switch flag.NArg() {
|
||||
case 2:
|
||||
offset = flag.Arg(1)
|
||||
case 3:
|
||||
filename = flag.Arg(1) // Override default filename
|
||||
offset = flag.Arg(2)
|
||||
}
|
||||
|
||||
if offset != "" {
|
||||
if offset[0] == 'c' || offset[0] == 'C' {
|
||||
cursor, _ = strconv.Atoi(offset[1:])
|
||||
cursor = char_to_byte_offset(file, cursor)
|
||||
} else {
|
||||
cursor, _ = strconv.Atoi(offset)
|
||||
}
|
||||
}
|
||||
|
||||
cursor -= skipped
|
||||
if filename != "" && !filepath.IsAbs(filename) {
|
||||
cwd, _ := os.Getwd()
|
||||
filename = filepath.Join(cwd, filename)
|
||||
}
|
||||
return file, filename, cursor
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// commands
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
func cmd_status(c *rpc.Client) {
|
||||
fmt.Printf("%s\n", client_status(c, 0))
|
||||
}
|
||||
|
||||
func cmd_auto_complete(c *rpc.Client) {
|
||||
context := pack_build_context(&build.Default)
|
||||
file, filename, cursor := prepare_file_filename_cursor()
|
||||
f := get_formatter(*g_format)
|
||||
f.write_candidates(client_auto_complete(c, file, filename, cursor, context))
|
||||
}
|
||||
|
||||
// cmd_close asks the server to shut down; the integer argument is a
// placeholder required by the RPC signature.
func cmd_close(c *rpc.Client) {
	client_close(c, 0)
}
|
||||
|
||||
// cmd_drop_cache asks the server to discard its package caches; the
// integer argument is a placeholder required by the RPC signature.
func cmd_drop_cache(c *rpc.Client) {
	client_drop_cache(c, 0)
}
|
||||
|
||||
func cmd_set(c *rpc.Client) {
|
||||
switch flag.NArg() {
|
||||
case 1:
|
||||
fmt.Print(client_set(c, "\x00", "\x00"))
|
||||
case 2:
|
||||
fmt.Print(client_set(c, flag.Arg(1), "\x00"))
|
||||
case 3:
|
||||
fmt.Print(client_set(c, flag.Arg(1), flag.Arg(2)))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,177 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// config
|
||||
//
|
||||
// Structure represents persistent config storage of the gocode daemon. Usually
|
||||
// the config is located somewhere in ~/.config/gocode directory.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// config mirrors the on-disk JSON configuration of the gocode daemon; the
// struct tags double as the option names accepted by `gocode set`.
type config struct {
	ProposeBuiltins    bool   `json:"propose-builtins"`
	LibPath            string `json:"lib-path"`
	CustomPkgPrefix    string `json:"custom-pkg-prefix"`
	CustomVendorDir    string `json:"custom-vendor-dir"`
	Autobuild          bool   `json:"autobuild"`
	ForceDebugOutput   string `json:"force-debug-output"`
	PackageLookupMode  string `json:"package-lookup-mode"`
	CloseTimeout       int    `json:"close-timeout"`
	UnimportedPackages bool   `json:"unimported-packages"`
}
|
||||
|
||||
// g_config is the process-wide configuration instance with its default
// values; the config file and the `set` command override these.
// (CustomVendorDir is deliberately left at its zero value "".)
var g_config = config{
	ProposeBuiltins:    false,
	LibPath:            "",
	CustomPkgPrefix:    "",
	Autobuild:          false,
	ForceDebugOutput:   "",
	PackageLookupMode:  "go",
	CloseTimeout:       1800,
	UnimportedPackages: false,
}
|
||||
|
||||
// g_string_to_bool lists the boolean spellings accepted for bool options
// by `gocode set` (consumed by set_value).
var g_string_to_bool = map[string]bool{
	"t":     true,
	"true":  true,
	"y":     true,
	"yes":   true,
	"on":    true,
	"1":     true,
	"f":     false,
	"false": false,
	"n":     false,
	"no":    false,
	"off":   false,
	"0":     false,
}
|
||||
|
||||
func set_value(v reflect.Value, value string) {
|
||||
switch t := v; t.Kind() {
|
||||
case reflect.Bool:
|
||||
v, ok := g_string_to_bool[value]
|
||||
if ok {
|
||||
t.SetBool(v)
|
||||
}
|
||||
case reflect.String:
|
||||
t.SetString(value)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
v, err := strconv.ParseInt(value, 10, 64)
|
||||
if err == nil {
|
||||
t.SetInt(v)
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v, err := strconv.ParseFloat(value, 64)
|
||||
if err == nil {
|
||||
t.SetFloat(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func list_value(v reflect.Value, name string, w io.Writer) {
|
||||
switch t := v; t.Kind() {
|
||||
case reflect.Bool:
|
||||
fmt.Fprintf(w, "%s %v\n", name, t.Bool())
|
||||
case reflect.String:
|
||||
fmt.Fprintf(w, "%s \"%v\"\n", name, t.String())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
fmt.Fprintf(w, "%s %v\n", name, t.Int())
|
||||
case reflect.Float32, reflect.Float64:
|
||||
fmt.Fprintf(w, "%s %v\n", name, t.Float())
|
||||
}
|
||||
}
|
||||
|
||||
func (this *config) list() string {
|
||||
str, typ := this.value_and_type()
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
for i := 0; i < str.NumField(); i++ {
|
||||
v := str.Field(i)
|
||||
name := typ.Field(i).Tag.Get("json")
|
||||
list_value(v, name, buf)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (this *config) list_option(name string) string {
|
||||
str, typ := this.value_and_type()
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
for i := 0; i < str.NumField(); i++ {
|
||||
v := str.Field(i)
|
||||
nm := typ.Field(i).Tag.Get("json")
|
||||
if nm == name {
|
||||
list_value(v, name, buf)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (this *config) set_option(name, value string) string {
|
||||
str, typ := this.value_and_type()
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
for i := 0; i < str.NumField(); i++ {
|
||||
v := str.Field(i)
|
||||
nm := typ.Field(i).Tag.Get("json")
|
||||
if nm == name {
|
||||
set_value(v, value)
|
||||
list_value(v, name, buf)
|
||||
}
|
||||
}
|
||||
this.write()
|
||||
return buf.String()
|
||||
|
||||
}
|
||||
|
||||
func (this *config) value_and_type() (reflect.Value, reflect.Type) {
|
||||
v := reflect.ValueOf(this).Elem()
|
||||
return v, v.Type()
|
||||
}
|
||||
|
||||
func (this *config) write() error {
|
||||
data, err := json.Marshal(this)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// make sure config dir exists
|
||||
dir := config_dir()
|
||||
if !file_exists(dir) {
|
||||
os.MkdirAll(dir, 0755)
|
||||
}
|
||||
|
||||
f, err := os.Create(config_file())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
_, err = f.Write(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (this *config) read() error {
|
||||
data, err := ioutil.ReadFile(config_file())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(data, this)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,557 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"log"
|
||||
)
|
||||
|
||||
// cursor_context describes what sits immediately before the cursor, as
// deduced from the preceding tokens; it drives filtering of completion
// candidates.
type cursor_context struct {
	decl         *decl  // declaration the cursor expression resolved to (nil when deduction failed)
	partial      string // identifier prefix typed so far
	struct_field bool   // completing field names inside a struct literal
	decl_import  bool   // completing an import path string

	// store expression that was supposed to be deduced to "decl", however
	// if decl is nil, then deduction failed, we could try to resolve it to
	// unimported package instead
	expr ast.Expr
}
|
||||
|
||||
// token_iterator walks a pre-scanned token slice backwards, starting from
// the token right before the cursor.
type token_iterator struct {
	tokens      []token_item // tokens up to (not including) the cursor
	token_index int          // current position; -1 when tokens is empty
}
|
||||
|
||||
// token_item is a single scanned token.
type token_item struct {
	off int         // byte offset of the token in the source
	tok token.Token // token kind
	lit string      // literal text; empty for non-literal tokens
}
|
||||
|
||||
func (i token_item) literal() string {
|
||||
if i.tok.IsLiteral() {
|
||||
return i.lit
|
||||
} else {
|
||||
return i.tok.String()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func new_token_iterator(src []byte, cursor int) token_iterator {
|
||||
tokens := make([]token_item, 0, 1000)
|
||||
var s scanner.Scanner
|
||||
fset := token.NewFileSet()
|
||||
file := fset.AddFile("", fset.Base(), len(src))
|
||||
s.Init(file, src, nil, 0)
|
||||
for {
|
||||
pos, tok, lit := s.Scan()
|
||||
off := fset.Position(pos).Offset
|
||||
if tok == token.EOF || cursor <= off {
|
||||
break
|
||||
}
|
||||
tokens = append(tokens, token_item{
|
||||
off: off,
|
||||
tok: tok,
|
||||
lit: lit,
|
||||
})
|
||||
}
|
||||
return token_iterator{
|
||||
tokens: tokens,
|
||||
token_index: len(tokens) - 1,
|
||||
}
|
||||
}
|
||||
|
||||
func (this *token_iterator) token() token_item {
|
||||
return this.tokens[this.token_index]
|
||||
}
|
||||
|
||||
func (this *token_iterator) go_back() bool {
|
||||
if this.token_index <= 0 {
|
||||
return false
|
||||
}
|
||||
this.token_index--
|
||||
return true
|
||||
}
|
||||
|
||||
// bracket_pairs_map maps each closing bracket token to its opening
// counterpart; used when scanning backwards over balanced pairs.
var bracket_pairs_map = map[token.Token]token.Token{
	token.RPAREN: token.LPAREN,
	token.RBRACK: token.LBRACK,
	token.RBRACE: token.LBRACE,
}
|
||||
|
||||
func (ti *token_iterator) skip_to_left(left, right token.Token) bool {
|
||||
if ti.token().tok == left {
|
||||
return true
|
||||
}
|
||||
balance := 1
|
||||
for balance != 0 {
|
||||
if !ti.go_back() {
|
||||
return false
|
||||
}
|
||||
switch ti.token().tok {
|
||||
case right:
|
||||
balance++
|
||||
case left:
|
||||
balance--
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// when the cursor is at the ')' or ']' or '}', move the cursor to an opposite
|
||||
// bracket pair, this functions takes nested bracket pairs into account
|
||||
func (this *token_iterator) skip_to_balanced_pair() bool {
|
||||
right := this.token().tok
|
||||
left := bracket_pairs_map[right]
|
||||
return this.skip_to_left(left, right)
|
||||
}
|
||||
|
||||
// Move the cursor to the open brace of the current block, taking nested blocks
|
||||
// into account.
|
||||
func (this *token_iterator) skip_to_left_curly() bool {
|
||||
return this.skip_to_left(token.LBRACE, token.RBRACE)
|
||||
}
|
||||
|
||||
// Extract the type expression right before the enclosing curly bracket block.
|
||||
// Examples (# - the cursor):
|
||||
// &lib.Struct{Whatever: 1, Hel#} // returns "lib.Struct"
|
||||
// X{#} // returns X
|
||||
// The idea is that we check if this type expression is a type and it is, we
|
||||
// can apply special filtering for autocompletion results.
|
||||
// Sadly, this doesn't cover anonymous structs.
|
||||
func (ti *token_iterator) extract_struct_type() string {
	if !ti.skip_to_left_curly() {
		return ""
	}
	// step onto the token right before the '{'
	if !ti.go_back() {
		return ""
	}
	if ti.token().tok != token.IDENT {
		return ""
	}
	// b is the last identifier: either `X` or the `Struct` of `lib.Struct`
	b := ti.token().literal()
	if !ti.go_back() {
		return b
	}
	// no dot before the identifier -> unqualified type name
	if ti.token().tok != token.PERIOD {
		return b
	}
	if !ti.go_back() {
		return b
	}
	if ti.token().tok != token.IDENT {
		return b
	}
	// qualified form: "<pkg>.<type>"
	return ti.token().literal() + "." + b
}
|
||||
|
||||
// Starting from the token under the cursor move back and extract something
|
||||
// that resembles a valid Go primary expression. Examples of primary expressions
|
||||
// from Go spec:
|
||||
// x
|
||||
// 2
|
||||
// (s + ".txt")
|
||||
// f(3.1415, true)
|
||||
// Point{1, 2}
|
||||
// m["foo"]
|
||||
// s[i : j + 1]
|
||||
// obj.color
|
||||
// f.p[i].x()
|
||||
//
|
||||
// As you can see we can move through all of them using balanced bracket
|
||||
// matching and applying simple rules
|
||||
// E.g.
|
||||
// Point{1, 2}.m["foo"].s[i : j + 1].MethodCall(a, func(a, b int) int { return a + b }).
|
||||
// Can be seen as:
|
||||
// Point{ }.m[ ].s[ ].MethodCall( ).
|
||||
// Which boils the rules down to these connected via dots:
|
||||
// ident
|
||||
// ident[]
|
||||
// ident{}
|
||||
// ident()
|
||||
// Of course there are also slightly more complicated rules for brackets:
|
||||
// ident{}.ident()[5][4](), etc.
|
||||
func (this *token_iterator) extract_go_expr() string {
	// remember where we started; the expression is tokens (stop, orig]
	orig := this.token_index

	// Contains the type of the previously scanned token (initialized with
	// the token right under the cursor). This is the token to the *right* of
	// the current one.
	prev := this.token().tok
loop:
	for {
		if !this.go_back() {
			// hit the start of the stream: everything scanned so far is
			// part of the expression
			return token_items_to_string(this.tokens[:orig])
		}
		switch this.token().tok {
		case token.PERIOD:
			// If the '.' is not followed by IDENT, it's invalid.
			if prev != token.IDENT {
				break loop
			}
		case token.IDENT:
			// Valid tokens after IDENT are '.', '[', '{' and '('.
			switch prev {
			case token.PERIOD, token.LBRACK, token.LBRACE, token.LPAREN:
				// all ok
			default:
				break loop
			}
		case token.RBRACE:
			// This one can only be a part of type initialization, like:
			// Dummy{}.Hello()
			// It is valid Go if Hello method is defined on a non-pointer receiver.
			if prev != token.PERIOD {
				break loop
			}
			this.skip_to_balanced_pair()
		case token.RPAREN, token.RBRACK:
			// After ']' and ')' their opening counterparts are valid '[', '(',
			// as well as the dot.
			switch prev {
			case token.PERIOD, token.LBRACK, token.LPAREN:
				// all ok
			default:
				break loop
			}
			this.skip_to_balanced_pair()
		default:
			break loop
		}
		prev = this.token().tok
	}
	// the loop stopped on the first token that is NOT part of the
	// expression, so the expression starts one token later
	expr := token_items_to_string(this.tokens[this.token_index+1 : orig])
	if *g_debug {
		log.Printf("extracted expression tokens: %s", expr)
	}
	return expr
}
|
||||
|
||||
// Given a slice of token_item, reassembles them into the original literal
|
||||
// expression.
|
||||
func token_items_to_string(tokens []token_item) string {
|
||||
var buf bytes.Buffer
|
||||
for _, t := range tokens {
|
||||
buf.WriteString(t.literal())
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// this function is called when the cursor is at the '.' and you need to get the
|
||||
// declaration before that dot
|
||||
// this function is called when the cursor is at the '.' and you need to get the
// declaration before that dot
func (c *auto_complete_context) deduce_cursor_decl(iter *token_iterator) (*decl, ast.Expr) {
	// reassemble the expression left of the dot and resolve it to a decl
	expr, err := parser.ParseExpr(iter.extract_go_expr())
	if err != nil {
		return nil, nil
	}
	return expr_to_decl(expr, c.current.scope), expr
}
|
||||
|
||||
// try to find and extract the surrounding struct literal type
|
||||
func (c *auto_complete_context) deduce_struct_type_decl(iter *token_iterator) *decl {
|
||||
typ := iter.extract_struct_type()
|
||||
if typ == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
expr, err := parser.ParseExpr(typ)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
decl := type_to_decl(expr, c.current.scope)
|
||||
if decl == nil {
|
||||
return nil
|
||||
}
|
||||
if _, ok := decl.typ.(*ast.StructType); !ok {
|
||||
return nil
|
||||
}
|
||||
return decl
|
||||
}
|
||||
|
||||
// Entry point from autocompletion, the function looks at text before the cursor
|
||||
// and figures out the declaration the cursor is on. This declaration is
|
||||
// used in filtering the resulting set of autocompletion suggestions.
|
||||
// The boolean result is "ok": false aborts completion entirely, while an
// empty cursor_context with true means "no special context".
func (c *auto_complete_context) deduce_cursor_context(file []byte, cursor int) (cursor_context, bool) {
	if cursor <= 0 {
		return cursor_context{}, true
	}

	iter := new_token_iterator(file, cursor)
	if len(iter.tokens) == 0 {
		return cursor_context{}, false
	}

	// figure out what is just before the cursor
	switch tok := iter.token(); tok.tok {
	case token.STRING:
		// make sure cursor is inside the string
		s := tok.literal()
		if len(s) > 1 && s[len(s)-1] == '"' && tok.off+len(s) <= cursor {
			return cursor_context{}, true
		}
		// now figure out if inside an import declaration: walk backwards
		// and verify the token sequence looks like `import ( ... "..."`.
		// ptok tracks the token to the *right* of the current one.
		var ptok = token.STRING
		for iter.go_back() {
			itok := iter.token().tok
			switch itok {
			case token.STRING:
				switch ptok {
				case token.SEMICOLON, token.IDENT, token.PERIOD:
				default:
					return cursor_context{}, true
				}
			case token.LPAREN, token.SEMICOLON:
				switch ptok {
				case token.STRING, token.IDENT, token.PERIOD:
				default:
					return cursor_context{}, true
				}
			case token.IDENT, token.PERIOD:
				switch ptok {
				case token.STRING:
				default:
					return cursor_context{}, true
				}
			case token.IMPORT:
				switch ptok {
				case token.STRING, token.IDENT, token.PERIOD, token.LPAREN:
					// confirmed: complete the import path typed so far
					// (skip the opening quote, stop at the cursor)
					path_len := cursor - tok.off
					path := s[1:path_len]
					return cursor_context{decl_import: true, partial: path}, true
				default:
					return cursor_context{}, true
				}
			default:
				return cursor_context{}, true
			}
			ptok = itok
		}
	case token.PERIOD:
		// we're '<whatever>.'
		// figure out decl, Partial is ""
		decl, expr := c.deduce_cursor_decl(&iter)
		return cursor_context{decl: decl, expr: expr}, decl != nil
	case token.IDENT, token.TYPE, token.CONST, token.VAR, token.FUNC, token.PACKAGE:
		// we're '<whatever>.<ident>'
		// parse <ident> as Partial and figure out decl
		var partial string
		if tok.tok == token.IDENT {
			// Calculate the offset of the cursor position within the identifier.
			// For instance, if we are 'ab#c', we want partial_len = 2 and partial = ab.
			partial_len := cursor - tok.off

			// If it happens that the cursor is past the end of the literal,
			// means there is a space between the literal and the cursor, think
			// of it as no context, because that's what it really is.
			if partial_len > len(tok.literal()) {
				return cursor_context{}, true
			}
			partial = tok.literal()[0:partial_len]
		} else {
			// Do not try to truncate if it is not an identifier.
			partial = tok.literal()
		}

		iter.go_back()
		switch iter.token().tok {
		case token.PERIOD:
			decl, expr := c.deduce_cursor_decl(&iter)
			return cursor_context{decl: decl, partial: partial, expr: expr}, decl != nil
		case token.COMMA, token.LBRACE:
			// This can happen for struct fields:
			// &Struct{Hello: 1, Wor#} // (# - the cursor)
			// Let's try to find the struct type
			decl := c.deduce_struct_type_decl(&iter)
			return cursor_context{
				decl:         decl,
				partial:      partial,
				struct_field: decl != nil,
			}, true
		default:
			return cursor_context{partial: partial}, true
		}
	case token.COMMA, token.LBRACE:
		// Try to parse the current expression as a structure initialization.
		decl := c.deduce_struct_type_decl(&iter)
		return cursor_context{
			decl:         decl,
			partial:      "",
			struct_field: decl != nil,
		}, true
	}

	return cursor_context{}, true
}
|
||||
|
||||
// Decl deduction failed, but we're on "<ident>.", this ident can be an
|
||||
// unexported package, let's try to match the ident against a set of known
|
||||
// packages and if it matches try to import it.
|
||||
// TODO: Right now I've made a static list of built-in packages, but in theory
|
||||
// we could scan all GOPATH packages as well. Now, don't forget that default
|
||||
// package name has nothing to do with package file name, that's why we need to
|
||||
// scan the packages. And many of them will have conflicts. Can we make a smart
|
||||
// prediction algorithm which will prefer certain packages over another ones?
|
||||
func resolveKnownPackageIdent(ident string, filename string, context *package_lookup_context) *decl {
	importPath, ok := knownPackageIdents[ident]
	if !ok {
		return nil
	}

	// map the import path to the compiled package file relative to the
	// source file being completed
	path, ok := abs_path_for_package(filename, importPath, context)
	if !ok {
		return nil
	}

	// load (or refresh) the package's declaration cache and return its
	// top-level declaration
	p := new_package_file_cache(path)
	p.update_cache()
	return p.main
}
|
||||
|
||||
// knownPackageIdents maps default package identifiers to their standard
// library import paths; used by resolveKnownPackageIdent to complete
// members of packages the file has not imported yet. Where several stdlib
// packages share an identifier, one was chosen (see the DUP notes below).
var knownPackageIdents = map[string]string{
	"adler32":         "hash/adler32",
	"aes":             "crypto/aes",
	"ascii85":         "encoding/ascii85",
	"asn1":            "encoding/asn1",
	"ast":             "go/ast",
	"atomic":          "sync/atomic",
	"base32":          "encoding/base32",
	"base64":          "encoding/base64",
	"big":             "math/big",
	"binary":          "encoding/binary",
	"bufio":           "bufio",
	"build":           "go/build",
	"bytes":           "bytes",
	"bzip2":           "compress/bzip2",
	"cgi":             "net/http/cgi",
	"cgo":             "runtime/cgo",
	"cipher":          "crypto/cipher",
	"cmplx":           "math/cmplx",
	"color":           "image/color",
	"constant":        "go/constant",
	"context":         "context",
	"cookiejar":       "net/http/cookiejar",
	"crc32":           "hash/crc32",
	"crc64":           "hash/crc64",
	"crypto":          "crypto",
	"csv":             "encoding/csv",
	"debug":           "runtime/debug",
	"des":             "crypto/des",
	"doc":             "go/doc",
	"draw":            "image/draw",
	"driver":          "database/sql/driver",
	"dsa":             "crypto/dsa",
	"dwarf":           "debug/dwarf",
	"ecdsa":           "crypto/ecdsa",
	"elf":             "debug/elf",
	"elliptic":        "crypto/elliptic",
	"encoding":        "encoding",
	"errors":          "errors",
	"exec":            "os/exec",
	"expvar":          "expvar",
	"fcgi":            "net/http/fcgi",
	"filepath":        "path/filepath",
	"flag":            "flag",
	"flate":           "compress/flate",
	"fmt":             "fmt",
	"fnv":             "hash/fnv",
	"format":          "go/format",
	"gif":             "image/gif",
	"gob":             "encoding/gob",
	"gosym":           "debug/gosym",
	"gzip":            "compress/gzip",
	"hash":            "hash",
	"heap":            "container/heap",
	"hex":             "encoding/hex",
	"hmac":            "crypto/hmac",
	"hpack":           "vendor/golang_org/x/net/http2/hpack",
	"html":            "html",
	"http":            "net/http",
	"httplex":         "vendor/golang_org/x/net/lex/httplex",
	"httptest":        "net/http/httptest",
	"httptrace":       "net/http/httptrace",
	"httputil":        "net/http/httputil",
	"image":           "image",
	"importer":        "go/importer",
	"io":              "io",
	"iotest":          "testing/iotest",
	"ioutil":          "io/ioutil",
	"jpeg":            "image/jpeg",
	"json":            "encoding/json",
	"jsonrpc":         "net/rpc/jsonrpc",
	"list":            "container/list",
	"log":             "log",
	"lzw":             "compress/lzw",
	"macho":           "debug/macho",
	"mail":            "net/mail",
	"math":            "math",
	"md5":             "crypto/md5",
	"mime":            "mime",
	"multipart":       "mime/multipart",
	"net":             "net",
	"os":              "os",
	"palette":         "image/color/palette",
	"parse":           "text/template/parse",
	"parser":          "go/parser",
	"path":            "path",
	"pe":              "debug/pe",
	"pem":             "encoding/pem",
	"pkix":            "crypto/x509/pkix",
	"plan9obj":        "debug/plan9obj",
	"png":             "image/png",
	"pprof":           "net/http/pprof",
	"printer":         "go/printer",
	"quick":           "testing/quick",
	"quotedprintable": "mime/quotedprintable",
	"race":            "runtime/race",
	"rand":            "math/rand",
	"rc4":             "crypto/rc4",
	"reflect":         "reflect",
	"regexp":          "regexp",
	"ring":            "container/ring",
	"rpc":             "net/rpc",
	"rsa":             "crypto/rsa",
	"runtime":         "runtime",
	"scanner":         "text/scanner",
	"sha1":            "crypto/sha1",
	"sha256":          "crypto/sha256",
	"sha512":          "crypto/sha512",
	"signal":          "os/signal",
	"smtp":            "net/smtp",
	"sort":            "sort",
	"sql":             "database/sql",
	"strconv":         "strconv",
	"strings":         "strings",
	"subtle":          "crypto/subtle",
	"suffixarray":     "index/suffixarray",
	"sync":            "sync",
	"syntax":          "regexp/syntax",
	"syscall":         "syscall",
	"syslog":          "log/syslog",
	"tabwriter":       "text/tabwriter",
	"tar":             "archive/tar",
	"template":        "html/template",
	"testing":         "testing",
	"textproto":       "net/textproto",
	"time":            "time",
	"tls":             "crypto/tls",
	"token":           "go/token",
	"trace":           "runtime/trace",
	"types":           "go/types",
	"unicode":         "unicode",
	"url":             "net/url",
	"user":            "os/user",
	"utf16":           "unicode/utf16",
	"utf8":            "unicode/utf8",
	"x509":            "crypto/x509",
	"xml":             "encoding/xml",
	"zip":             "archive/zip",
	"zlib":            "compress/zlib",
	//"scanner": "go/scanner", // DUP: prefer text/scanner
	//"template": "text/template", // DUP: prefer html/template
	//"pprof": "runtime/pprof", // DUP: prefer net/http/pprof
	//"rand": "crypto/rand", // DUP: prefer math/rand
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,518 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// []package_import
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// package_import describes one import of a file: its local alias (possibly
// empty) and the resolved path of the package object file.
type package_import struct {
	alias string
	path  string
}
|
||||
|
||||
// Parses import declarations until the first non-import declaration and fills
|
||||
// `packages` array with import information.
|
||||
// Parses import declarations until the first non-import declaration and fills
// `packages` array with import information.
func collect_package_imports(filename string, decls []ast.Decl, context *package_lookup_context) []package_import {
	pi := make([]package_import, 0, 16)
	for _, decl := range decls {
		if gd, ok := decl.(*ast.GenDecl); ok && gd.Tok == token.IMPORT {
			for _, spec := range gd.Specs {
				imp := spec.(*ast.ImportSpec)
				path, alias := path_and_alias(imp)
				// resolve the import path to the compiled package file;
				// blank imports ("_") contribute no names and are skipped
				path, ok := abs_path_for_package(filename, path, context)
				if ok && alias != "_" {
					pi = append(pi, package_import{alias, path})
				}
			}
		} else {
			// imports can only precede other declarations, so stop here
			break
		}
	}
	return pi
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// decl_file_cache
|
||||
//
|
||||
// Contains cache for top-level declarations of a file as well as its
|
||||
// contents, AST and import information.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type decl_file_cache struct {
	name  string // file name
	mtime int64  // last modification time

	decls     map[string]*decl // top-level declarations
	error     error            // last error
	packages  []package_import // import information
	filescope *scope           // file-level scope fed to anonymify_ast

	fset    *token.FileSet          // positions of the parsed AST
	context *package_lookup_context // how import paths are resolved
}
|
||||
|
||||
func new_decl_file_cache(name string, context *package_lookup_context) *decl_file_cache {
|
||||
return &decl_file_cache{
|
||||
name: name,
|
||||
context: context,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *decl_file_cache) update() {
|
||||
stat, err := os.Stat(f.name)
|
||||
if err != nil {
|
||||
f.decls = nil
|
||||
f.error = err
|
||||
f.fset = nil
|
||||
return
|
||||
}
|
||||
|
||||
statmtime := stat.ModTime().UnixNano()
|
||||
if f.mtime == statmtime {
|
||||
return
|
||||
}
|
||||
|
||||
f.mtime = statmtime
|
||||
f.read_file()
|
||||
}
|
||||
|
||||
func (f *decl_file_cache) read_file() {
|
||||
var data []byte
|
||||
data, f.error = file_reader.read_file(f.name)
|
||||
if f.error != nil {
|
||||
return
|
||||
}
|
||||
data, _ = filter_out_shebang(data)
|
||||
|
||||
f.process_data(data)
|
||||
}
|
||||
|
||||
func (f *decl_file_cache) process_data(data []byte) {
|
||||
var file *ast.File
|
||||
f.fset = token.NewFileSet()
|
||||
file, f.error = parser.ParseFile(f.fset, "", data, 0)
|
||||
f.filescope = new_scope(nil)
|
||||
for _, d := range file.Decls {
|
||||
anonymify_ast(d, 0, f.filescope)
|
||||
}
|
||||
f.packages = collect_package_imports(f.name, file.Decls, f.context)
|
||||
f.decls = make(map[string]*decl, len(file.Decls))
|
||||
for _, decl := range file.Decls {
|
||||
append_to_top_decls(f.decls, decl, f.filescope)
|
||||
}
|
||||
}
|
||||
|
||||
func append_to_top_decls(decls map[string]*decl, decl ast.Decl, scope *scope) {
|
||||
foreach_decl(decl, func(data *foreach_decl_struct) {
|
||||
class := ast_decl_class(data.decl)
|
||||
for i, name := range data.names {
|
||||
typ, v, vi := data.type_value_index(i)
|
||||
|
||||
d := new_decl_full(name.Name, class, 0, typ, v, vi, scope)
|
||||
if d == nil {
|
||||
return
|
||||
}
|
||||
|
||||
methodof := method_of(decl)
|
||||
if methodof != "" {
|
||||
decl, ok := decls[methodof]
|
||||
if ok {
|
||||
decl.add_child(d)
|
||||
} else {
|
||||
decl = new_decl(methodof, decl_methods_stub, scope)
|
||||
decls[methodof] = decl
|
||||
decl.add_child(d)
|
||||
}
|
||||
} else {
|
||||
decl, ok := decls[d.name]
|
||||
if ok {
|
||||
decl.expand_or_replace(d)
|
||||
} else {
|
||||
decls[d.name] = d
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func abs_path_for_package(filename, p string, context *package_lookup_context) (string, bool) {
|
||||
dir, _ := filepath.Split(filename)
|
||||
if len(p) == 0 {
|
||||
return "", false
|
||||
}
|
||||
if p[0] == '.' {
|
||||
return fmt.Sprintf("%s.a", filepath.Join(dir, p)), true
|
||||
}
|
||||
pkg, ok := find_go_dag_package(p, dir)
|
||||
if ok {
|
||||
return pkg, true
|
||||
}
|
||||
return find_global_file(p, context)
|
||||
}
|
||||
|
||||
func path_and_alias(imp *ast.ImportSpec) (string, string) {
|
||||
path := ""
|
||||
if imp.Path != nil && len(imp.Path.Value) > 0 {
|
||||
path = string(imp.Path.Value)
|
||||
path = path[1 : len(path)-1]
|
||||
}
|
||||
alias := ""
|
||||
if imp.Name != nil {
|
||||
alias = imp.Name.Name
|
||||
}
|
||||
return path, alias
|
||||
}
|
||||
|
||||
func find_go_dag_package(imp, filedir string) (string, bool) {
|
||||
// Support godag directory structure
|
||||
dir, pkg := filepath.Split(imp)
|
||||
godag_pkg := filepath.Join(filedir, "..", dir, "_obj", pkg+".a")
|
||||
if file_exists(godag_pkg) {
|
||||
return godag_pkg, true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// autobuild compares the mod time of the source files of the package, and if any of them is newer
|
||||
// than the package object file will rebuild it.
|
||||
func autobuild(p *build.Package) error {
|
||||
if p.Dir == "" {
|
||||
return fmt.Errorf("no files to build")
|
||||
}
|
||||
ps, err := os.Stat(p.PkgObj)
|
||||
if err != nil {
|
||||
// Assume package file does not exist and build for the first time.
|
||||
return build_package(p)
|
||||
}
|
||||
pt := ps.ModTime()
|
||||
fs, err := readdir_lstat(p.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range fs {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
if f.ModTime().After(pt) {
|
||||
// Source file is newer than package file; rebuild.
|
||||
return build_package(p)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// build_package builds the package by calling `go install package/import`. If everything compiles
|
||||
// correctly, the newly compiled package should then be in the usual place in the `$GOPATH/pkg`
|
||||
// directory, and gocode will pick it up from there.
|
||||
func build_package(p *build.Package) error {
|
||||
if *g_debug {
|
||||
log.Printf("-------------------")
|
||||
log.Printf("rebuilding package %s", p.Name)
|
||||
log.Printf("package import: %s", p.ImportPath)
|
||||
log.Printf("package object: %s", p.PkgObj)
|
||||
log.Printf("package source dir: %s", p.Dir)
|
||||
log.Printf("package source files: %v", p.GoFiles)
|
||||
log.Printf("GOPATH: %v", g_daemon.context.GOPATH)
|
||||
log.Printf("GOROOT: %v", g_daemon.context.GOROOT)
|
||||
}
|
||||
env := os.Environ()
|
||||
for i, v := range env {
|
||||
if strings.HasPrefix(v, "GOPATH=") {
|
||||
env[i] = "GOPATH=" + g_daemon.context.GOPATH
|
||||
} else if strings.HasPrefix(v, "GOROOT=") {
|
||||
env[i] = "GOROOT=" + g_daemon.context.GOROOT
|
||||
}
|
||||
}
|
||||
|
||||
cmd := exec.Command("go", "install", p.ImportPath)
|
||||
cmd.Env = env
|
||||
|
||||
// TODO: Should read STDERR rather than STDOUT.
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if *g_debug {
|
||||
log.Printf("build out: %s\n", string(out))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// executes autobuild function if autobuild option is enabled, logs error and
|
||||
// ignores it
|
||||
func try_autobuild(p *build.Package) {
|
||||
if g_config.Autobuild {
|
||||
err := autobuild(p)
|
||||
if err != nil && *g_debug {
|
||||
log.Printf("Autobuild error: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func log_found_package_maybe(imp, pkgpath string) {
|
||||
if *g_debug {
|
||||
log.Printf("Found %q at %q\n", imp, pkgpath)
|
||||
}
|
||||
}
|
||||
|
||||
func log_build_context(context *package_lookup_context) {
|
||||
log.Printf(" GOROOT: %s\n", context.GOROOT)
|
||||
log.Printf(" GOPATH: %s\n", context.GOPATH)
|
||||
log.Printf(" GOOS: %s\n", context.GOOS)
|
||||
log.Printf(" GOARCH: %s\n", context.GOARCH)
|
||||
log.Printf(" BzlProjectRoot: %q\n", context.BzlProjectRoot)
|
||||
log.Printf(" GBProjectRoot: %q\n", context.GBProjectRoot)
|
||||
log.Printf(" lib-path: %q\n", g_config.LibPath)
|
||||
}
|
||||
|
||||
// find_global_file returns the file path of the compiled package corresponding to the specified
// import, and a boolean stating whether such path is valid.
// TODO: Return only one value, possibly empty string if not found.
//
// Lookup order: the synthetic "unsafe" package, then lib-path entries, then
// gb/bzl project-specific locations (when that lookup mode is configured),
// then vendor directories walking up from the current package, and finally
// the regular build-context import.
func find_global_file(imp string, context *package_lookup_context) (string, bool) {
	// gocode synthetically generates the builtin package
	// "unsafe", since the "unsafe.a" package doesn't really exist.
	// Thus, when the user request for the package "unsafe" we
	// would return synthetic global file that would be used
	// just as a key name to find this synthetic package
	if imp == "unsafe" {
		return "unsafe", true
	}

	pkgfile := fmt.Sprintf("%s.a", imp)

	// if lib-path is defined, use it
	if g_config.LibPath != "" {
		for _, p := range filepath.SplitList(g_config.LibPath) {
			pkg_path := filepath.Join(p, pkgfile)
			if file_exists(pkg_path) {
				log_found_package_maybe(imp, pkg_path)
				return pkg_path, true
			}
			// Also check the relevant pkg/OS_ARCH dir for the libpath, if provided.
			pkgdir := fmt.Sprintf("%s_%s", context.GOOS, context.GOARCH)
			pkg_path = filepath.Join(p, "pkg", pkgdir, pkgfile)
			if file_exists(pkg_path) {
				log_found_package_maybe(imp, pkg_path)
				return pkg_path, true
			}
		}
	}

	// gb-specific lookup mode, only if the root dir was found
	if g_config.PackageLookupMode == "gb" && context.GBProjectRoot != "" {
		root := context.GBProjectRoot
		// gb archives live under pkg/OS-ARCH (dash, not underscore).
		pkg_path := filepath.Join(root, "pkg", context.GOOS+"-"+context.GOARCH, pkgfile)
		if file_exists(pkg_path) {
			log_found_package_maybe(imp, pkg_path)
			return pkg_path, true
		}
	}

	// bzl-specific lookup mode, only if the root dir was found
	if g_config.PackageLookupMode == "bzl" && context.BzlProjectRoot != "" {
		var root, impath string
		if strings.HasPrefix(imp, g_config.CustomPkgPrefix+"/") {
			root = filepath.Join(context.BzlProjectRoot, "bazel-bin")
			impath = imp[len(g_config.CustomPkgPrefix)+1:]
		} else if g_config.CustomVendorDir != "" {
			// Try custom vendor dir.
			root = filepath.Join(context.BzlProjectRoot, "bazel-bin", g_config.CustomVendorDir)
			impath = imp
		}

		if root != "" && impath != "" {
			// There might be more than one ".a" files in the pkg path with bazel.
			// But the best practice is to keep one go_library build target in each
			// package directory so that it follows the standard Go package
			// structure. Thus here we assume there is at most one ".a" file existing
			// in the pkg path.
			if d, err := os.Open(filepath.Join(root, impath)); err == nil {
				defer d.Close()

				if fis, err := d.Readdir(-1); err == nil {
					for _, fi := range fis {
						if !fi.IsDir() && filepath.Ext(fi.Name()) == ".a" {
							pkg_path := filepath.Join(root, impath, fi.Name())
							log_found_package_maybe(imp, pkg_path)
							return pkg_path, true
						}
					}
				}
			}
		}
	}

	if context.CurrentPackagePath != "" {
		// Try vendor path first, see GO15VENDOREXPERIMENT.
		// We don't check this environment variable however, seems like there is
		// almost no harm in doing so (well.. if you experiment with vendoring,
		// gocode will fail after enabling/disabling the flag, and you'll be
		// forced to get rid of vendor binaries). But asking users to set this
		// env var is up will bring more trouble. Because we also need to pass
		// it from client to server, make sure their editors set it, etc.
		// So, whatever, let's just pretend it's always on.
		package_path := context.CurrentPackagePath
		for {
			limp := filepath.Join(package_path, "vendor", imp)
			if p, err := context.Import(limp, "", build.AllowBinary|build.FindOnly); err == nil {
				try_autobuild(p)
				if file_exists(p.PkgObj) {
					log_found_package_maybe(imp, p.PkgObj)
					return p.PkgObj, true
				}
			}
			if package_path == "" {
				break
			}
			// Walk up one directory and retry its vendor dir.
			next_path := filepath.Dir(package_path)
			// let's protect ourselves from inf recursion here
			if next_path == package_path {
				break
			}
			package_path = next_path
		}
	}

	// Regular lookup through the build context (GOROOT/GOPATH).
	if p, err := context.Import(imp, "", build.AllowBinary|build.FindOnly); err == nil {
		try_autobuild(p)
		if file_exists(p.PkgObj) {
			log_found_package_maybe(imp, p.PkgObj)
			return p.PkgObj, true
		}
	}

	if *g_debug {
		log.Printf("Import path %q was not resolved\n", imp)
		log.Println("Gocode's build context is:")
		log_build_context(context)
	}
	return "", false
}
|
||||
|
||||
func package_name(file *ast.File) string {
|
||||
if file.Name != nil {
|
||||
return file.Name.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// decl_cache
|
||||
//
|
||||
// Thread-safe collection of DeclFileCache entities.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// package_lookup_context extends go/build.Context with the extra roots gocode
// needs for alternative project layouts (bazel, gb) and for vendor-directory
// lookup relative to the package being completed (see find_global_file).
type package_lookup_context struct {
	build.Context
	BzlProjectRoot     string // bazel workspace root, when detected
	GBProjectRoot      string // gb project root, when detected
	CurrentPackagePath string // starting path for vendor-directory lookups
}
|
||||
|
||||
// gopath returns the list of Go path directories.
|
||||
func (ctxt *package_lookup_context) gopath() []string {
|
||||
var all []string
|
||||
for _, p := range filepath.SplitList(ctxt.GOPATH) {
|
||||
if p == "" || p == ctxt.GOROOT {
|
||||
// Empty paths are uninteresting.
|
||||
// If the path is the GOROOT, ignore it.
|
||||
// People sometimes set GOPATH=$GOROOT.
|
||||
// Do not get confused by this common mistake.
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(p, "~") {
|
||||
// Path segments starting with ~ on Unix are almost always
|
||||
// users who have incorrectly quoted ~ while setting GOPATH,
|
||||
// preventing it from expanding to $HOME.
|
||||
// The situation is made more confusing by the fact that
|
||||
// bash allows quoted ~ in $PATH (most shells do not).
|
||||
// Do not get confused by this, and do not try to use the path.
|
||||
// It does not exist, and printing errors about it confuses
|
||||
// those users even more, because they think "sure ~ exists!".
|
||||
// The go command diagnoses this situation and prints a
|
||||
// useful error.
|
||||
// On Windows, ~ is used in short names, such as c:\progra~1
|
||||
// for c:\program files.
|
||||
continue
|
||||
}
|
||||
all = append(all, p)
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
func (ctxt *package_lookup_context) pkg_dirs() []string {
|
||||
pkgdir := fmt.Sprintf("%s_%s", ctxt.GOOS, ctxt.GOARCH)
|
||||
|
||||
var all []string
|
||||
if ctxt.GOROOT != "" {
|
||||
dir := filepath.Join(ctxt.GOROOT, "pkg", pkgdir)
|
||||
if is_dir(dir) {
|
||||
all = append(all, dir)
|
||||
}
|
||||
}
|
||||
|
||||
switch g_config.PackageLookupMode {
|
||||
case "go":
|
||||
for _, p := range ctxt.gopath() {
|
||||
dir := filepath.Join(p, "pkg", pkgdir)
|
||||
if is_dir(dir) {
|
||||
all = append(all, dir)
|
||||
}
|
||||
}
|
||||
case "gb":
|
||||
if ctxt.GBProjectRoot != "" {
|
||||
pkgdir := fmt.Sprintf("%s-%s", ctxt.GOOS, ctxt.GOARCH)
|
||||
dir := filepath.Join(ctxt.GBProjectRoot, "pkg", pkgdir)
|
||||
if is_dir(dir) {
|
||||
all = append(all, dir)
|
||||
}
|
||||
}
|
||||
case "bzl":
|
||||
// TODO: Support bazel mode
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
// decl_cache is a thread-safe collection of per-file declaration caches
// (decl_file_cache entries), keyed by file name.
type decl_cache struct {
	cache   map[string]*decl_file_cache
	context *package_lookup_context
	sync.Mutex // guards cache
}
|
||||
|
||||
func new_decl_cache(context *package_lookup_context) *decl_cache {
|
||||
return &decl_cache{
|
||||
cache: make(map[string]*decl_file_cache),
|
||||
context: context,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *decl_cache) get(filename string) *decl_file_cache {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
f, ok := c.cache[filename]
|
||||
if !ok {
|
||||
f = new_decl_file_cache(filename, c.context)
|
||||
c.cache[filename] = f
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func (c *decl_cache) get_and_update(filename string) *decl_file_cache {
|
||||
f := c.get(filename)
|
||||
f.update()
|
||||
return f
|
||||
}
|
|
@ -0,0 +1,172 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// formatter interfaces
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// formatter writes completion candidates to stdout in an editor-specific
// format. 'num' is passed through to formats that emit it (vim, json, godit);
// presumably the length of the text being completed — confirm against callers.
type formatter interface {
	write_candidates(candidates []candidate, num int)
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// nice_formatter (just for testing, simple textual output)
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type nice_formatter struct{}
|
||||
|
||||
func (*nice_formatter) write_candidates(candidates []candidate, num int) {
|
||||
if candidates == nil {
|
||||
fmt.Printf("Nothing to complete.\n")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Found %d candidates:\n", len(candidates))
|
||||
for _, c := range candidates {
|
||||
abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
|
||||
if c.Class == decl_func {
|
||||
abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
|
||||
}
|
||||
fmt.Printf(" %s\n", abbr)
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// vim_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type vim_formatter struct{}
|
||||
|
||||
func (*vim_formatter) write_candidates(candidates []candidate, num int) {
|
||||
if candidates == nil {
|
||||
fmt.Print("[0, []]")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("[%d, [", num)
|
||||
for i, c := range candidates {
|
||||
if i != 0 {
|
||||
fmt.Printf(", ")
|
||||
}
|
||||
|
||||
word := c.Name
|
||||
if c.Class == decl_func {
|
||||
word += "("
|
||||
if strings.HasPrefix(c.Type, "func()") {
|
||||
word += ")"
|
||||
}
|
||||
}
|
||||
|
||||
abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
|
||||
if c.Class == decl_func {
|
||||
abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
|
||||
}
|
||||
fmt.Printf("{'word': '%s', 'abbr': '%s', 'info': '%s'}", word, abbr, abbr)
|
||||
}
|
||||
fmt.Printf("]]")
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// godit_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type godit_formatter struct{}
|
||||
|
||||
func (*godit_formatter) write_candidates(candidates []candidate, num int) {
|
||||
fmt.Printf("%d,,%d\n", num, len(candidates))
|
||||
for _, c := range candidates {
|
||||
contents := c.Name
|
||||
if c.Class == decl_func {
|
||||
contents += "("
|
||||
if strings.HasPrefix(c.Type, "func()") {
|
||||
contents += ")"
|
||||
}
|
||||
}
|
||||
|
||||
display := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
|
||||
if c.Class == decl_func {
|
||||
display = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
|
||||
}
|
||||
fmt.Printf("%s,,%s\n", display, contents)
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// emacs_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type emacs_formatter struct{}
|
||||
|
||||
func (*emacs_formatter) write_candidates(candidates []candidate, num int) {
|
||||
for _, c := range candidates {
|
||||
var hint string
|
||||
switch {
|
||||
case c.Class == decl_func:
|
||||
hint = c.Type
|
||||
case c.Type == "":
|
||||
hint = c.Class.String()
|
||||
default:
|
||||
hint = c.Class.String() + " " + c.Type
|
||||
}
|
||||
fmt.Printf("%s,,%s\n", c.Name, hint)
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// csv_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type csv_formatter struct{}
|
||||
|
||||
func (*csv_formatter) write_candidates(candidates []candidate, num int) {
|
||||
for _, c := range candidates {
|
||||
fmt.Printf("%s,,%s,,%s\n", c.Class, c.Name, c.Type)
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// json_formatter
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
type json_formatter struct{}
|
||||
|
||||
func (*json_formatter) write_candidates(candidates []candidate, num int) {
|
||||
if candidates == nil {
|
||||
fmt.Print("[]")
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf(`[%d, [`, num)
|
||||
for i, c := range candidates {
|
||||
if i != 0 {
|
||||
fmt.Printf(", ")
|
||||
}
|
||||
fmt.Printf(`{"class": "%s", "name": "%s", "type": "%s"}`,
|
||||
c.Class, c.Name, c.Type)
|
||||
}
|
||||
fmt.Print("]]")
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
func get_formatter(name string) formatter {
|
||||
switch name {
|
||||
case "vim":
|
||||
return new(vim_formatter)
|
||||
case "emacs":
|
||||
return new(emacs_formatter)
|
||||
case "nice":
|
||||
return new(nice_formatter)
|
||||
case "csv":
|
||||
return new(csv_formatter)
|
||||
case "json":
|
||||
return new(json_formatter)
|
||||
case "godit":
|
||||
return new(godit_formatter)
|
||||
}
|
||||
return new(nice_formatter)
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var (
|
||||
g_is_server = flag.Bool("s", false, "run a server instead of a client")
|
||||
g_format = flag.String("f", "nice", "output format (vim | emacs | nice | csv | json)")
|
||||
g_input = flag.String("in", "", "use this file instead of stdin input")
|
||||
g_sock = create_sock_flag("sock", "socket type (unix | tcp)")
|
||||
g_addr = flag.String("addr", "127.0.0.1:37373", "address for tcp socket")
|
||||
g_debug = flag.Bool("debug", false, "enable server-side debug mode")
|
||||
g_profile = flag.Int("profile", 0, "port on which to expose profiling information for pprof; 0 to disable profiling")
|
||||
)
|
||||
|
||||
func get_socket_filename() string {
|
||||
user := os.Getenv("USER")
|
||||
if user == "" {
|
||||
user = "all"
|
||||
}
|
||||
return filepath.Join(os.TempDir(), fmt.Sprintf("gocode-daemon.%s", user))
|
||||
}
|
||||
|
||||
// show_usage prints the command-line help (usage line, flags, subcommands)
// to stderr. Installed as flag.Usage in main.
func show_usage() {
	fmt.Fprintf(os.Stderr,
		"Usage: %s [-s] [-f=<format>] [-in=<path>] [-sock=<type>] [-addr=<addr>]\n"+
			"       <command> [<args>]\n\n",
		os.Args[0])
	fmt.Fprintf(os.Stderr,
		"Flags:\n")
	flag.PrintDefaults()
	fmt.Fprintf(os.Stderr,
		"\nCommands:\n"+
			"  autocomplete [<path>] <offset>     main autocompletion command\n"+
			"  close                              close the gocode daemon\n"+
			"  status                             gocode daemon status report\n"+
			"  drop-cache                         drop gocode daemon's cache\n"+
			"  set [<name> [<value>]]             list or set config options\n")
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = show_usage
|
||||
flag.Parse()
|
||||
|
||||
var retval int
|
||||
if *g_is_server {
|
||||
go func() {
|
||||
if *g_profile <= 0 {
|
||||
return
|
||||
}
|
||||
addr := fmt.Sprintf("localhost:%d", *g_profile)
|
||||
// Use the following commands to profile the binary:
|
||||
// go tool pprof http://localhost:6060/debug/pprof/profile # 30-second CPU profile
|
||||
// go tool pprof http://localhost:6060/debug/pprof/heap # heap profile
|
||||
// go tool pprof http://localhost:6060/debug/pprof/block # goroutine blocking profile
|
||||
// See http://blog.golang.org/profiling-go-programs for more info.
|
||||
log.Printf("enabling profiler on %s", addr)
|
||||
log.Print(http.ListenAndServe(addr, nil))
|
||||
}()
|
||||
retval = do_server()
|
||||
} else {
|
||||
retval = do_client()
|
||||
}
|
||||
os.Exit(retval)
|
||||
}
|
|
@ -0,0 +1,48 @@
|
|||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func create_sock_flag(name, desc string) *string {
|
||||
return flag.String(name, "unix", desc)
|
||||
}
|
||||
|
||||
// Full path of the current executable
|
||||
func get_executable_filename() string {
|
||||
// try readlink first
|
||||
path, err := os.Readlink("/proc/self/exe")
|
||||
if err == nil {
|
||||
return path
|
||||
}
|
||||
// use argv[0]
|
||||
path = os.Args[0]
|
||||
if !filepath.IsAbs(path) {
|
||||
cwd, _ := os.Getwd()
|
||||
path = filepath.Join(cwd, path)
|
||||
}
|
||||
if file_exists(path) {
|
||||
return path
|
||||
}
|
||||
// Fallback : use "gocode" and assume we are in the PATH...
|
||||
path, err = exec.LookPath("gocode")
|
||||
if err == nil {
|
||||
return path
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// config location
|
||||
|
||||
// config_dir returns gocode's configuration directory under the XDG home.
func config_dir() string {
	return filepath.Join(xdg_home_dir(), "gocode")
}
|
||||
|
||||
func config_file() string {
|
||||
return filepath.Join(xdg_home_dir(), "gocode", "config.json")
|
||||
}
|
|
@ -0,0 +1,56 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
shell32 = syscall.NewLazyDLL("shell32.dll")
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
)
|
||||
|
||||
var (
|
||||
proc_sh_get_folder_path = shell32.NewProc("SHGetFolderPathW")
|
||||
proc_get_module_file_name = kernel32.NewProc("GetModuleFileNameW")
|
||||
)
|
||||
|
||||
func create_sock_flag(name, desc string) *string {
|
||||
return flag.String(name, "tcp", desc)
|
||||
}
|
||||
|
||||
// get_executable_filename returns the full path of the current executable via
// GetModuleFileNameW with module handle 0 (the current process).
func get_executable_filename() string {
	b := make([]uint16, syscall.MAX_PATH)
	ret, _, err := syscall.Syscall(proc_get_module_file_name.Addr(), 3,
		0, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
	if int(ret) == 0 {
		// GetModuleFileNameW returns the number of characters copied;
		// zero indicates failure.
		panic(fmt.Sprintf("GetModuleFileNameW : err %d", int(err)))
	}
	return syscall.UTF16ToString(b)
}
|
||||
|
||||
const (
	// CSIDL_APPDATA: the per-user roaming Application Data folder
	// (see the SHGetFolderPath documentation).
	csidl_appdata = 0x1a
)
||||
|
||||
// get_appdata_folder_path returns the per-user Application Data directory
// via SHGetFolderPathW(CSIDL_APPDATA).
func get_appdata_folder_path() string {
	b := make([]uint16, syscall.MAX_PATH)
	ret, _, err := syscall.Syscall6(proc_sh_get_folder_path.Addr(), 5,
		0, csidl_appdata, 0, 0, uintptr(unsafe.Pointer(&b[0])), 0)
	if int(ret) != 0 {
		// SHGetFolderPathW returns an HRESULT; non-zero means failure.
		panic(fmt.Sprintf("SHGetFolderPathW : err %d", int(err)))
	}
	return syscall.UTF16ToString(b)
}
|
||||
|
||||
// config_dir returns gocode's configuration directory under %APPDATA%.
func config_dir() string {
	return filepath.Join(get_appdata_folder_path(), "gocode")
}
|
||||
|
||||
func config_file() string {
|
||||
return filepath.Join(get_appdata_folder_path(), "gocode", "config.json")
|
||||
}
|
|
@ -0,0 +1,256 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// package_parser abstracts the two export-data formats (textual gc and
// binary); implementations invoke callback once per exported declaration,
// with the owning package name.
type package_parser interface {
	parse_export(callback func(pkg string, decl ast.Decl))
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// package_file_cache
|
||||
//
|
||||
// Structure that represents a cache for an imported pacakge. In other words
|
||||
// these are the contents of an archive (*.a) file.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// package_file_cache caches the processed contents of one imported package
// archive (*.a file): its scope, the package's own declaration tree and the
// declarations of other packages referenced by the export data.
type package_file_cache struct {
	name     string // file name
	mtime    int64  // archive modification time; -1 means "never refresh"
	defalias string // default alias — presumably the package's declared name; set by the export-data parsers

	scope  *scope
	main   *decl // package declaration
	others map[string]*decl // declarations of other packages seen in the export data
}
|
||||
|
||||
func new_package_file_cache(name string) *package_file_cache {
|
||||
m := new(package_file_cache)
|
||||
m.name = name
|
||||
m.mtime = 0
|
||||
m.defalias = ""
|
||||
return m
|
||||
}
|
||||
|
||||
// Creates a cache that stays in cache forever. Useful for built-in packages.
|
||||
func new_package_file_cache_forever(name, defalias string) *package_file_cache {
|
||||
m := new(package_file_cache)
|
||||
m.name = name
|
||||
m.mtime = -1
|
||||
m.defalias = defalias
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *package_file_cache) find_file() string {
|
||||
if file_exists(m.name) {
|
||||
return m.name
|
||||
}
|
||||
|
||||
n := len(m.name)
|
||||
filename := m.name[:n-1] + "6"
|
||||
if file_exists(filename) {
|
||||
return filename
|
||||
}
|
||||
|
||||
filename = m.name[:n-1] + "8"
|
||||
if file_exists(filename) {
|
||||
return filename
|
||||
}
|
||||
|
||||
filename = m.name[:n-1] + "5"
|
||||
if file_exists(filename) {
|
||||
return filename
|
||||
}
|
||||
return m.name
|
||||
}
|
||||
|
||||
func (m *package_file_cache) update_cache() {
|
||||
if m.mtime == -1 {
|
||||
return
|
||||
}
|
||||
fname := m.find_file()
|
||||
stat, err := os.Stat(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
statmtime := stat.ModTime().UnixNano()
|
||||
if m.mtime != statmtime {
|
||||
m.mtime = statmtime
|
||||
|
||||
data, err := file_reader.read_file(fname)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m.process_package_data(data)
|
||||
}
|
||||
}
|
||||
|
||||
// process_package_data parses the raw export data of a package archive and
// rebuilds the package's scope, its main declaration tree and the declaration
// trees of any other packages referenced by the export data. Supports both
// the binary ('B'-prefixed) and textual gc export formats.
func (m *package_file_cache) process_package_data(data []byte) {
	m.scope = new_scope(g_universe_scope)

	// find import section
	i := bytes.Index(data, []byte{'\n', '$', '$'})
	if i == -1 {
		panic(fmt.Sprintf("Can't find the import section in the package file %s", m.name))
	}
	data = data[i+len("\n$$"):]

	// main package
	m.main = new_decl(m.name, decl_package, nil)
	// create map for other packages
	m.others = make(map[string]*decl)

	var pp package_parser
	if data[0] == 'B' {
		// binary format, skip 'B\n'
		data = data[2:]
		var p gc_bin_parser
		p.init(data, m)
		pp = &p
	} else {
		// textual format, find the beginning of the package clause
		i = bytes.Index(data, []byte{'p', 'a', 'c', 'k', 'a', 'g', 'e'})
		if i == -1 {
			panic("Can't find the package clause")
		}
		data = data[i:]

		var p gc_parser
		p.init(data, m)
		pp = &p
	}

	// Distribute exported declarations: "" or "#"-prefixed entries belong to
	// this package; everything else goes into per-package buckets.
	pp.parse_export(func(pkg string, decl ast.Decl) {
		anonymify_ast(decl, decl_foreign, m.scope)
		if pkg == "" || strings.HasPrefix(pkg, "#") {
			// main package
			add_ast_decl_to_package(m.main, decl, m.scope)
		} else {
			// others
			if _, ok := m.others[pkg]; !ok {
				m.others[pkg] = new_decl(pkg, decl_package, nil)
			}
			add_ast_decl_to_package(m.others[pkg], decl, m.scope)
		}
	})

	// hack, add ourselves to the package scope
	mainName := "#" + m.defalias
	m.add_package_to_scope(mainName, m.name)

	// replace dummy package decls in package scope to actual packages
	for key := range m.scope.entities {
		if !strings.HasPrefix(key, "#") && !strings.HasPrefix(key, "!") {
			continue
		}
		pkg, ok := m.others[key]
		if !ok && key == mainName {
			pkg = m.main
		}
		// NOTE(review): when key is neither in m.others nor equal to
		// mainName, pkg is nil here — presumably replace_decl tolerates
		// that; confirm before relying on it.
		m.scope.replace_decl(key, pkg)
	}
}
|
||||
|
||||
func (m *package_file_cache) add_package_to_scope(alias, realname string) {
|
||||
d := new_decl(realname, decl_package, nil)
|
||||
m.scope.add_decl(alias, d)
|
||||
}
|
||||
|
||||
// add_ast_decl_to_package merges one exported AST declaration into the given
// package declaration. Methods become children of their receiver type's decl
// (a methods stub is created when the type hasn't been seen yet); unexported
// non-type declarations are dropped.
func add_ast_decl_to_package(pkg *decl, decl ast.Decl, scope *scope) {
	foreach_decl(decl, func(data *foreach_decl_struct) {
		class := ast_decl_class(data.decl)
		for i, name := range data.names {
			typ, v, vi := data.type_value_index(i)

			d := new_decl_full(name.Name, class, decl_foreign, typ, v, vi, scope)
			if d == nil {
				return
			}

			// Unexported types are kept (they may appear in exported
			// signatures); all other unexported declarations are skipped.
			if !name.IsExported() && d.class != decl_type {
				return
			}

			methodof := method_of(data.decl)
			if methodof != "" {
				decl := pkg.find_child(methodof)
				if decl != nil {
					decl.add_child(d)
				} else {
					// Receiver type not seen yet: create a stub to hold
					// the method until the real type shows up.
					decl = new_decl(methodof, decl_methods_stub, scope)
					decl.add_child(d)
					pkg.add_child(decl)
				}
			} else {
				decl := pkg.find_child(d.name)
				if decl != nil {
					decl.expand_or_replace(d)
				} else {
					pkg.add_child(d)
				}
			}
		}
	})
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// package_cache
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// package_cache maps a package archive path to its cached contents.
type package_cache map[string]*package_file_cache
|
||||
|
||||
func new_package_cache() package_cache {
|
||||
m := make(package_cache)
|
||||
|
||||
// add built-in "unsafe" package
|
||||
m.add_builtin_unsafe_package()
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// Function fills 'ps' set with packages from 'packages' import information.
|
||||
// In case if package is not in the cache, it creates one and adds one to the cache.
|
||||
func (c package_cache) append_packages(ps map[string]*package_file_cache, pkgs []package_import) {
|
||||
for _, m := range pkgs {
|
||||
if _, ok := ps[m.path]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if mod, ok := c[m.path]; ok {
|
||||
ps[m.path] = mod
|
||||
} else {
|
||||
mod = new_package_file_cache(m.path)
|
||||
ps[m.path] = mod
|
||||
c[m.path] = mod
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// g_builtin_unsafe_package is hand-written textual gc export data for the
// compiler-magic "unsafe" package, which has no real archive on disk. It is
// fed through process_package_data like any other package file.
var g_builtin_unsafe_package = []byte(`
import
$$
package unsafe
	type @"".Pointer uintptr
	func @"".Offsetof (? any) uintptr
	func @"".Sizeof (? any) uintptr
	func @"".Alignof (? any) uintptr
	func @"".Typeof (i interface { }) interface { }
	func @"".Reflect (i interface { }) (typ interface { }, addr @"".Pointer)
	func @"".Unreflect (typ interface { }, addr @"".Pointer) interface { }
	func @"".New (typ interface { }) @"".Pointer
	func @"".NewArray (typ interface { }, n int) @"".Pointer

$$
`)
|
||||
|
||||
func (c package_cache) add_builtin_unsafe_package() {
|
||||
pkg := new_package_file_cache_forever("unsafe", "unsafe")
|
||||
pkg.process_package_data(g_builtin_unsafe_package)
|
||||
c["unsafe"] = pkg
|
||||
}
|
|
@ -0,0 +1,762 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// gc_bin_parser
|
||||
//
|
||||
// The following part of the code may contain portions of the code from the Go
|
||||
// standard library, which tells me to retain their copyright notice:
|
||||
//
|
||||
// Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// gc_bin_parser decodes the gc compiler's binary export-data format
// (the Go 1.7 'c'/'d' encoding and the Go 1.8 extensible encoding) and
// reports every decoded top-level declaration through 'callback'.
type gc_bin_parser struct {
	data []byte // remaining undecoded bytes (consumed by rawByte)
	buf  []byte // for reading strings
	version int // export format version; -1 until the header is parsed

	// object lists
	strList []string // in order of appearance
	pkgList []string // in order of appearance
	typList []ast.Expr // in order of appearance
	callback func(pkg string, decl ast.Decl) // receives each decoded declaration
	pfc *package_file_cache
	trackAllTypes bool // if set, every type (not only named ones) gets a typList slot

	// position encoding
	posInfoFormat bool
	prevFile string
	prevLine int

	// debugging support
	debugFormat bool
	read int // bytes read

}
|
||||
|
||||
func (p *gc_bin_parser) init(data []byte, pfc *package_file_cache) {
|
||||
p.data = data
|
||||
p.version = -1 // unknown version
|
||||
p.strList = []string{""} // empty string is mapped to 0
|
||||
p.pfc = pfc
|
||||
}
|
||||
|
||||
// parse_export decodes the whole export-data blob: header/version info,
// the package record, then phase-1 objects until endTag, finally a
// self-verification object count. Each decoded declaration is passed
// to 'callback' along with its owning package name.
func (p *gc_bin_parser) parse_export(callback func(string, ast.Decl)) {
	p.callback = callback

	// read version info
	var versionstr string
	if b := p.rawByte(); b == 'c' || b == 'd' {
		// Go1.7 encoding; first byte encodes low-level
		// encoding format (compact vs debug).
		// For backward-compatibility only (avoid problems with
		// old installed packages). Newly compiled packages use
		// the extensible format string.
		// TODO(gri) Remove this support eventually; after Go1.8.
		if b == 'd' {
			p.debugFormat = true
		}
		p.trackAllTypes = p.rawByte() == 'a'
		p.posInfoFormat = p.int() != 0
		versionstr = p.string()
		if versionstr == "v1" {
			p.version = 0
		}
	} else {
		// Go1.8 extensible encoding
		// read version string and extract version number (ignore anything after the version number)
		versionstr = p.rawStringln(b)
		if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" {
			if v, err := strconv.Atoi(s[1]); err == nil && v > 0 {
				p.version = v
			}
		}
	}

	// read version specific flags - extend as necessary
	switch p.version {
	// case 4:
	// ...
	// fallthrough
	case 3, 2, 1:
		// Support for Go 1.8 type aliases will be added very
		// soon (Oct 2016). In the meantime, we make a
		// best-effort attempt to read v3 export data, failing
		// if we encounter a type alias. This allows the
		// automated builders to make progress since
		// type aliases are not yet used in practice.
		// TODO(gri): add support for type aliases.
		p.debugFormat = p.rawStringln(p.rawByte()) == "debug"
		p.trackAllTypes = p.int() != 0
		p.posInfoFormat = p.int() != 0
	case 0:
		// Go1.7 encoding format - nothing to do here
	default:
		panic(fmt.Errorf("unknown export format version %d (%q)", p.version, versionstr))
	}

	// --- generic export data ---

	// populate typList with predeclared "known" types
	p.typList = append(p.typList, predeclared...)

	// read package data; pkg() returns "#name", strip the '#' marker
	p.pfc.defalias = p.pkg()[1:]

	// read objects of phase 1 only (see cmd/compiler/internal/gc/bexport.go)
	objcount := 0
	for {
		tag := p.tagOrIndex()
		if tag == endTag {
			break
		}
		p.obj(tag)
		objcount++
	}

	// self-verification
	if count := p.int(); count != objcount {
		panic(fmt.Sprintf("got %d objects; want %d", objcount, count))
	}
}
|
||||
|
||||
// pkg decodes a package reference: a non-negative index into pkgList
// for a previously-seen package, or a packageTag followed by name and
// path for a new one. New packages are appended to pkgList. The result
// is a decorated name: "#name" for the package currently being
// imported (empty path), "!path!name" otherwise.
func (p *gc_bin_parser) pkg() string {
	// if the package was seen before, i is its index (>= 0)
	i := p.tagOrIndex()
	if i >= 0 {
		return p.pkgList[i]
	}

	// otherwise, i is the package tag (< 0)
	if i != packageTag {
		panic(fmt.Sprintf("unexpected package tag %d", i))
	}

	// read package data
	name := p.string()
	path := p.string()

	// we should never see an empty package name
	if name == "" {
		panic("empty package name in import")
	}

	// an empty path denotes the package we are currently importing;
	// it must be the first package we see
	if (path == "") != (len(p.pkgList) == 0) {
		panic(fmt.Sprintf("package path %q for pkg index %d", path, len(p.pkgList)))
	}

	var fullName string
	if path != "" {
		fullName = "!" + path + "!" + name
		p.pfc.add_package_to_scope(fullName, path)
	} else {
		fullName = "#" + name
	}

	// if the package was imported before, use that one; otherwise create a new one
	p.pkgList = append(p.pkgList, fullName)
	return p.pkgList[len(p.pkgList)-1]
}
|
||||
|
||||
// obj decodes one exported object (const, type, var or func) selected
// by 'tag' and forwards a synthesized ast.Decl to the callback.
func (p *gc_bin_parser) obj(tag int) {
	switch tag {
	case constTag:
		p.pos()
		pkg, name := p.qualifiedName()
		typ := p.typ("")
		p.skipValue() // ignore const value, gocode's not interested
		p.callback(pkg, &ast.GenDecl{
			Tok: token.CONST,
			Specs: []ast.Spec{
				&ast.ValueSpec{
					Names: []*ast.Ident{ast.NewIdent(name)},
					Type: typ,
					// the real value was skipped; a dummy "0" keeps the AST well-formed
					Values: []ast.Expr{&ast.BasicLit{Kind: token.INT, Value: "0"}},
				},
			},
		})
	case typeTag:
		// the declaration is reported as a side effect of p.typ
		_ = p.typ("")

	case varTag:
		p.pos()
		pkg, name := p.qualifiedName()
		typ := p.typ("")
		p.callback(pkg, &ast.GenDecl{
			Tok: token.VAR,
			Specs: []ast.Spec{
				&ast.ValueSpec{
					Names: []*ast.Ident{ast.NewIdent(name)},
					Type: typ,
				},
			},
		})
	case funcTag:
		p.pos()
		pkg, name := p.qualifiedName()
		params := p.paramList()
		results := p.paramList()
		p.callback(pkg, &ast.FuncDecl{
			Name: ast.NewIdent(name),
			Type: &ast.FuncType{Params: params, Results: results},
		})

	default:
		panic(fmt.Sprintf("unexpected object tag %d", tag))
	}
}
|
||||
|
||||
// pos consumes position information, if the format carries any. The
// decoded position is tracked only to keep the delta encoding in sync;
// it is otherwise discarded. Must be called wherever the encoder wrote
// a position, or the stream gets out of step.
func (p *gc_bin_parser) pos() {
	if !p.posInfoFormat {
		return
	}

	file := p.prevFile
	line := p.prevLine
	if delta := p.int(); delta != 0 {
		// line changed
		line += delta
	} else if n := p.int(); n >= 0 {
		// file changed: shared prefix of length n plus a new suffix
		file = p.prevFile[:n] + p.string()
		p.prevFile = file
		line = p.int()
	}
	p.prevLine = line

	// TODO(gri) register new position
}
|
||||
|
||||
// qualifiedName decodes a (package, name) pair. The wire format writes
// the name first, then the package reference — this read order must be
// preserved.
func (p *gc_bin_parser) qualifiedName() (pkg string, name string) {
	name = p.string()
	pkg = p.pkg()
	return pkg, name
}
|
||||
|
||||
func (p *gc_bin_parser) reserveMaybe() int {
|
||||
if p.trackAllTypes {
|
||||
p.typList = append(p.typList, nil)
|
||||
return len(p.typList) - 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) recordMaybe(idx int, t ast.Expr) ast.Expr {
|
||||
if idx == -1 {
|
||||
return t
|
||||
}
|
||||
p.typList[idx] = t
|
||||
return t
|
||||
}
|
||||
|
||||
// record unconditionally appends 't' to typList, assigning it the next
// type index.
func (p *gc_bin_parser) record(t ast.Expr) {
	p.typList = append(p.typList, t)
}
|
||||
|
||||
// parent is the package which declared the type; an empty parent means
// the package currently imported. The parent package is needed for
// exported struct fields and interface methods which don't contain
// explicit package information in the export data.
//
// typ decodes a type: a non-negative index into typList for a
// previously-seen type, or a type tag (< 0) followed by tag-specific
// data. Named types additionally report their declaration and their
// associated methods through the callback.
func (p *gc_bin_parser) typ(parent string) ast.Expr {
	// if the type was seen before, i is its index (>= 0)
	i := p.tagOrIndex()
	if i >= 0 {
		return p.typList[i]
	}

	// otherwise, i is the type tag (< 0)
	switch i {
	case namedTag:
		// read type object
		p.pos()
		parent, name := p.qualifiedName()
		tdecl := &ast.GenDecl{
			Tok: token.TYPE,
			Specs: []ast.Spec{
				&ast.TypeSpec{
					Name: ast.NewIdent(name),
				},
			},
		}

		// record it right away (underlying type can contain refs to t)
		t := &ast.SelectorExpr{X: ast.NewIdent(parent), Sel: ast.NewIdent(name)}
		p.record(t)

		// parse underlying type
		t0 := p.typ(parent)
		tdecl.Specs[0].(*ast.TypeSpec).Type = t0

		p.callback(parent, tdecl)

		// interfaces have no methods
		if _, ok := t0.(*ast.InterfaceType); ok {
			return t
		}

		// read associated methods
		for i := p.int(); i > 0; i-- {
			// TODO(gri) replace this with something closer to fieldName
			p.pos()
			name := p.string()
			if !exported(name) {
				// unexported methods carry their package explicitly
				p.pkg()
			}

			recv := p.paramList()
			params := p.paramList()
			results := p.paramList()
			p.int() // go:nointerface pragma - discarded

			strip_method_receiver(recv)
			p.callback(parent, &ast.FuncDecl{
				Recv: recv,
				Name: ast.NewIdent(name),
				Type: &ast.FuncType{Params: params, Results: results},
			})
		}
		return t
	case arrayTag:
		i := p.reserveMaybe()
		n := p.int64()
		elt := p.typ(parent)
		return p.recordMaybe(i, &ast.ArrayType{
			Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprint(n)},
			Elt: elt,
		})

	case sliceTag:
		i := p.reserveMaybe()
		elt := p.typ(parent)
		return p.recordMaybe(i, &ast.ArrayType{Len: nil, Elt: elt})

	case dddTag:
		i := p.reserveMaybe()
		elt := p.typ(parent)
		return p.recordMaybe(i, &ast.Ellipsis{Elt: elt})

	case structTag:
		i := p.reserveMaybe()
		return p.recordMaybe(i, p.structType(parent))

	case pointerTag:
		i := p.reserveMaybe()
		elt := p.typ(parent)
		return p.recordMaybe(i, &ast.StarExpr{X: elt})

	case signatureTag:
		i := p.reserveMaybe()
		params := p.paramList()
		results := p.paramList()
		return p.recordMaybe(i, &ast.FuncType{Params: params, Results: results})

	case interfaceTag:
		i := p.reserveMaybe()
		if p.int() != 0 {
			panic("unexpected embedded interface")
		}
		methods := p.methodList(parent)
		return p.recordMaybe(i, &ast.InterfaceType{Methods: &ast.FieldList{List: methods}})

	case mapTag:
		i := p.reserveMaybe()
		key := p.typ(parent)
		val := p.typ(parent)
		return p.recordMaybe(i, &ast.MapType{Key: key, Value: val})

	case chanTag:
		i := p.reserveMaybe()
		dir := ast.SEND | ast.RECV
		switch d := p.int(); d {
		case 1:
			dir = ast.RECV
		case 2:
			dir = ast.SEND
		case 3:
			// already set
		default:
			panic(fmt.Sprintf("unexpected channel dir %d", d))
		}
		elt := p.typ(parent)
		return p.recordMaybe(i, &ast.ChanType{Dir: dir, Value: elt})

	default:
		panic(fmt.Sprintf("unexpected type tag %d", i))
	}
}
|
||||
|
||||
func (p *gc_bin_parser) structType(parent string) *ast.StructType {
|
||||
var fields []*ast.Field
|
||||
if n := p.int(); n > 0 {
|
||||
fields = make([]*ast.Field, n)
|
||||
for i := range fields {
|
||||
fields[i] = p.field(parent)
|
||||
p.string() // tag, not interested in tags
|
||||
}
|
||||
}
|
||||
return &ast.StructType{Fields: &ast.FieldList{List: fields}}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) field(parent string) *ast.Field {
|
||||
p.pos()
|
||||
_, name := p.fieldName(parent)
|
||||
typ := p.typ(parent)
|
||||
|
||||
var names []*ast.Ident
|
||||
if name != "" {
|
||||
names = []*ast.Ident{ast.NewIdent(name)}
|
||||
}
|
||||
return &ast.Field{
|
||||
Names: names,
|
||||
Type: typ,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) methodList(parent string) (methods []*ast.Field) {
|
||||
if n := p.int(); n > 0 {
|
||||
methods = make([]*ast.Field, n)
|
||||
for i := range methods {
|
||||
methods[i] = p.method(parent)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) method(parent string) *ast.Field {
|
||||
p.pos()
|
||||
_, name := p.fieldName(parent)
|
||||
params := p.paramList()
|
||||
results := p.paramList()
|
||||
return &ast.Field{
|
||||
Names: []*ast.Ident{ast.NewIdent(name)},
|
||||
Type: &ast.FuncType{Params: params, Results: results},
|
||||
}
|
||||
}
|
||||
|
||||
// fieldName decodes a struct-field or method name together with its
// owning package. An empty returned name denotes an anonymous
// (embedded) field. The conditional p.pkg() reads below are part of
// the wire format and must match the encoder's version exactly.
func (p *gc_bin_parser) fieldName(parent string) (string, string) {
	name := p.string()
	pkg := parent
	if p.version == 0 && name == "_" {
		// versions < 1 don't export a package for _ fields
		// TODO: remove once versions are not supported anymore
		return pkg, name
	}
	if name != "" && !exported(name) {
		// explicitly qualified field
		if name == "?" {
			// "?" presumably marks an anonymous field with an
			// unexported base type — TODO confirm against bexport.go
			name = ""
		}
		pkg = p.pkg()
	}
	return pkg, name
}
|
||||
|
||||
func (p *gc_bin_parser) paramList() *ast.FieldList {
|
||||
n := p.int()
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
// negative length indicates unnamed parameters
|
||||
named := true
|
||||
if n < 0 {
|
||||
n = -n
|
||||
named = false
|
||||
}
|
||||
// n > 0
|
||||
flds := make([]*ast.Field, n)
|
||||
for i := range flds {
|
||||
flds[i] = p.param(named)
|
||||
}
|
||||
return &ast.FieldList{List: flds}
|
||||
}
|
||||
|
||||
// param decodes one parameter. When 'named' is false the stream
// carries no name and "?" is used as a placeholder ident. The read
// order (type, optional name+package, compiler info) mirrors the
// encoder and must not change.
func (p *gc_bin_parser) param(named bool) *ast.Field {
	t := p.typ("")

	name := "?"
	if named {
		name = p.string()
		if name == "" {
			panic("expected named parameter")
		}
		if name != "_" {
			// non-blank named parameters are followed by a package ref
			p.pkg()
		}
		if i := strings.Index(name, "·"); i > 0 {
			name = name[:i] // cut off gc-specific parameter numbering
		}
	}

	// read and discard compiler-specific info
	p.string()

	return &ast.Field{
		Names: []*ast.Ident{ast.NewIdent(name)},
		Type: t,
	}
}
|
||||
|
||||
// exported reports whether 'name' denotes an exported Go identifier,
// i.e. whether its first rune is upper case.
func exported(name string) bool {
	first, _ := utf8.DecodeRuneInString(name)
	return unicode.IsUpper(first)
}
|
||||
|
||||
func (p *gc_bin_parser) skipValue() {
|
||||
switch tag := p.tagOrIndex(); tag {
|
||||
case falseTag, trueTag:
|
||||
case int64Tag:
|
||||
p.int64()
|
||||
case floatTag:
|
||||
p.float()
|
||||
case complexTag:
|
||||
p.float()
|
||||
p.float()
|
||||
case stringTag:
|
||||
p.string()
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected value tag %d", tag))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *gc_bin_parser) float() {
|
||||
sign := p.int()
|
||||
if sign == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
p.int() // exp
|
||||
p.string() // mant
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Low-level decoders
|
||||
|
||||
// tagOrIndex reads a value that is either a negative tag constant or a
// non-negative index into one of the object lists.
func (p *gc_bin_parser) tagOrIndex() int {
	if p.debugFormat {
		p.marker('t')
	}

	return int(p.rawInt64())
}
|
||||
|
||||
func (p *gc_bin_parser) int() int {
|
||||
x := p.int64()
|
||||
if int64(int(x)) != x {
|
||||
panic("exported integer too large")
|
||||
}
|
||||
return int(x)
|
||||
}
|
||||
|
||||
// int64 reads one varint-encoded integer, checking the debug marker
// first when the stream is in debug format.
func (p *gc_bin_parser) int64() int64 {
	if p.debugFormat {
		p.marker('i')
	}

	return p.rawInt64()
}
|
||||
|
||||
// string decodes a string: a non-negative index into strList for a
// previously-seen string, or a negative length followed by that many
// raw bytes. Newly decoded strings are appended to strList so later
// occurrences can be referenced by index. p.buf is reused as scratch
// space to avoid per-call allocations.
func (p *gc_bin_parser) string() string {
	if p.debugFormat {
		p.marker('s')
	}
	// if the string was seen before, i is its index (>= 0)
	// (the empty string is at index 0)
	i := p.rawInt64()
	if i >= 0 {
		return p.strList[i]
	}
	// otherwise, i is the negative string length (< 0)
	if n := int(-i); n <= cap(p.buf) {
		p.buf = p.buf[:n]
	} else {
		p.buf = make([]byte, n)
	}
	for i := range p.buf {
		p.buf[i] = p.rawByte()
	}
	s := string(p.buf)
	p.strList = append(p.strList, s)
	return s
}
|
||||
|
||||
func (p *gc_bin_parser) marker(want byte) {
|
||||
if got := p.rawByte(); got != want {
|
||||
panic(fmt.Sprintf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read))
|
||||
}
|
||||
|
||||
pos := p.read
|
||||
if n := int(p.rawInt64()); n != pos {
|
||||
panic(fmt.Sprintf("incorrect position: got %d; want %d", n, pos))
|
||||
}
|
||||
}
|
||||
|
||||
// rawInt64 should only be used by low-level decoders.
|
||||
func (p *gc_bin_parser) rawInt64() int64 {
|
||||
i, err := binary.ReadVarint(p)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("read error: %v", err))
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// rawStringln should only be used to read the initial version string.
|
||||
func (p *gc_bin_parser) rawStringln(b byte) string {
|
||||
p.buf = p.buf[:0]
|
||||
for b != '\n' {
|
||||
p.buf = append(p.buf, b)
|
||||
b = p.rawByte()
|
||||
}
|
||||
return string(p.buf)
|
||||
}
|
||||
|
||||
// ReadByte adapts rawByte to the io.ByteReader interface; needed for
// binary.ReadVarint in rawInt64. It never returns an error (rawByte
// panics on malformed data instead).
func (p *gc_bin_parser) ReadByte() (byte, error) {
	return p.rawByte(), nil
}
|
||||
|
||||
// rawByte is the bottleneck interface for reading p.data.
// It unescapes '|' 'S' to '$' and '|' '|' to '|'.
// rawByte should only be used by low-level decoders.
func (p *gc_bin_parser) rawByte() byte {
	b := p.data[0]
	r := 1
	if b == '|' {
		// '|' introduces a two-byte escape sequence
		b = p.data[1]
		r = 2
		switch b {
		case 'S':
			b = '$'
		case '|':
			// nothing to do
		default:
			panic("unexpected escape sequence in export data")
		}
	}
	p.data = p.data[r:]
	p.read += r // keep the byte count in sync for debug markers

	return b

}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Export format
|
||||
|
||||
// Tags. Must be < 0.
// These values mirror the gc compiler's binary export format
// (cmd/compile/internal/gc/bexport.go); the iota order IS the wire
// format and must not be rearranged.
const (
	// Objects
	packageTag = -(iota + 1)
	constTag
	typeTag
	varTag
	funcTag
	endTag

	// Types
	namedTag
	arrayTag
	sliceTag
	dddTag
	structTag
	pointerTag
	signatureTag
	interfaceTag
	mapTag
	chanTag

	// Values
	falseTag
	trueTag
	int64Tag
	floatTag
	fractionTag // not used by gc
	complexTag
	stringTag
	unknownTag // not used by gc (only appears in packages with errors)
)
|
||||
|
||||
// predeclared holds AST expressions for the universe-scope types in
// the exact order the binary export format assigns them type indices;
// it seeds typList in parse_export, so the order must not change.
var predeclared = []ast.Expr{
	// basic types
	ast.NewIdent("bool"),
	ast.NewIdent("int"),
	ast.NewIdent("int8"),
	ast.NewIdent("int16"),
	ast.NewIdent("int32"),
	ast.NewIdent("int64"),
	ast.NewIdent("uint"),
	ast.NewIdent("uint8"),
	ast.NewIdent("uint16"),
	ast.NewIdent("uint32"),
	ast.NewIdent("uint64"),
	ast.NewIdent("uintptr"),
	ast.NewIdent("float32"),
	ast.NewIdent("float64"),
	ast.NewIdent("complex64"),
	ast.NewIdent("complex128"),
	ast.NewIdent("string"),

	// aliases
	ast.NewIdent("byte"),
	ast.NewIdent("rune"),

	// error
	ast.NewIdent("error"),

	// TODO(nsf): don't think those are used in just package type info,
	// maybe for consts, but we are not interested in that
	// untyped types
	ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedBool],
	ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedInt],
	ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedRune],
	ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedFloat],
	ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedComplex],
	ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedString],
	ast.NewIdent(">_<"), // TODO: types.Typ[types.UntypedNil],

	// package unsafe
	&ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")},

	// invalid type
	ast.NewIdent(">_<"), // TODO: types.Typ[types.Invalid], // only appears in packages with errors

	// used internally by gc; never used by this package or in .a files
	ast.NewIdent("any"),
}
|
|
@ -0,0 +1,678 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"text/scanner"
|
||||
)
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// gc_parser
|
||||
//
|
||||
// The following part of the code may contain portions of the code from the Go
|
||||
// standard library, which tells me to retain their copyright notice:
|
||||
//
|
||||
// Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// gc_parser parses the textual gc export-data format using
// text/scanner as its lexer.
type gc_parser struct {
	scanner scanner.Scanner
	tok rune   // current token kind
	lit string // literal text of the current token (idents, ints, strings)
	path_to_name map[string]string // import path -> local package alias
	beautify bool
	pfc *package_file_cache
}
|
||||
|
||||
// init configures the scanner over 'data' and resets the parser state.
func (p *gc_parser) init(data []byte, pfc *package_file_cache) {
	p.scanner.Init(bytes.NewReader(data))
	p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
	p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanStrings |
		scanner.ScanComments | scanner.ScanChars | scanner.SkipComments
	// '\n' is deliberately not whitespace: newlines are significant
	p.scanner.Whitespace = 1<<'\t' | 1<<' ' | 1<<'\r' | 1<<'\v' | 1<<'\f'
	p.scanner.Filename = "package.go"
	p.next()
	// add the built-in "unsafe" package to the path_to_name map
	p.path_to_name = map[string]string{"unsafe": "unsafe"}
	p.pfc = pfc
}
|
||||
|
||||
func (p *gc_parser) next() {
|
||||
p.tok = p.scanner.Scan()
|
||||
switch p.tok {
|
||||
case scanner.Ident, scanner.Int, scanner.String:
|
||||
p.lit = p.scanner.TokenText()
|
||||
default:
|
||||
p.lit = ""
|
||||
}
|
||||
}
|
||||
|
||||
// error aborts the parse by panicking with 'msg'; the panic is
// expected to be recovered by the parser's caller.
func (p *gc_parser) error(msg string) {
	panic(errors.New(msg))
}
|
||||
|
||||
// errorf is the printf-style convenience wrapper around error.
func (p *gc_parser) errorf(format string, args ...interface{}) {
	p.error(fmt.Sprintf(format, args...))
}
|
||||
|
||||
// expect verifies the current token has kind 'tok', returns its
// literal text and advances to the next token; it aborts the parse on
// a mismatch.
func (p *gc_parser) expect(tok rune) string {
	lit := p.lit
	if p.tok != tok {
		p.errorf("expected %s, got %s (%q)", scanner.TokenString(tok),
			scanner.TokenString(p.tok), lit)
	}
	p.next()
	return lit
}
|
||||
|
||||
func (p *gc_parser) expect_keyword(keyword string) {
|
||||
lit := p.expect(scanner.Ident)
|
||||
if lit != keyword {
|
||||
p.errorf("expected keyword: %s, got: %q", keyword, lit)
|
||||
}
|
||||
}
|
||||
|
||||
// expect_special consumes a multi-character operator such as "..."
// that the scanner delivers one rune at a time, verifying that the
// runes are adjacent in the input (no intervening whitespace).
func (p *gc_parser) expect_special(what string) {
	i := 0
	for i < len(what) {
		if p.tok != rune(what[i]) {
			break
		}

		// peek at the rune following the current token: whitespace or
		// EOF in the middle of the operator means it is broken apart
		nc := p.scanner.Peek()
		if i != len(what)-1 && nc <= ' ' {
			break
		}

		p.next()
		i++
	}

	if i < len(what) {
		p.errorf("expected: %q, got: %q", what, what[0:i])
	}
}
|
||||
|
||||
// dotIdentifier = "?" | ( ident | '·' ) { ident | int | '·' } .
// we're doing lexer job here, kind of
//
// parse_dot_ident glues adjacent ident/int/'·' tokens back into one
// identifier, then strips a trailing gc-generated "·N" numbering
// suffix if present.
func (p *gc_parser) parse_dot_ident() string {
	if p.tok == '?' {
		p.next()
		return "?"
	}

	ident := ""
	sep := 'x'    // any non-space rune so the loop is entered
	i, j := 0, -1 // i: running length counter; j: position of the last '·'
	for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' {
		ident += p.lit
		if p.tok == '·' {
			// p.lit is empty for '·' tokens; append the rune explicitly
			ident += "·"
			j = i
			i++
		}
		i += len(p.lit)
		sep = p.scanner.Peek()
		p.next()
	}
	// middot = \xc2\xb7
	// NOTE(review): '·' is two bytes but counted as one above; the
	// ident[j+2] access below appears to rely on that — confirm
	// against real gc export data before touching this arithmetic.
	if j != -1 && i > j+1 {
		c := ident[j+2]
		if c >= '0' && c <= '9' {
			ident = ident[0:j]
		}
	}
	return ident
}
|
||||
|
||||
// ImportPath = string_lit .
|
||||
// quoted name of the path, but we return it as an identifier, taking an alias
|
||||
// from 'pathToAlias' map, it is filled by import statements
|
||||
func (p *gc_parser) parse_package() *ast.Ident {
|
||||
path, err := strconv.Unquote(p.expect(scanner.String))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return ast.NewIdent(path)
|
||||
}
|
||||
|
||||
// ExportedName = "@" ImportPath "." dotIdentifier .
// parse_exported_name builds pkg.name as a selector expression; an
// empty import path refers to the package currently being imported.
func (p *gc_parser) parse_exported_name() *ast.SelectorExpr {
	p.expect('@')
	pkg := p.parse_package()
	if pkg.Name == "" {
		// empty path: the package being imported, decorated with '#'
		pkg.Name = "#" + p.pfc.defalias
	} else {
		// translate the path to the alias recorded by import statements
		pkg.Name = p.path_to_name[pkg.Name]
	}
	p.expect('.')
	name := ast.NewIdent(p.parse_dot_ident())
	return &ast.SelectorExpr{X: pkg, Sel: name}
}
|
||||
|
||||
// Name = identifier | "?" | ExportedName .
|
||||
func (p *gc_parser) parse_name() (string, ast.Expr) {
|
||||
switch p.tok {
|
||||
case scanner.Ident:
|
||||
name := p.lit
|
||||
p.next()
|
||||
return name, ast.NewIdent(name)
|
||||
case '?':
|
||||
p.next()
|
||||
return "?", ast.NewIdent("?")
|
||||
case '@':
|
||||
en := p.parse_exported_name()
|
||||
return en.Sel.Name, en
|
||||
}
|
||||
p.error("name expected")
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Field = Name Type [ string_lit ] .
|
||||
func (p *gc_parser) parse_field() *ast.Field {
|
||||
var tag string
|
||||
name, _ := p.parse_name()
|
||||
typ := p.parse_type()
|
||||
if p.tok == scanner.String {
|
||||
tag = p.expect(scanner.String)
|
||||
}
|
||||
|
||||
var names []*ast.Ident
|
||||
if name != "?" {
|
||||
names = []*ast.Ident{ast.NewIdent(name)}
|
||||
}
|
||||
|
||||
return &ast.Field{
|
||||
Names: names,
|
||||
Type: typ,
|
||||
Tag: &ast.BasicLit{Kind: token.STRING, Value: tag},
|
||||
}
|
||||
}
|
||||
|
||||
// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] .
|
||||
func (p *gc_parser) parse_parameter() *ast.Field {
|
||||
// name
|
||||
name, _ := p.parse_name()
|
||||
|
||||
// type
|
||||
var typ ast.Expr
|
||||
if p.tok == '.' {
|
||||
p.expect_special("...")
|
||||
typ = &ast.Ellipsis{Elt: p.parse_type()}
|
||||
} else {
|
||||
typ = p.parse_type()
|
||||
}
|
||||
|
||||
var tag string
|
||||
if p.tok == scanner.String {
|
||||
tag = p.expect(scanner.String)
|
||||
}
|
||||
|
||||
return &ast.Field{
|
||||
Names: []*ast.Ident{ast.NewIdent(name)},
|
||||
Type: typ,
|
||||
Tag: &ast.BasicLit{Kind: token.STRING, Value: tag},
|
||||
}
|
||||
}
|
||||
|
||||
// Parameters = "(" [ ParameterList ] ")" .
|
||||
// ParameterList = { Parameter "," } Parameter .
|
||||
func (p *gc_parser) parse_parameters() *ast.FieldList {
|
||||
flds := []*ast.Field{}
|
||||
parse_parameter := func() {
|
||||
par := p.parse_parameter()
|
||||
flds = append(flds, par)
|
||||
}
|
||||
|
||||
p.expect('(')
|
||||
if p.tok != ')' {
|
||||
parse_parameter()
|
||||
for p.tok == ',' {
|
||||
p.next()
|
||||
parse_parameter()
|
||||
}
|
||||
}
|
||||
p.expect(')')
|
||||
return &ast.FieldList{List: flds}
|
||||
}
|
||||
|
||||
// Signature = Parameters [ Result ] .
|
||||
// Result = Type | Parameters .
|
||||
func (p *gc_parser) parse_signature() *ast.FuncType {
|
||||
var params *ast.FieldList
|
||||
var results *ast.FieldList
|
||||
|
||||
params = p.parse_parameters()
|
||||
switch p.tok {
|
||||
case scanner.Ident, '[', '*', '<', '@':
|
||||
fld := &ast.Field{Type: p.parse_type()}
|
||||
results = &ast.FieldList{List: []*ast.Field{fld}}
|
||||
case '(':
|
||||
results = p.parse_parameters()
|
||||
}
|
||||
return &ast.FuncType{Params: params, Results: results}
|
||||
}
|
||||
|
||||
// MethodOrEmbedSpec = Name [ Signature ] .
//
// parse_method_or_embed_spec reads one interface element: a method (name
// followed by a signature) or an embedded interface (bare name).
func (p *gc_parser) parse_method_or_embed_spec() *ast.Field {
	name, nameexpr := p.parse_name()
	if p.tok != '(' {
		// No signature follows: embedded interface.
		return &ast.Field{Type: nameexpr}
	}
	sig := p.parse_signature()
	return &ast.Field{
		Names: []*ast.Ident{ast.NewIdent(name)},
		Type:  sig,
	}
}
|
||||
|
||||
// int_lit = [ "-" | "+" ] { "0" ... "9" } .
//
// parse_int consumes an optionally signed integer literal.
func (p *gc_parser) parse_int() {
	if p.tok == '-' || p.tok == '+' {
		p.next() // optional sign
	}
	p.expect(scanner.Int)
}
|
||||
|
||||
// number = int_lit [ "p" int_lit ] .
//
// parse_number consumes a numeric literal; "p" introduces the binary
// exponent of a gc-format floating-point constant.
func (p *gc_parser) parse_number() {
	p.parse_int()
	if p.lit == "p" {
		p.next()
		p.parse_int()
	}
}
|
||||
|
||||
//-------------------------------------------------------------------------------
|
||||
// gc_parser.types
|
||||
//-------------------------------------------------------------------------------
|
||||
|
||||
// InterfaceType = "interface" "{" [ MethodOrEmbedList ] "}" .
// MethodOrEmbedList = MethodOrEmbedSpec { ";" MethodOrEmbedSpec } .
//
// parse_interface_type reads an interface literal with its semicolon-
// separated method/embed specs.
func (p *gc_parser) parse_interface_type() ast.Expr {
	var methods []*ast.Field

	p.expect_keyword("interface")
	p.expect('{')
	if p.tok != '}' {
		methods = append(methods, p.parse_method_or_embed_spec())
		for p.tok == ';' {
			p.next()
			methods = append(methods, p.parse_method_or_embed_spec())
		}
	}
	p.expect('}')
	return &ast.InterfaceType{Methods: &ast.FieldList{List: methods}}
}
|
||||
|
||||
// StructType = "struct" "{" [ FieldList ] "}" .
// FieldList = Field { ";" Field } .
//
// parse_struct_type reads a struct literal with its semicolon-separated
// fields.
func (p *gc_parser) parse_struct_type() ast.Expr {
	var fields []*ast.Field

	p.expect_keyword("struct")
	p.expect('{')
	if p.tok != '}' {
		fields = append(fields, p.parse_field())
		for p.tok == ';' {
			p.next()
			fields = append(fields, p.parse_field())
		}
	}
	p.expect('}')
	return &ast.StructType{Fields: &ast.FieldList{List: fields}}
}
|
||||
|
||||
// MapType = "map" "[" Type "]" Type .
//
// parse_map_type reads a map type: key type in brackets, then value type.
func (p *gc_parser) parse_map_type() ast.Expr {
	p.expect_keyword("map")
	p.expect('[')
	keyType := p.parse_type()
	p.expect(']')
	valueType := p.parse_type()
	return &ast.MapType{Key: keyType, Value: valueType}
}
|
||||
|
||||
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
|
||||
func (p *gc_parser) parse_chan_type() ast.Expr {
|
||||
dir := ast.SEND | ast.RECV
|
||||
if p.tok == scanner.Ident {
|
||||
p.expect_keyword("chan")
|
||||
if p.tok == '<' {
|
||||
p.expect_special("<-")
|
||||
dir = ast.SEND
|
||||
}
|
||||
} else {
|
||||
p.expect_special("<-")
|
||||
p.expect_keyword("chan")
|
||||
dir = ast.RECV
|
||||
}
|
||||
|
||||
elt := p.parse_type()
|
||||
return &ast.ChanType{Dir: dir, Value: elt}
|
||||
}
|
||||
|
||||
// ArrayOrSliceType = ArrayType | SliceType .
// ArrayType = "[" int_lit "]" Type .
// SliceType = "[" "]" Type .
//
// parse_array_or_slice_type disambiguates by whether a length literal
// follows the opening bracket; slices get Len == nil.
func (p *gc_parser) parse_array_or_slice_type() ast.Expr {
	p.expect('[')

	if p.tok == ']' {
		// SliceType
		p.next() // skip ']'
		return &ast.ArrayType{Len: nil, Elt: p.parse_type()}
	}

	// ArrayType
	length := p.expect(scanner.Int)
	p.expect(']')
	return &ast.ArrayType{
		Len: &ast.BasicLit{Kind: token.INT, Value: length},
		Elt: p.parse_type(),
	}
}
|
||||
|
||||
// Type =
//	BasicType | TypeName | ArrayType | SliceType | StructType |
//	PointerType | FuncType | InterfaceType | MapType | ChanType |
//	"(" Type ")" .
// BasicType = ident .
// TypeName = ExportedName .
// SliceType = "[" "]" Type .
// PointerType = "*" Type .
// FuncType = "func" Signature .
//
// parse_type dispatches on the current token (and, for identifiers, on
// the keyword) to the specific type parser. Calls p.errorf on anything
// unexpected and returns nil.
func (p *gc_parser) parse_type() ast.Expr {
	switch p.tok {
	case scanner.Ident:
		// Composite types are introduced by keywords; any other
		// identifier is a plain (basic) type name.
		switch p.lit {
		case "struct":
			return p.parse_struct_type()
		case "func":
			p.next()
			return p.parse_signature()
		case "interface":
			return p.parse_interface_type()
		case "map":
			return p.parse_map_type()
		case "chan":
			return p.parse_chan_type()
		}
		ident := p.lit
		p.next()
		return ast.NewIdent(ident)
	case '@':
		// package-qualified exported name
		return p.parse_exported_name()
	case '[':
		return p.parse_array_or_slice_type()
	case '*':
		p.next()
		return &ast.StarExpr{X: p.parse_type()}
	case '<':
		return p.parse_chan_type()
	case '(':
		// parenthesized type
		p.next()
		inner := p.parse_type()
		p.expect(')')
		return inner
	}
	p.errorf("unexpected token: %s", scanner.TokenString(p.tok))
	return nil
}
|
||||
|
||||
//-------------------------------------------------------------------------------
|
||||
// gc_parser.declarations
|
||||
//-------------------------------------------------------------------------------
|
||||
|
||||
// ImportDecl = "import" identifier string_lit .
//
// parse_import_decl registers an imported package: it records a unique
// name built from the package path and alias, and adds the package to
// the file scope.
func (p *gc_parser) parse_import_decl() {
	p.expect_keyword("import")
	alias := p.expect(scanner.Ident)
	pkg := p.parse_package()
	fullName := "!" + pkg.Name + "!" + alias
	p.path_to_name[pkg.Name] = fullName
	p.pfc.add_package_to_scope(fullName, pkg.Name)
}
|
||||
|
||||
// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
// Literal = bool_lit | int_lit | float_lit | complex_lit | string_lit .
// bool_lit = "true" | "false" .
// complex_lit = "(" float_lit "+" float_lit ")" .
// rune_lit = "(" int_lit "+" int_lit ")" .
// string_lit = `"` { unicode_char } `"` .
//
// parse_const_decl returns the package name and a const GenDecl. The
// actual constant value is scanned past but discarded; the emitted spec
// carries a dummy "0" value (only the name/type matter downstream).
func (p *gc_parser) parse_const_decl() (string, *ast.GenDecl) {
	// TODO: do we really need actual const value? gocode doesn't use this
	p.expect_keyword("const")
	name := p.parse_exported_name()

	// Type is optional; absent for untyped constants.
	var typ ast.Expr
	if p.tok != '=' {
		typ = p.parse_type()
	}

	p.expect('=')

	// skip the value
	switch p.tok {
	case scanner.Ident:
		// must be bool, true or false
		p.next()
	case '-', '+', scanner.Int:
		// number
		p.parse_number()
	case '(':
		// complex_lit or rune_lit
		p.next() // skip '('
		if p.tok == scanner.Char {
			p.next()
		} else {
			p.parse_number()
		}
		p.expect('+')
		p.parse_number()
		p.expect(')')
	case scanner.Char:
		p.next()
	case scanner.String:
		p.next()
	default:
		p.error("expected literal")
	}

	// name is a SelectorExpr: X is the owning package, Sel the constant.
	return name.X.(*ast.Ident).Name, &ast.GenDecl{
		Tok: token.CONST,
		Specs: []ast.Spec{
			&ast.ValueSpec{
				Names:  []*ast.Ident{name.Sel},
				Type:   typ,
				Values: []ast.Expr{&ast.BasicLit{Kind: token.INT, Value: "0"}},
			},
		},
	}
}
|
||||
|
||||
// TypeDecl = "type" ExportedName Type .
//
// parse_type_decl returns the owning package name and a TYPE GenDecl for
// the exported type.
func (p *gc_parser) parse_type_decl() (string, *ast.GenDecl) {
	p.expect_keyword("type")
	exported := p.parse_exported_name()
	underlying := p.parse_type()

	spec := &ast.TypeSpec{
		Name: exported.Sel,
		Type: underlying,
	}
	return exported.X.(*ast.Ident).Name, &ast.GenDecl{
		Tok:   token.TYPE,
		Specs: []ast.Spec{spec},
	}
}
|
||||
|
||||
// VarDecl = "var" ExportedName Type .
//
// parse_var_decl returns the owning package name and a VAR GenDecl for
// the exported variable.
func (p *gc_parser) parse_var_decl() (string, *ast.GenDecl) {
	p.expect_keyword("var")
	exported := p.parse_exported_name()
	varType := p.parse_type()

	spec := &ast.ValueSpec{
		Names: []*ast.Ident{exported.Sel},
		Type:  varType,
	}
	return exported.X.(*ast.Ident).Name, &ast.GenDecl{
		Tok:   token.VAR,
		Specs: []ast.Spec{spec},
	}
}
|
||||
|
||||
// FuncBody = "{" ... "}" .
|
||||
func (p *gc_parser) parse_func_body() {
|
||||
p.expect('{')
|
||||
for i := 1; i > 0; p.next() {
|
||||
switch p.tok {
|
||||
case '{':
|
||||
i++
|
||||
case '}':
|
||||
i--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FuncDecl = "func" ExportedName Signature [ FuncBody ] .
//
// parse_func_decl returns the owning package name and a FuncDecl; an
// inlined body, if present, is skipped.
func (p *gc_parser) parse_func_decl() (string, *ast.FuncDecl) {
	// "func" was already consumed by lookahead
	exported := p.parse_exported_name()
	sig := p.parse_signature()
	if p.tok == '{' {
		// inlined body is irrelevant for completion; skip it
		p.parse_func_body()
	}
	return exported.X.(*ast.Ident).Name, &ast.FuncDecl{
		Name: exported.Sel,
		Type: sig,
	}
}
|
||||
|
||||
// strip_method_receiver rewrites a method receiver of the form "pkg.Type"
// (or "*pkg.Type") to just "Type" (or "*Type") in place, and returns the
// stripped package name.
//
// NOTE(review): if the receiver type is neither *ast.StarExpr nor
// *ast.SelectorExpr, sel stays nil and the dereference below panics —
// presumably gc export data always package-qualifies receivers; confirm.
func strip_method_receiver(recv *ast.FieldList) string {
	var sel *ast.SelectorExpr

	// find selector expression
	typ := recv.List[0].Type
	switch t := typ.(type) {
	case *ast.StarExpr:
		sel = t.X.(*ast.SelectorExpr)
	case *ast.SelectorExpr:
		sel = t
	}

	// extract package path
	pkg := sel.X.(*ast.Ident).Name

	// write back stripped type
	switch t := typ.(type) {
	case *ast.StarExpr:
		t.X = sel.Sel
	case *ast.SelectorExpr:
		recv.List[0].Type = sel.Sel
	}

	return pkg
}
|
||||
|
||||
// MethodDecl = "func" Receiver Name Signature .
// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" [ FuncBody ] .
//
// parse_method_decl returns the receiver's package name and a FuncDecl
// whose receiver type has been stripped of its package qualifier.
func (p *gc_parser) parse_method_decl() (string, *ast.FuncDecl) {
	recv := p.parse_parameters()
	pkg := strip_method_receiver(recv)
	methodName, _ := p.parse_name()
	sig := p.parse_signature()
	if p.tok == '{' {
		p.parse_func_body()
	}
	return pkg, &ast.FuncDecl{
		Recv: recv,
		Name: ast.NewIdent(methodName),
		Type: sig,
	}
}
|
||||
|
||||
// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
//
// parse_decl reads one top-level declaration and the terminating newline.
// Imports register packages only and return a nil decl.
func (p *gc_parser) parse_decl() (pkg string, decl ast.Decl) {
	switch p.lit {
	case "import":
		p.parse_import_decl()
	case "const":
		pkg, decl = p.parse_const_decl()
	case "type":
		pkg, decl = p.parse_type_decl()
	case "var":
		pkg, decl = p.parse_var_decl()
	case "func":
		p.next()
		// a '(' after "func" introduces a method receiver
		if p.tok == '(' {
			pkg, decl = p.parse_method_decl()
		} else {
			pkg, decl = p.parse_func_decl()
		}
	}
	p.expect('\n')
	return
}
|
||||
|
||||
// Export = PackageClause { Decl } "$$" .
// PackageClause = "package" identifier [ "safe" ] "\n" .
//
// parse_export drives the whole export-data parse: it reads the package
// clause, then feeds every non-nil declaration to callback together with
// the name of the package it belongs to.
func (p *gc_parser) parse_export(callback func(string, ast.Decl)) {
	p.expect_keyword("package")
	p.pfc.defalias = p.expect(scanner.Ident)
	if p.tok != '\n' {
		// optional "safe" marker
		p.expect_keyword("safe")
	}
	p.expect('\n')

	// '$' begins the "$$" end-of-export marker.
	for p.tok != '$' && p.tok != scanner.EOF {
		pkg, decl := p.parse_decl()
		if decl != nil {
			callback(pkg, decl)
		}
	}
}
|
|
@ -0,0 +1,7 @@
|
|||
// +build !go1.7
|
||||
|
||||
package main
|
||||
|
||||
// Pre-go1.7 fallback (see the "+build !go1.7" tag above): map the
// "context" identifier to the golang.org/x/net vendored package, since
// the standard library "context" does not exist yet.
func init() {
	knownPackageIdents["context"] = "golang.org/x/net/context"
}
|
|
@ -0,0 +1,141 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
)
|
||||
|
||||
// All the code in this file serves single purpose:
|
||||
// It separates a function with the cursor inside and the rest of the code. I'm
|
||||
// doing that, because sometimes parser is not able to recover itself from an
|
||||
// error and the autocompletion results become less complete.
|
||||
|
||||
// tok_pos_pair couples a scanned token with its position in the file set.
type tok_pos_pair struct {
	tok token.Token
	pos token.Pos
}
|
||||
|
||||
// tok_collection accumulates every token of a file so brace depth can be
// analyzed in both directions around a cursor position.
type tok_collection struct {
	tokens []tok_pos_pair
	fset   *token.FileSet
}
|
||||
|
||||
// next scans one token from s into the collection; it reports false at EOF.
func (this *tok_collection) next(s *scanner.Scanner) bool {
	pos, tok, _ := s.Scan()
	if tok != token.EOF {
		this.tokens = append(this.tokens, tok_pos_pair{tok, pos})
		return true
	}
	return false
}
|
||||
|
||||
// find_decl_beg returns the byte offset where the declaration containing
// token index 'pos' begins, or -1 when it cannot be determined.
//
// It walks backwards from 'pos' tracking brace depth; the most negative
// depth seen marks the opening of the outermost enclosing block. A second
// backward pass then looks for the nearest semicolon at that same depth,
// which better approximates the start of the declaration.
func (this *tok_collection) find_decl_beg(pos int) int {
	lowest := 0
	lowpos := -1
	lowi := -1
	cur := 0
	for i := pos; i >= 0; i-- {
		t := this.tokens[i]
		switch t.tok {
		case token.RBRACE:
			cur++
		case token.LBRACE:
			cur--
		}

		// new depth minimum: remember where (and at which index)
		if cur < lowest {
			lowest = cur
			lowpos = this.fset.Position(t.pos).Offset
			lowi = i
		}
	}

	cur = lowest
	for i := lowi - 1; i >= 0; i-- {
		t := this.tokens[i]
		switch t.tok {
		case token.RBRACE:
			cur++
		case token.LBRACE:
			cur--
		}
		if t.tok == token.SEMICOLON && cur == lowest {
			lowpos = this.fset.Position(t.pos).Offset
			break
		}
	}

	return lowpos
}
|
||||
|
||||
// find_decl_end returns the byte offset of the closing brace that ends
// the declaration containing token index 'pos', or -1 when none is found.
//
// It walks forward tracking brace depth; each new surplus of closing
// braces pushes the candidate end position further out, so the final
// value is the outermost '}' reached.
func (this *tok_collection) find_decl_end(pos int) int {
	highest := 0
	highpos := -1
	cur := 0

	// don't let the block's own opening brace cancel its closing one
	if this.tokens[pos].tok == token.LBRACE {
		pos++
	}

	for i := pos; i < len(this.tokens); i++ {
		t := this.tokens[i]
		switch t.tok {
		case token.RBRACE:
			cur++
		case token.LBRACE:
			cur--
		}

		if cur > highest {
			highest = cur
			highpos = this.fset.Position(t.pos).Offset
		}
	}

	return highpos
}
|
||||
|
||||
func (this *tok_collection) find_outermost_scope(cursor int) (int, int) {
|
||||
pos := 0
|
||||
|
||||
for i, t := range this.tokens {
|
||||
if cursor <= this.fset.Position(t.pos).Offset {
|
||||
break
|
||||
}
|
||||
pos = i
|
||||
}
|
||||
|
||||
return this.find_decl_beg(pos), this.find_decl_end(pos)
|
||||
}
|
||||
|
||||
// return new cursor position, file without ripped part and the ripped part itself
// variants:
//	new-cursor, file-without-ripped-part, ripped-part
//	old-cursor, file, nil
func (this *tok_collection) rip_off_decl(file []byte, cursor int) (int, []byte, []byte) {
	// tokenize the whole file first
	this.fset = token.NewFileSet()
	var s scanner.Scanner
	s.Init(this.fset.AddFile("", this.fset.Base(), len(file)), file, nil, scanner.ScanComments)
	for this.next(&s) {
	}

	beg, end := this.find_outermost_scope(cursor)
	if beg == -1 || end == -1 {
		// no enclosing declaration found: leave the input untouched
		return cursor, file, nil
	}

	// copy out the enclosing declaration [beg, end]
	ripped := make([]byte, end+1-beg)
	copy(ripped, file[beg:end+1])

	// and splice the remainder back together
	newfile := make([]byte, len(file)-len(ripped))
	copy(newfile, file[:beg])
	copy(newfile[beg:], file[end+1:])

	return cursor - beg, newfile, ripped
}
|
||||
|
||||
func rip_off_decl(file []byte, cursor int) (int, []byte, []byte) {
|
||||
var tc tok_collection
|
||||
return tc.rip_off_decl(file, cursor)
|
||||
}
|
|
@ -0,0 +1,138 @@
|
|||
// WARNING! Autogenerated by goremote, don't touch.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/rpc"
|
||||
)
|
||||
|
||||
// RPC is the stateless receiver type all autogenerated server wrappers
// hang off of.
type RPC struct {
}
|
||||
|
||||
// wrapper for: server_auto_complete

// Args_auto_complete packs the arguments of server_auto_complete for RPC
// transport.
type Args_auto_complete struct {
	Arg0 []byte
	Arg1 string
	Arg2 int
	Arg3 go_build_context
}

// Reply_auto_complete packs the results of server_auto_complete.
type Reply_auto_complete struct {
	Arg0 []candidate
	Arg1 int
}

// RPC_auto_complete is the server-side stub: it unpacks args, calls
// server_auto_complete and packs the results into reply.
func (r *RPC) RPC_auto_complete(args *Args_auto_complete, reply *Reply_auto_complete) error {
	reply.Arg0, reply.Arg1 = server_auto_complete(args.Arg0, args.Arg1, args.Arg2, args.Arg3)
	return nil
}

// client_auto_complete is the client-side stub; it panics on transport
// errors.
func client_auto_complete(cli *rpc.Client, Arg0 []byte, Arg1 string, Arg2 int, Arg3 go_build_context) (c []candidate, d int) {
	var args Args_auto_complete
	var reply Reply_auto_complete
	args.Arg0 = Arg0
	args.Arg1 = Arg1
	args.Arg2 = Arg2
	args.Arg3 = Arg3
	err := cli.Call("RPC.RPC_auto_complete", &args, &reply)
	if err != nil {
		panic(err)
	}
	return reply.Arg0, reply.Arg1
}
|
||||
|
||||
// wrapper for: server_close

// Args_close packs the argument of server_close for RPC transport.
type Args_close struct {
	Arg0 int
}

// Reply_close packs the result of server_close.
type Reply_close struct {
	Arg0 int
}

// RPC_close is the server-side stub for server_close.
func (r *RPC) RPC_close(args *Args_close, reply *Reply_close) error {
	reply.Arg0 = server_close(args.Arg0)
	return nil
}

// client_close is the client-side stub; it panics on transport errors.
func client_close(cli *rpc.Client, Arg0 int) int {
	var args Args_close
	var reply Reply_close
	args.Arg0 = Arg0
	err := cli.Call("RPC.RPC_close", &args, &reply)
	if err != nil {
		panic(err)
	}
	return reply.Arg0
}
|
||||
|
||||
// wrapper for: server_status

// Args_status packs the argument of server_status for RPC transport.
type Args_status struct {
	Arg0 int
}

// Reply_status packs the result of server_status.
type Reply_status struct {
	Arg0 string
}

// RPC_status is the server-side stub for server_status.
func (r *RPC) RPC_status(args *Args_status, reply *Reply_status) error {
	reply.Arg0 = server_status(args.Arg0)
	return nil
}

// client_status is the client-side stub; it panics on transport errors.
func client_status(cli *rpc.Client, Arg0 int) string {
	var args Args_status
	var reply Reply_status
	args.Arg0 = Arg0
	err := cli.Call("RPC.RPC_status", &args, &reply)
	if err != nil {
		panic(err)
	}
	return reply.Arg0
}
|
||||
|
||||
// wrapper for: server_drop_cache

// Args_drop_cache packs the argument of server_drop_cache for RPC transport.
type Args_drop_cache struct {
	Arg0 int
}

// Reply_drop_cache packs the result of server_drop_cache.
type Reply_drop_cache struct {
	Arg0 int
}

// RPC_drop_cache is the server-side stub for server_drop_cache.
func (r *RPC) RPC_drop_cache(args *Args_drop_cache, reply *Reply_drop_cache) error {
	reply.Arg0 = server_drop_cache(args.Arg0)
	return nil
}

// client_drop_cache is the client-side stub; it panics on transport errors.
func client_drop_cache(cli *rpc.Client, Arg0 int) int {
	var args Args_drop_cache
	var reply Reply_drop_cache
	args.Arg0 = Arg0
	err := cli.Call("RPC.RPC_drop_cache", &args, &reply)
	if err != nil {
		panic(err)
	}
	return reply.Arg0
}
|
||||
|
||||
// wrapper for: server_set

// Args_set packs the arguments of server_set for RPC transport.
type Args_set struct {
	Arg0, Arg1 string
}

// Reply_set packs the result of server_set.
type Reply_set struct {
	Arg0 string
}

// RPC_set is the server-side stub for server_set.
func (r *RPC) RPC_set(args *Args_set, reply *Reply_set) error {
	reply.Arg0 = server_set(args.Arg0, args.Arg1)
	return nil
}

// client_set is the client-side stub; it panics on transport errors.
func client_set(cli *rpc.Client, Arg0, Arg1 string) string {
	var args Args_set
	var reply Reply_set
	args.Arg0 = Arg0
	args.Arg1 = Arg1
	err := cli.Call("RPC.RPC_set", &args, &reply)
	if err != nil {
		panic(err)
	}
	return reply.Arg0
}
|
|
@ -0,0 +1,66 @@
|
|||
package main
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// scope
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// scope is one level of a lexical scope chain, mapping names to their
// declarations.
type scope struct {
	parent   *scope // nil for universe scope
	entities map[string]*decl
}
|
||||
|
||||
func new_scope(outer *scope) *scope {
|
||||
s := new(scope)
|
||||
s.parent = outer
|
||||
s.entities = make(map[string]*decl)
|
||||
return s
|
||||
}
|
||||
|
||||
// returns: new, prev
|
||||
func advance_scope(s *scope) (*scope, *scope) {
|
||||
if len(s.entities) == 0 {
|
||||
return s, s.parent
|
||||
}
|
||||
return new_scope(s), s
|
||||
}
|
||||
|
||||
// adds declaration or returns an existing one
//
// add_named_decl registers d under its own name; see add_decl.
func (s *scope) add_named_decl(d *decl) *decl {
	return s.add_decl(d.name, d)
}
|
||||
|
||||
func (s *scope) add_decl(name string, d *decl) *decl {
|
||||
decl, ok := s.entities[name]
|
||||
if !ok {
|
||||
s.entities[name] = d
|
||||
return d
|
||||
}
|
||||
return decl
|
||||
}
|
||||
|
||||
// replace_decl unconditionally overwrites any existing declaration
// registered under name.
func (s *scope) replace_decl(name string, d *decl) {
	s.entities[name] = d
}
|
||||
|
||||
// merge_decl adds d to the scope; when the name already exists the two
// declarations are merged. Merging happens on a deep copy so that other
// scopes sharing the original declaration are not affected.
func (s *scope) merge_decl(d *decl) {
	decl, ok := s.entities[d.name]
	if !ok {
		s.entities[d.name] = d
	} else {
		// shadow the outer 'decl' with a private deep copy
		decl := decl.deep_copy()
		decl.expand_or_replace(d)
		s.entities[d.name] = decl
	}
}
|
||||
|
||||
func (s *scope) lookup(name string) *decl {
|
||||
decl, ok := s.entities[name]
|
||||
if !ok {
|
||||
if s.parent != nil {
|
||||
return s.parent.lookup(name)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return decl
|
||||
}
|
|
@ -0,0 +1,237 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"log"
|
||||
"net"
|
||||
"net/rpc"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// do_server runs the gocode daemon: reads the config, binds the listening
// socket (tcp or unix), registers the RPC service and serves until the
// daemon loop exits. Returns the process exit code.
func do_server() int {
	g_config.read()
	if g_config.ForceDebugOutput != "" {
		// forcefully enable debugging and redirect logging into the
		// specified file
		*g_debug = true
		f, err := os.Create(g_config.ForceDebugOutput)
		if err != nil {
			panic(err)
		}
		log.SetOutput(f)
	}

	addr := *g_addr
	if *g_sock == "unix" {
		addr = get_socket_filename()
		// a leftover socket file means another daemon may be running
		if file_exists(addr) {
			log.Printf("unix socket: '%s' already exists\n", addr)
			return 1
		}
	}
	g_daemon = new_daemon(*g_sock, addr)
	if *g_sock == "unix" {
		// cleanup unix socket file
		defer os.Remove(addr)
	}

	rpc.Register(new(RPC))

	g_daemon.loop()
	return 0
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// daemon
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// daemon bundles the server state: the RPC listener, a command channel
// used to request shutdown, and the autocompletion engine with its
// package/declaration caches.
type daemon struct {
	listener     net.Listener
	cmd_in       chan int
	autocomplete *auto_complete_context
	pkgcache     package_cache
	declcache    *decl_cache
	context      package_lookup_context
}
|
||||
|
||||
func new_daemon(network, address string) *daemon {
|
||||
var err error
|
||||
|
||||
d := new(daemon)
|
||||
d.listener, err = net.Listen(network, address)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
d.cmd_in = make(chan int, 1)
|
||||
d.pkgcache = new_package_cache()
|
||||
d.declcache = new_decl_cache(&d.context)
|
||||
d.autocomplete = new_auto_complete_context(d.pkgcache, d.declcache)
|
||||
return d
|
||||
}
|
||||
|
||||
func (this *daemon) drop_cache() {
|
||||
this.pkgcache = new_package_cache()
|
||||
this.declcache = new_decl_cache(&this.context)
|
||||
this.autocomplete = new_auto_complete_context(this.pkgcache, this.declcache)
|
||||
}
|
||||
|
||||
// commands accepted on daemon.cmd_in
const (
	daemon_close = iota
)
|
||||
|
||||
// loop accepts and serves RPC connections one at a time until either a
// close command arrives or no client has connected for CloseTimeout
// seconds (idle auto-shutdown). Accepted connections are fed through a
// channel by a dedicated goroutine so the select below can also watch
// the command channel and the idle timer.
func (this *daemon) loop() {
	conn_in := make(chan net.Conn)
	go func() {
		for {
			c, err := this.listener.Accept()
			if err != nil {
				panic(err)
			}
			conn_in <- c
		}
	}()

	timeout := time.Duration(g_config.CloseTimeout) * time.Second
	countdown := time.NewTimer(timeout)

	for {
		// handle connections or server CMDs (currently one CMD)
		select {
		case c := <-conn_in:
			// ServeConn blocks until the client disconnects
			rpc.ServeConn(c)
			countdown.Reset(timeout)
			runtime.GC()
		case cmd := <-this.cmd_in:
			switch cmd {
			case daemon_close:
				return
			}
		case <-countdown.C:
			return
		}
	}
}
|
||||
|
||||
// close asks the daemon loop to terminate.
func (this *daemon) close() {
	this.cmd_in <- daemon_close
}
|
||||
|
||||
var g_daemon *daemon
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// server_* functions
|
||||
//
|
||||
// Corresponding client_* functions are autogenerated by goremote.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// server_auto_complete answers one autocompletion request: it syncs the
// daemon's build context with the client's, resolves the project root per
// the configured lookup mode, and runs the completion engine at the given
// byte cursor. On an internal panic it recovers, returns a single "PANIC"
// candidate and drops all caches so the next request starts clean.
func server_auto_complete(file []byte, filename string, cursor int, context_packed go_build_context) (c []candidate, d int) {
	context := unpack_build_context(&context_packed)
	defer func() {
		if err := recover(); err != nil {
			print_backtrace(err)
			c = []candidate{
				{"PANIC", "PANIC", decl_invalid},
			}

			// drop cache
			g_daemon.drop_cache()
		}
	}()
	// TODO: Probably we don't care about comparing all the fields, checking GOROOT and GOPATH
	// should be enough.
	if !reflect.DeepEqual(g_daemon.context.Context, context.Context) {
		// client build context changed: cached results are stale
		g_daemon.context = context
		g_daemon.drop_cache()
	}
	switch g_config.PackageLookupMode {
	case "bzl":
		// when package lookup mode is bzl, we set GOPATH to "" explicitly and
		// BzlProjectRoot becomes valid (or empty)
		var err error
		g_daemon.context.GOPATH = ""
		g_daemon.context.BzlProjectRoot, err = find_bzl_project_root(g_config.LibPath, filename)
		if *g_debug && err != nil {
			log.Printf("Bzl project root not found: %s", err)
		}
	case "gb":
		// when package lookup mode is gb, we set GOPATH to "" explicitly and
		// GBProjectRoot becomes valid (or empty)
		var err error
		g_daemon.context.GOPATH = ""
		g_daemon.context.GBProjectRoot, err = find_gb_project_root(filename)
		if *g_debug && err != nil {
			log.Printf("Gb project root not found: %s", err)
		}
	case "go":
		// get current package path for GO15VENDOREXPERIMENT hack
		g_daemon.context.CurrentPackagePath = ""
		pkg, err := g_daemon.context.ImportDir(filepath.Dir(filename), build.FindOnly)
		if err == nil {
			if *g_debug {
				log.Printf("Go project path: %s", pkg.ImportPath)
			}
			g_daemon.context.CurrentPackagePath = pkg.ImportPath
		} else if *g_debug {
			log.Printf("Go project path not found: %s", err)
		}
	}
	if *g_debug {
		// dump the request with a '#' marking the cursor position
		var buf bytes.Buffer
		log.Printf("Got autocompletion request for '%s'\n", filename)
		log.Printf("Cursor at: %d\n", cursor)
		buf.WriteString("-------------------------------------------------------\n")
		buf.Write(file[:cursor])
		buf.WriteString("#")
		buf.Write(file[cursor:])
		log.Print(buf.String())
		log.Println("-------------------------------------------------------")
	}
	candidates, d := g_daemon.autocomplete.apropos(file, filename, cursor)
	if *g_debug {
		log.Printf("Offset: %d\n", d)
		log.Printf("Number of candidates found: %d\n", len(candidates))
		log.Printf("Candidates are:\n")
		for _, c := range candidates {
			abbr := fmt.Sprintf("%s %s %s", c.Class, c.Name, c.Type)
			if c.Class == decl_func {
				abbr = fmt.Sprintf("%s %s%s", c.Class, c.Name, c.Type[len("func"):])
			}
			log.Printf("  %s\n", abbr)
		}
		log.Println("=======================================================")
	}
	return candidates, d
}
|
||||
|
||||
// server_close shuts down the daemon. The int argument and result exist
// only to satisfy the autogenerated RPC wrapper shape.
func server_close(notused int) int {
	g_daemon.close()
	return 0
}
|
||||
|
||||
// server_status returns a status report from the autocompletion engine.
// The int argument is unused (RPC wrapper shape).
func server_status(notused int) string {
	return g_daemon.autocomplete.status()
}
|
||||
|
||||
// server_drop_cache discards all daemon caches. Argument and result are
// unused (RPC wrapper shape).
func server_drop_cache(notused int) int {
	// drop cache
	g_daemon.drop_cache()
	return 0
}
|
||||
|
||||
func server_set(key, value string) string {
|
||||
if key == "\x00" {
|
||||
return g_config.list()
|
||||
} else if value == "\x00" {
|
||||
return g_config.list_option(key)
|
||||
}
|
||||
// drop cache on settings changes
|
||||
g_daemon.drop_cache()
|
||||
return g_config.set_option(key, value)
|
||||
}
|
|
@ -0,0 +1,287 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// our own readdir, which skips the files it cannot lstat
func readdir_lstat(name string) ([]os.FileInfo, error) {
	dir, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	defer dir.Close()

	entries, err := dir.Readdirnames(-1)
	if err != nil {
		return nil, err
	}

	infos := make([]os.FileInfo, 0, len(entries))
	for _, entry := range entries {
		// silently drop entries that vanish between readdir and lstat
		if fi, lerr := os.Lstat(filepath.Join(name, entry)); lerr == nil {
			infos = append(infos, fi)
		}
	}
	return infos, nil
}
|
||||
|
||||
// our other readdir function, only opens and reads
//
// readdir returns nil when the directory cannot be opened, but panics on
// a failed read of an opened directory.
func readdir(dirname string) []os.FileInfo {
	dir, err := os.Open(dirname)
	if err != nil {
		return nil
	}
	defer dir.Close()

	infos, err := dir.Readdir(-1)
	if err != nil {
		panic(err)
	}
	return infos
}
|
||||
|
||||
// returns truncated 'data' and amount of bytes skipped (for cursor pos adjustment)
//
// filter_out_shebang strips a leading "#!..." line; input without a
// complete shebang line is returned untouched with 0 skipped.
func filter_out_shebang(data []byte) ([]byte, int) {
	if len(data) <= 2 || data[0] != '#' || data[1] != '!' {
		return data, 0
	}
	nl := bytes.Index(data, []byte("\n"))
	if nl == -1 || len(data) <= nl+1 {
		return data, 0
	}
	return data[nl+1:], nl + 1
}
|
||||
|
||||
// file_exists reports whether filename can be stat'ed.
func file_exists(filename string) bool {
	_, err := os.Stat(filename)
	return err == nil
}
|
||||
|
||||
// is_dir reports whether path exists and is a directory.
func is_dir(path string) bool {
	fi, err := os.Stat(path)
	return err == nil && fi.IsDir()
}
|
||||
|
||||
// char_to_byte_offset converts a character (rune) offset into s to a byte
// offset, by counting rune-start bytes.
//
// NOTE(review): for a multi-byte final rune the returned offset lands one
// byte past the rune's start rather than at the next rune boundary —
// presumably callers tolerate this; confirm before changing.
func char_to_byte_offset(s []byte, offset_c int) (offset_b int) {
	remaining := offset_c
	for offset_b = 0; remaining > 0 && offset_b < len(s); offset_b++ {
		if utf8.RuneStart(s[offset_b]) {
			remaining--
		}
	}
	return offset_b
}
|
||||
|
||||
// xdg_home_dir returns $XDG_CONFIG_HOME, falling back to $HOME/.config
// when it is unset or empty.
func xdg_home_dir() string {
	if dir := os.Getenv("XDG_CONFIG_HOME"); dir != "" {
		return dir
	}
	return filepath.Join(os.Getenv("HOME"), ".config")
}
|
||||
|
||||
// has_prefix is strings.HasPrefix with an optional case-insensitive mode.
func has_prefix(s, prefix string, ignorecase bool) bool {
	if !ignorecase {
		return strings.HasPrefix(s, prefix)
	}
	return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix))
}
|
||||
|
||||
// find_bzl_project_root walks up from path's directory and returns the
// first ancestor that appears in the colon-separated libpath list.
// Purely string-based: no filesystem access is performed.
func find_bzl_project_root(libpath, path string) (string, error) {
	if libpath == "" {
		return "", fmt.Errorf("could not find project root, libpath is empty")
	}

	roots := make(map[string]struct{})
	for _, entry := range strings.Split(libpath, ":") {
		roots[filepath.Clean(strings.TrimSpace(entry))] = struct{}{}
	}

	path = filepath.Dir(path)
	if path == "" {
		return "", fmt.Errorf("project root is blank")
	}

	start := path
	for path != "/" {
		if _, ok := roots[filepath.Clean(path)]; ok {
			return path, nil
		}
		path = filepath.Dir(path)
	}
	return "", fmt.Errorf("could not find project root in %q or its parents", start)
}
|
||||
|
||||
// Code taken directly from `gb`, I hope author doesn't mind.
//
// find_gb_project_root walks up from path's directory looking for an
// ancestor that contains a "src" entry; that ancestor (with symlinks
// resolved) is the gb project root.
func find_gb_project_root(path string) (string, error) {
	path = filepath.Dir(path)
	if path == "" {
		return "", fmt.Errorf("project root is blank")
	}
	start := path
	for path != "/" {
		root := filepath.Join(path, "src")
		if _, err := os.Stat(root); err != nil {
			if os.IsNotExist(err) {
				// no "src" here: try the parent
				path = filepath.Dir(path)
				continue
			}
			return "", err
		}
		path, err := filepath.EvalSymlinks(path)
		if err != nil {
			return "", err
		}
		return path, nil
	}
	return "", fmt.Errorf("could not find project root in %q or its parents", start)
}
|
||||
|
||||
// vendorlessImportPath returns the devendorized version of the provided import path.
// e.g. "foo/bar/vendor/a/b" => "a/b"
func vendorlessImportPath(ipath string) string {
	const marker = "/vendor/"
	// An embedded "/vendor/" segment takes precedence; use the LAST
	// one, matching nested vendoring.
	if i := strings.LastIndex(ipath, marker); i >= 0 {
		return ipath[i+len(marker):]
	}
	// Otherwise strip a leading "vendor/" if present.
	return strings.TrimPrefix(ipath, "vendor/")
}
|
||||
|
||||
//-------------------------------------------------------------------------
// print_backtrace
//
// a nicer backtrace printer than the default one
//-------------------------------------------------------------------------

// g_backtrace_mutex serializes concurrent backtrace dumps so their
// output lines are not interleaved.
var g_backtrace_mutex sync.Mutex

// print_backtrace prints the recovered panic value followed by the
// current goroutine's call stack, one numbered frame per line.
func print_backtrace(err interface{}) {
	g_backtrace_mutex.Lock()
	defer g_backtrace_mutex.Unlock()

	fmt.Printf("panic: %v\n", err)
	// Start at 2 to skip runtime.Caller itself and this function.
	for i := 2; ; i++ {
		pc, file, line, ok := runtime.Caller(i)
		if !ok {
			break
		}
		fmt.Printf("%d(%s): %s:%d\n", i-1, runtime.FuncForPC(pc).Name(), file, line)
	}
	fmt.Println("")
}
|
||||
|
||||
//-------------------------------------------------------------------------
|
||||
// File reader goroutine
|
||||
//
|
||||
// It's a bad idea to block multiple goroutines on file I/O. Creates many
|
||||
// threads which fight for HDD. Therefore only single goroutine should read HDD
|
||||
// at the same time.
|
||||
//-------------------------------------------------------------------------
|
||||
|
||||
// file_read_request asks the reader goroutine to load one file; the
// result is delivered on the per-request out channel.
type file_read_request struct {
	filename string                  // path of the file to read
	out chan file_read_response      // receives exactly one response per request
}
|
||||
|
||||
// file_read_response carries the outcome of a single file read:
// either the file contents or the read error.
type file_read_response struct {
	data []byte    // file contents on success
	error error    // non-nil when the read failed
}
|
||||
|
||||
// file_reader_type is a handle to the single file-reading goroutine;
// requests submitted on in are served one at a time (see the header
// comment in the original file about avoiding concurrent disk I/O).
type file_reader_type struct {
	in chan file_read_request // unbuffered request queue
}
|
||||
|
||||
func new_file_reader() *file_reader_type {
|
||||
this := new(file_reader_type)
|
||||
this.in = make(chan file_read_request)
|
||||
go func() {
|
||||
var rsp file_read_response
|
||||
for {
|
||||
req := <-this.in
|
||||
rsp.data, rsp.error = ioutil.ReadFile(req.filename)
|
||||
req.out <- rsp
|
||||
}
|
||||
}()
|
||||
return this
|
||||
}
|
||||
|
||||
func (this *file_reader_type) read_file(filename string) ([]byte, error) {
|
||||
req := file_read_request{
|
||||
filename,
|
||||
make(chan file_read_response),
|
||||
}
|
||||
this.in <- req
|
||||
rsp := <-req.out
|
||||
return rsp.data, rsp.error
|
||||
}
|
||||
|
||||
// file_reader is the shared, process-wide file reader instance.
var file_reader = new_file_reader()
|
||||
|
||||
//-------------------------------------------------------------------------
// copy of the build.Context without func fields
//-------------------------------------------------------------------------

// go_build_context mirrors the plain-data fields of go/build.Context so
// the context can be serialized (build.Context's func-valued fields
// cannot be). Field meanings match the identically-named fields of
// go/build.Context.
type go_build_context struct {
	GOARCH string
	GOOS string
	GOROOT string
	GOPATH string
	CgoEnabled bool
	UseAllFiles bool
	Compiler string
	BuildTags []string
	ReleaseTags []string
	InstallSuffix string
}
|
||||
|
||||
func pack_build_context(ctx *build.Context) go_build_context {
|
||||
return go_build_context{
|
||||
GOARCH: ctx.GOARCH,
|
||||
GOOS: ctx.GOOS,
|
||||
GOROOT: ctx.GOROOT,
|
||||
GOPATH: ctx.GOPATH,
|
||||
CgoEnabled: ctx.CgoEnabled,
|
||||
UseAllFiles: ctx.UseAllFiles,
|
||||
Compiler: ctx.Compiler,
|
||||
BuildTags: ctx.BuildTags,
|
||||
ReleaseTags: ctx.ReleaseTags,
|
||||
InstallSuffix: ctx.InstallSuffix,
|
||||
}
|
||||
}
|
||||
|
||||
func unpack_build_context(ctx *go_build_context) package_lookup_context {
|
||||
return package_lookup_context{
|
||||
Context: build.Context{
|
||||
GOARCH: ctx.GOARCH,
|
||||
GOOS: ctx.GOOS,
|
||||
GOROOT: ctx.GOROOT,
|
||||
GOPATH: ctx.GOPATH,
|
||||
CgoEnabled: ctx.CgoEnabled,
|
||||
UseAllFiles: ctx.UseAllFiles,
|
||||
Compiler: ctx.Compiler,
|
||||
BuildTags: ctx.BuildTags,
|
||||
ReleaseTags: ctx.ReleaseTags,
|
||||
InstallSuffix: ctx.InstallSuffix,
|
||||
},
|
||||
}
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
Copyright (c) 2015, visualfc <visualfc@gmail.com>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of gotools nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
# gotools
|
||||
liteide golang tools
|
|
@ -0,0 +1,8 @@
|
|||
// Copyright 2011-2015 visualfc <visualfc@gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
gotools document
|
||||
*/
|
||||
package main
|
|
@ -0,0 +1,41 @@
|
|||
// Copyright 2011-2015 visualfc <visualfc@gmail.com>. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/visualfc/gotools/astview"
|
||||
"github.com/visualfc/gotools/command"
|
||||
"github.com/visualfc/gotools/docview"
|
||||
"github.com/visualfc/gotools/finddoc"
|
||||
"github.com/visualfc/gotools/goapi"
|
||||
"github.com/visualfc/gotools/goimports"
|
||||
"github.com/visualfc/gotools/gopresent"
|
||||
"github.com/visualfc/gotools/jsonfmt"
|
||||
"github.com/visualfc/gotools/oracle"
|
||||
"github.com/visualfc/gotools/pkgs"
|
||||
"github.com/visualfc/gotools/runcmd"
|
||||
"github.com/visualfc/gotools/types"
|
||||
)
|
||||
|
||||
// init registers every gotools sub-command with the shared command
// dispatcher before main runs; command.Main later selects one of these
// by name from the command line.
func init() {
	command.Register(types.Command)
	command.Register(jsonfmt.Command)
	command.Register(finddoc.Command)
	command.Register(runcmd.Command)
	command.Register(docview.Command)
	command.Register(astview.Command)
	command.Register(goimports.Command)
	command.Register(gopresent.Command)
	command.Register(goapi.Command)
	command.Register(pkgs.Command)
	command.Register(oracle.Command)
}
|
||||
|
||||
// main sets the application metadata and hands control to the command
// dispatcher, which parses os.Args and runs the selected sub-command.
func main() {
	command.AppName = "gotools"
	command.AppVersion = "1.0"
	command.AppInfo = "Go tools for liteide."
	command.Main()
}
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,22 @@
|
|||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
|
@ -0,0 +1,179 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package encoding defines an interface for character encodings, such as Shift
|
||||
// JIS and Windows 1252, that can convert to and from UTF-8.
|
||||
//
|
||||
// To convert the bytes of an io.Reader r from the encoding e to UTF-8:
|
||||
// rInUTF8 := transform.NewReader(r, e.NewDecoder())
|
||||
// and to convert from UTF-8 to the encoding e:
|
||||
// wInUTF8 := transform.NewWriter(w, e.NewEncoder())
|
||||
// In both cases, import "golang.org/x/text/transform".
|
||||
//
|
||||
// Encoding implementations are provided in other packages, such as
|
||||
// golang.org/x/text/encoding/charmap and
|
||||
// golang.org/x/text/encoding/japanese.
|
||||
package encoding // import "golang.org/x/text/encoding"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/transform"
|
||||
)
|
||||
|
||||
// Encoding is a character set encoding that can be transformed to and from
// UTF-8.
//
// See Nop and Replacement in this package for two trivial implementations.
type Encoding interface {
	// NewDecoder returns a transformer that converts to UTF-8.
	//
	// Transforming source bytes that are not of that encoding will not
	// result in an error per se. Each byte that cannot be transcoded will
	// be represented in the output by the UTF-8 encoding of '\uFFFD', the
	// replacement rune.
	NewDecoder() transform.Transformer

	// NewEncoder returns a transformer that converts from UTF-8.
	//
	// Transforming source bytes that are not valid UTF-8 will not result in
	// an error per se. Each rune that cannot be transcoded will be
	// represented in the output by an encoding-specific replacement such as
	// "\x1a" (the ASCII substitute character) or "\xff\xfd". To return
	// early with error instead, use transform.Chain to preprocess the data
	// with a UTF8Validator.
	NewEncoder() transform.Transformer
}
|
||||
|
||||
// ASCIISub is the ASCII substitute character, as recommended by
// http://unicode.org/reports/tr36/#Text_Comparison
const ASCIISub = '\x1a'

// Nop is the nop encoding. Its transformed bytes are the same as the source
// bytes; it does not replace invalid UTF-8 sequences.
var Nop Encoding = nop{}

// nop implements Encoding with the identity transformer in both directions.
type nop struct{}

// NewDecoder returns the identity transformer.
func (nop) NewDecoder() transform.Transformer {
	return transform.Nop
}

// NewEncoder returns the identity transformer.
func (nop) NewEncoder() transform.Transformer {
	return transform.Nop
}
|
||||
|
||||
// Replacement is the replacement encoding. Decoding from the replacement
// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
// the replacement encoding yields the same as the source bytes except that
// invalid UTF-8 is converted to '\uFFFD'.
//
// It is defined at http://encoding.spec.whatwg.org/#replacement
var Replacement Encoding = replacement{}

// replacement implements Encoding via the replacementDecoder and
// replacementEncoder transformers below.
type replacement struct{}

// NewDecoder returns a transformer that maps the whole input to one '\uFFFD'.
func (replacement) NewDecoder() transform.Transformer {
	return replacementDecoder{}
}

// NewEncoder returns a transformer that passes UTF-8 through, replacing
// invalid sequences with '\uFFFD'.
func (replacement) NewEncoder() transform.Transformer {
	return replacementEncoder{}
}
|
||||
|
||||
// replacementDecoder consumes its entire input and, at EOF, emits a
// single U+FFFD replacement rune.
type replacementDecoder struct{ transform.NopResetter }

// Transform discards src (reporting it all as consumed) and, once atEOF,
// writes the 3-byte UTF-8 encoding of '\uFFFD' to dst. It requires at
// least 3 bytes of dst space even on intermediate calls.
func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if len(dst) < 3 {
		return 0, 0, transform.ErrShortDst
	}
	if atEOF {
		// '\ufffd' encodes to exactly these three bytes.
		const fffd = "\ufffd"
		dst[0] = fffd[0]
		dst[1] = fffd[1]
		dst[2] = fffd[2]
		nDst = 3
	}
	return nDst, len(src), nil
}
|
||||
|
||||
// replacementEncoder copies valid UTF-8 from src to dst, substituting
// '\uFFFD' for each invalid sequence.
type replacementEncoder struct{ transform.NopResetter }

// Transform implements transform.Transformer. It decodes src rune by
// rune and re-encodes each rune (or '\uFFFD' for invalid input) to dst,
// reporting ErrShortSrc for a possibly-incomplete trailing rune when
// !atEOF and ErrShortDst when dst cannot hold the next rune.
func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	r, size := rune(0), 0

	for ; nSrc < len(src); nSrc += size {
		r = rune(src[nSrc])

		// Decode a 1-byte rune.
		if r < utf8.RuneSelf {
			size = 1

		} else {
			// Decode a multi-byte rune.
			r, size = utf8.DecodeRune(src[nSrc:])
			if size == 1 {
				// All valid runes of size 1 (those below utf8.RuneSelf) were
				// handled above. We have invalid UTF-8 or we haven't seen the
				// full character yet.
				if !atEOF && !utf8.FullRune(src[nSrc:]) {
					err = transform.ErrShortSrc
					break
				}
				r = '\ufffd'
			}
		}

		if nDst+utf8.RuneLen(r) > len(dst) {
			err = transform.ErrShortDst
			break
		}
		nDst += utf8.EncodeRune(dst[nDst:], r)
	}
	return nDst, nSrc, err
}
|
||||
|
||||
// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8.
var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8")

// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first
// input byte that is not valid UTF-8.
var UTF8Validator transform.Transformer = utf8Validator{}

// utf8Validator copies src to dst verbatim, stopping with ErrInvalidUTF8
// at the first malformed sequence.
type utf8Validator struct{ transform.NopResetter }

// Transform implements transform.Transformer. Valid bytes are copied
// through unchanged; on a malformed sequence it returns ErrInvalidUTF8
// (or ErrShortSrc when the sequence may merely be incomplete and !atEOF).
func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	n := len(src)
	if n > len(dst) {
		n = len(dst)
	}
	for i := 0; i < n; {
		// Fast path: single-byte (ASCII) runes.
		if c := src[i]; c < utf8.RuneSelf {
			dst[i] = c
			i++
			continue
		}
		_, size := utf8.DecodeRune(src[i:])
		if size == 1 {
			// All valid runes of size 1 (those below utf8.RuneSelf) were
			// handled above. We have invalid UTF-8 or we haven't seen the
			// full character yet.
			err = ErrInvalidUTF8
			if !atEOF && !utf8.FullRune(src[i:]) {
				err = transform.ErrShortSrc
			}
			return i, i, err
		}
		if i+size > len(dst) {
			return i, i, transform.ErrShortDst
		}
		// Copy the complete multi-byte rune.
		for ; size > 0; size-- {
			dst[i] = src[i]
			i++
		}
	}
	if len(src) > len(dst) {
		err = transform.ErrShortDst
	}
	return n, n, err
}
|
|
@ -0,0 +1,137 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
)
|
||||
|
||||
// registry mirrors the structure of IANA's character-sets XML registry
// (assignments/character-sets/character-sets.xml) for xml.Decoder.
type registry struct {
	XMLName xml.Name `xml:"registry"`
	Updated string `xml:"updated"`
	Registry []struct {
		ID string `xml:"id,attr"`
		Record []struct {
			Name string `xml:"name"`
			Xref []struct {
				Type string `xml:"type,attr"`
				Data string `xml:"data,attr"`
			} `xml:"xref"`
			Desc struct {
				// innerxml keeps the raw description markup so main
				// can re-parse the embedded xref elements.
				Data string `xml:",innerxml"`
				// Any []struct {
				// Data string `xml:",chardata"`
				// } `xml:",any"`
				// Data string `xml:",chardata"`
			} `xml:"description,"`
			MIB string `xml:"value"`
			Alias []string `xml:"alias"`
			MIME string `xml:"preferred_alias"`
		} `xml:"record"`
	} `xml:"registry"`
}
|
||||
|
||||
func main() {
|
||||
r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
|
||||
reg := ®istry{}
|
||||
if err := xml.NewDecoder(r).Decode(®); err != nil && err != io.EOF {
|
||||
log.Fatalf("Error decoding charset registry: %v", err)
|
||||
}
|
||||
if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
|
||||
log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
|
||||
}
|
||||
|
||||
w := &bytes.Buffer{}
|
||||
fmt.Fprintf(w, "const (\n")
|
||||
for _, rec := range reg.Registry[0].Record {
|
||||
constName := ""
|
||||
for _, a := range rec.Alias {
|
||||
if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
|
||||
// Some of the constant definitions have comments in them. Strip those.
|
||||
constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
|
||||
}
|
||||
}
|
||||
if constName == "" {
|
||||
switch rec.MIB {
|
||||
case "2085":
|
||||
constName = "HZGB2312" // Not listed as alias for some reason.
|
||||
default:
|
||||
log.Fatalf("No cs alias defined for %s.", rec.MIB)
|
||||
}
|
||||
}
|
||||
if rec.MIME != "" {
|
||||
rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
|
||||
}
|
||||
fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
|
||||
if len(rec.Desc.Data) > 0 {
|
||||
fmt.Fprint(w, "// ")
|
||||
d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
|
||||
inElem := true
|
||||
attr := ""
|
||||
for {
|
||||
t, err := d.Token()
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
log.Fatal(err)
|
||||
}
|
||||
break
|
||||
}
|
||||
switch x := t.(type) {
|
||||
case xml.CharData:
|
||||
attr = "" // Don't need attribute info.
|
||||
a := bytes.Split([]byte(x), []byte("\n"))
|
||||
for i, b := range a {
|
||||
if b = bytes.TrimSpace(b); len(b) != 0 {
|
||||
if !inElem && i > 0 {
|
||||
fmt.Fprint(w, "\n// ")
|
||||
}
|
||||
inElem = false
|
||||
fmt.Fprintf(w, "%s ", string(b))
|
||||
}
|
||||
}
|
||||
case xml.StartElement:
|
||||
if x.Name.Local == "xref" {
|
||||
inElem = true
|
||||
use := false
|
||||
for _, a := range x.Attr {
|
||||
if a.Name.Local == "type" {
|
||||
use = use || a.Value != "person"
|
||||
}
|
||||
if a.Name.Local == "data" && use {
|
||||
attr = a.Value + " "
|
||||
}
|
||||
}
|
||||
}
|
||||
case xml.EndElement:
|
||||
inElem = false
|
||||
fmt.Fprint(w, attr)
|
||||
}
|
||||
}
|
||||
fmt.Fprint(w, "\n")
|
||||
}
|
||||
for _, x := range rec.Xref {
|
||||
switch x.Type {
|
||||
case "rfc":
|
||||
fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
|
||||
case "uri":
|
||||
fmt.Fprintf(w, "// Reference: %s\n", x.Data)
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
fmt.Fprintln(w, ")")
|
||||
|
||||
gen.WriteGoFile("mib.go", "identifier", w.Bytes())
|
||||
}
|
80
vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
generated
vendored
Normal file
80
vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
generated
vendored
Normal file
|
@ -0,0 +1,80 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run gen.go
|
||||
|
||||
// Package identifier defines the contract between implementations of Encoding
|
||||
// and Index by defining identifiers that uniquely identify standardized coded
|
||||
// character sets (CCS) and character encoding schemes (CES), which we will
|
||||
// together refer to as encodings, for which Encoding implementations provide
|
||||
// converters to and from UTF-8. This package is typically only of concern to
|
||||
// implementers of Indexes and Encodings.
|
||||
//
|
||||
// One part of the identifier is the MIB code, which is defined by IANA and
|
||||
// uniquely identifies a CCS or CES. Each code is associated with data that
|
||||
// references authorities, official documentation as well as aliases and MIME
|
||||
// names.
|
||||
//
|
||||
// Not all CESs are covered by the IANA registry. The "other" string that is
|
||||
// returned by ID can be used to identify other character sets or versions of
|
||||
// existing ones.
|
||||
//
|
||||
// It is recommended that each package that provides a set of Encodings provide
|
||||
// the All and Common variables to reference all supported encodings and
|
||||
// commonly used subset. This allows Index implementations to include all
|
||||
// available encodings without explicitly referencing or knowing about them.
|
||||
package identifier
|
||||
|
||||
// Note: this package is internal, but could be made public if there is a need
|
||||
// for writing third-party Indexes and Encodings.
|
||||
|
||||
// References:
|
||||
// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
|
||||
// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
|
||||
// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
|
||||
// - http://www.ietf.org/rfc/rfc2978.txt
|
||||
// - http://www.unicode.org/reports/tr22/
|
||||
// - http://www.w3.org/TR/encoding/
|
||||
// - http://www.w3.org/TR/encoding/indexes/encodings.json
|
||||
// - https://encoding.spec.whatwg.org/
|
||||
// - https://tools.ietf.org/html/rfc6657#section-5
|
||||
|
||||
// Interface can be implemented by Encodings to define the CCS or CES for which
// it implements conversions.
type Interface interface {
	// ID returns an encoding identifier. Exactly one of the mib and other
	// values should be non-zero.
	//
	// In the usual case it is only necessary to indicate the MIB code. The
	// other string can be used to specify encodings for which there is no MIB,
	// such as "x-mac-dingbat".
	//
	// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
	ID() (mib MIB, other string)

	// NOTE: the restrictions on the encoding are to allow extending the syntax
	// with additional information such as versions, vendors and other variants.
}
|
||||
|
||||
// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
// some identifiers for some encodings that are not covered by the IANA
// standard.
//
// See http://www.iana.org/assignments/ianacharset-mib.
type MIB uint16

// These additional MIB types are not defined in IANA. They are added because
// they are common and defined within the text repo.
// The 10000 base is presumably chosen to stay clear of IANA-assigned
// values — TODO confirm against the IANA registry.
const (
	// Unofficial marks the start of encodings not registered by IANA.
	Unofficial MIB = 10000 + iota

	// TODO: add Replacement?

	// XUserDefined is the code for x-user-defined.
	XUserDefined

	// MacintoshCyrillic is the code for x-mac-cyrillic.
	MacintoshCyrillic
)
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,60 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal contains code that is shared among encoding implementations.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"golang.org/x/text/encoding"
|
||||
"golang.org/x/text/encoding/internal/identifier"
|
||||
"golang.org/x/text/transform"
|
||||
)
|
||||
|
||||
// Encoding is an implementation of the Encoding interface that adds the String
// and ID methods to an existing encoding.
type Encoding struct {
	encoding.Encoding
	Name string            // human-readable encoding name, returned by String
	MIB identifier.MIB     // IANA-derived identifier, returned by ID
}

// _ verifies that Encoding implements identifier.Interface.
var _ identifier.Interface = (*Encoding)(nil)

// String returns the encoding's name.
func (e *Encoding) String() string {
	return e.Name
}

// ID implements identifier.Interface; the "other" identifier is always empty.
func (e *Encoding) ID() (mib identifier.MIB, other string) {
	return e.MIB, ""
}
|
||||
|
||||
// SimpleEncoding is an Encoding that combines two Transformers.
type SimpleEncoding struct {
	Decoder transform.Transformer // returned as-is by NewDecoder
	Encoder transform.Transformer // returned as-is by NewEncoder
}

// NewDecoder returns the stored Decoder. Note: the same Transformer
// instance is shared across calls, unlike encodings that construct a
// fresh transformer per call.
func (e *SimpleEncoding) NewDecoder() transform.Transformer {
	return e.Decoder
}

// NewEncoder returns the stored Encoder (shared across calls).
func (e *SimpleEncoding) NewEncoder() transform.Transformer {
	return e.Encoder
}
|
||||
|
||||
// FuncEncoding is an Encoding that combines two functions returning a new
// Transformer.
type FuncEncoding struct {
	Decoder func() transform.Transformer // invoked by each NewDecoder call
	Encoder func() transform.Transformer // invoked by each NewEncoder call
}

// NewDecoder invokes the Decoder factory, yielding a fresh transformer.
func (e FuncEncoding) NewDecoder() transform.Transformer {
	return e.Decoder()
}

// NewEncoder invokes the Encoder factory, yielding a fresh transformer.
func (e FuncEncoding) NewEncoder() transform.Transformer {
	return e.Encoder()
}
|
|
@ -0,0 +1,12 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package simplifiedchinese
|
||||
|
||||
import (
|
||||
"golang.org/x/text/encoding"
|
||||
)
|
||||
|
||||
// All is a list of all defined encodings in this package.
// HZGB2312 is presumably defined in a sibling file of this package —
// it is not declared in this one.
var All = []encoding.Encoding{GB18030, GBK, HZGB2312}
|
|
@ -0,0 +1,280 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package simplifiedchinese
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/encoding"
|
||||
"golang.org/x/text/encoding/internal"
|
||||
"golang.org/x/text/encoding/internal/identifier"
|
||||
"golang.org/x/text/transform"
|
||||
)
|
||||
|
||||
var (
	// GB18030 is the GB18030 encoding.
	GB18030 encoding.Encoding = &gbk18030
	// GBK is the GBK encoding. It encodes an extension of the GB2312 character set
	// and is also known as Code Page 936.
	GBK encoding.Encoding = &gbk
)

// gbk is the concrete GBK codec: a decoder/encoder pair with the
// 4-byte gb18030 forms disabled.
var gbk = internal.Encoding{
	&internal.SimpleEncoding{
		gbkDecoder{gb18030: false},
		gbkEncoder{gb18030: false},
	},
	"GBK",
	identifier.GBK,
}

// gbk18030 is the concrete GB18030 codec: the same machinery as gbk
// but with the 4-byte gb18030 forms enabled.
var gbk18030 = internal.Encoding{
	&internal.SimpleEncoding{
		gbkDecoder{gb18030: true},
		gbkEncoder{gb18030: true},
	},
	"GB18030",
	identifier.GB18030,
}

// Errors reported when input is not well-formed in the selected encoding.
var (
	errInvalidGB18030 = errors.New("simplifiedchinese: invalid GB18030 encoding")
	errInvalidGBK = errors.New("simplifiedchinese: invalid GBK encoding")
)
|
||||
|
||||
// gbkDecoder converts GBK (or, when gb18030 is set, GB18030) bytes to
// UTF-8.
type gbkDecoder struct {
	transform.NopResetter
	gb18030 bool // enable the 4-byte GB18030 sequences
}
|
||||
|
||||
// Transform implements transform.Transformer, decoding GBK/GB18030
// bytes in src to UTF-8 in dst. Bytes are consumed as 1-, 2- or (for
// GB18030) 4-byte sequences; undecodable two-byte sequences map to
// '\ufffd'. ErrShortSrc is reported for a truncated sequence when
// !atEOF; if atEOF, a truncated sequence is an encoding error instead.
// The decode and gb18030 lookup tables are defined elsewhere in this
// package (generated tables — not visible here).
func (d gbkDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	r, size := rune(0), 0
loop:
	for ; nSrc < len(src); nSrc += size {
		switch c0 := src[nSrc]; {
		case c0 < utf8.RuneSelf:
			// ASCII byte: passes through as itself.
			r, size = rune(c0), 1

		// Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC
		// as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk
		// says to treat "gbk" as Code Page 936.
		case c0 == 0x80:
			r, size = '€', 1

		case c0 < 0xff:
			// Lead byte of a 2-byte (or, for gb18030, 4-byte) sequence.
			if nSrc+1 >= len(src) {
				err = transform.ErrShortSrc
				break loop
			}
			c1 := src[nSrc+1]
			switch {
			case 0x40 <= c1 && c1 < 0x7f:
				// Trail-byte ranges are remapped to a contiguous
				// 0-based index for the decode table below.
				c1 -= 0x40
			case 0x80 <= c1 && c1 < 0xff:
				c1 -= 0x41
			case d.gb18030 && 0x30 <= c1 && c1 < 0x40:
				// GB18030 4-byte form: c0 c1 c2 c3 with fixed digit
				// ranges for each position.
				if nSrc+3 >= len(src) {
					err = transform.ErrShortSrc
					break loop
				}
				c2 := src[nSrc+2]
				if c2 < 0x81 || 0xff <= c2 {
					err = errInvalidGB18030
					break loop
				}
				c3 := src[nSrc+3]
				if c3 < 0x30 || 0x3a <= c3 {
					err = errInvalidGB18030
					break loop
				}
				size = 4
				// Linearize the 4 bytes into a single code-point index.
				r = ((rune(c0-0x81)*10+rune(c1-0x30))*126+rune(c2-0x81))*10 + rune(c3-0x30)
				if r < 39420 {
					// Binary search the gb18030 range table for the
					// enclosing interval, then offset into Unicode.
					i, j := 0, len(gb18030)
					for i < j {
						h := i + (j-i)/2
						if r >= rune(gb18030[h][0]) {
							i = h + 1
						} else {
							j = h
						}
					}
					dec := &gb18030[i-1]
					r += rune(dec[1]) - rune(dec[0])
					goto write
				}
				// Values past the BMP ranges map linearly to the
				// supplementary planes.
				r -= 189000
				if 0 <= r && r < 0x100000 {
					r += 0x10000
					goto write
				}
				err = errInvalidGB18030
				break loop
			default:
				if d.gb18030 {
					err = errInvalidGB18030
				} else {
					err = errInvalidGBK
				}
				break loop
			}
			// 2-byte sequence: index into the generated decode table;
			// zero entries mean "unmapped" and become '\ufffd'.
			r, size = '\ufffd', 2
			if i := int(c0-0x81)*190 + int(c1); i < len(decode) {
				r = rune(decode[i])
				if r == 0 {
					r = '\ufffd'
				}
			}

		default:
			// 0xff is never a valid lead byte.
			if d.gb18030 {
				err = errInvalidGB18030
			} else {
				err = errInvalidGBK
			}
			break loop
		}

	write:
		if nDst+utf8.RuneLen(r) > len(dst) {
			err = transform.ErrShortDst
			break loop
		}
		nDst += utf8.EncodeRune(dst[nDst:], r)
	}
	// At EOF a truncated sequence can never be completed, so report it
	// as an encoding error rather than ErrShortSrc.
	if atEOF && err == transform.ErrShortSrc {
		if d.gb18030 {
			err = errInvalidGB18030
		} else {
			err = errInvalidGBK
		}
	}
	return nDst, nSrc, err
}
|
||||
|
||||
// gbkEncoder converts UTF-8 to GBK (or, when gb18030 is set, GB18030)
// bytes.
type gbkEncoder struct {
	transform.NopResetter
	gb18030 bool // enable the 4-byte GB18030 output forms
}
|
||||
|
||||
// Transform encodes the UTF-8 bytes of src into dst as GBK or, when
// e.gb18030 is set, as GB18030 (GBK plus a four-byte form for all other
// runes). It implements transform.Transformer.
func (e gbkEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	r, r2, size := rune(0), rune(0), 0
	for ; nSrc < len(src); nSrc += size {
		r = rune(src[nSrc])

		// Decode a 1-byte rune.
		if r < utf8.RuneSelf {
			size = 1

		} else {
			// Decode a multi-byte rune.
			r, size = utf8.DecodeRune(src[nSrc:])
			if size == 1 {
				// All valid runes of size 1 (those below utf8.RuneSelf) were
				// handled above. We have invalid UTF-8 or we haven't seen the
				// full character yet.
				if !atEOF && !utf8.FullRune(src[nSrc:]) {
					err = transform.ErrShortSrc
					break
				}
			}

			// func init checks that the switch covers all tables.
			switch {
			case encode0Low <= r && r < encode0High:
				if r2 = rune(encode0[r-encode0Low]); r2 != 0 {
					goto write2
				}
			case encode1Low <= r && r < encode1High:
				// Microsoft's Code Page 936 extends GBK 1.0 to encode the euro sign U+20AC
				// as 0x80. The HTML5 specification at http://encoding.spec.whatwg.org/#gbk
				// says to treat "gbk" as Code Page 936.
				if r == '€' {
					r = 0x80
					goto write1
				}
				if r2 = rune(encode1[r-encode1Low]); r2 != 0 {
					goto write2
				}
			case encode2Low <= r && r < encode2High:
				if r2 = rune(encode2[r-encode2Low]); r2 != 0 {
					goto write2
				}
			case encode3Low <= r && r < encode3High:
				if r2 = rune(encode3[r-encode3Low]); r2 != 0 {
					goto write2
				}
			case encode4Low <= r && r < encode4High:
				if r2 = rune(encode4[r-encode4Low]); r2 != 0 {
					goto write2
				}
			}

			// No two-byte GBK mapping exists for r.
			if e.gb18030 {
				if r < 0x10000 {
					// Binary-search the gb18030 range table to convert r
					// into its linear four-byte index.
					i, j := 0, len(gb18030)
					for i < j {
						h := i + (j-i)/2
						if r >= rune(gb18030[h][1]) {
							i = h + 1
						} else {
							j = h
						}
					}
					dec := &gb18030[i-1]
					r += rune(dec[0]) - rune(dec[1])
					goto write4
				} else if r < 0x110000 {
					// Supplementary-plane runes map linearly, offset 189000.
					r += 189000 - 0x10000
					goto write4
				}
			}
			// Unencodable rune: emit the ASCII substitute character.
			r = encoding.ASCIISub
		}

	write1:
		if nDst >= len(dst) {
			err = transform.ErrShortDst
			break
		}
		dst[nDst] = uint8(r)
		nDst++
		continue

	write2:
		if nDst+2 > len(dst) {
			err = transform.ErrShortDst
			break
		}
		dst[nDst+0] = uint8(r2 >> 8)
		dst[nDst+1] = uint8(r2)
		nDst += 2
		continue

	write4:
		// Four-byte GB18030 form: digits of r in mixed radix 10/126/10,
		// offset by 0x30 (digit bytes) and 0x81 (lead bytes).
		if nDst+4 > len(dst) {
			err = transform.ErrShortDst
			break
		}
		dst[nDst+3] = uint8(r%10 + 0x30)
		r /= 10
		dst[nDst+2] = uint8(r%126 + 0x81)
		r /= 126
		dst[nDst+1] = uint8(r%10 + 0x30)
		r /= 10
		dst[nDst+0] = uint8(r + 0x81)
		nDst += 4
		continue
	}
	return nDst, nSrc, err
}
|
||||
|
||||
func init() {
|
||||
// Check that the hard-coded encode switch covers all tables.
|
||||
if numEncodeTables != 5 {
|
||||
panic("bad numEncodeTables")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,228 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package simplifiedchinese
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/encoding"
|
||||
"golang.org/x/text/encoding/internal"
|
||||
"golang.org/x/text/encoding/internal/identifier"
|
||||
"golang.org/x/text/transform"
|
||||
)
|
||||
|
||||
// HZGB2312 is the HZ-GB2312 encoding.
|
||||
var HZGB2312 encoding.Encoding = &hzGB2312
|
||||
|
||||
var hzGB2312 = internal.Encoding{
|
||||
internal.FuncEncoding{hzGB2312NewDecoder, hzGB2312NewEncoder},
|
||||
"HZ-GB2312",
|
||||
identifier.HZGB2312,
|
||||
}
|
||||
|
||||
func hzGB2312NewDecoder() transform.Transformer {
|
||||
return new(hzGB2312Decoder)
|
||||
}
|
||||
|
||||
func hzGB2312NewEncoder() transform.Transformer {
|
||||
return new(hzGB2312Encoder)
|
||||
}
|
||||
|
||||
// errInvalidHZGB2312 reports byte sequences that are not valid HZ-GB2312.
var errInvalidHZGB2312 = errors.New("simplifiedchinese: invalid HZ-GB2312 encoding")

// The two shift states of an HZ-GB2312 stream: plain ASCII, and GB mode
// entered via "~{" and left via "~}".
const (
	asciiState = iota
	gbState
)

// hzGB2312Decoder holds the decoder's current shift state
// (asciiState or gbState).
type hzGB2312Decoder int
|
||||
|
||||
// Reset returns the decoder to the initial ASCII shift state, discarding
// any pending "~{" GB-mode shift.
func (d *hzGB2312Decoder) Reset() {
	*d = asciiState
}
|
||||
|
||||
// Transform decodes HZ-GB2312 src into UTF-8 in dst. The stream is a state
// machine over 7-bit bytes: "~{" shifts into GB mode, "~}" shifts back to
// ASCII, "~~" is a literal '~', and "~\n" is a discarded line continuation.
func (d *hzGB2312Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	r, size := rune(0), 0
loop:
	for ; nSrc < len(src); nSrc += size {
		c0 := src[nSrc]
		// HZ-GB2312 is a pure 7-bit encoding; any high-bit byte is invalid.
		if c0 >= utf8.RuneSelf {
			err = errInvalidHZGB2312
			break loop
		}

		if c0 == '~' {
			// Escape sequences always consume two bytes.
			if nSrc+1 >= len(src) {
				err = transform.ErrShortSrc
				break loop
			}
			size = 2
			switch src[nSrc+1] {
			case '{':
				*d = gbState
				continue
			case '}':
				*d = asciiState
				continue
			case '~':
				// "~~" decodes to a literal tilde.
				if nDst >= len(dst) {
					err = transform.ErrShortDst
					break loop
				}
				dst[nDst] = '~'
				nDst++
				continue
			case '\n':
				// Line continuation: produces no output.
				continue
			default:
				err = errInvalidHZGB2312
				break loop
			}
		}

		if *d == asciiState {
			r, size = rune(c0), 1
		} else {
			// GB mode: decode a two-byte code.
			if nSrc+1 >= len(src) {
				err = transform.ErrShortSrc
				break loop
			}
			c1 := src[nSrc+1]
			// Both bytes must lie in the printable GB2312 range.
			if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 {
				err = errInvalidHZGB2312
				break loop
			}

			// Look up in the shared GBK decode table; unmapped entries
			// become the Unicode replacement character.
			r, size = '\ufffd', 2
			if i := int(c0-0x01)*190 + int(c1+0x3f); i < len(decode) {
				r = rune(decode[i])
				if r == 0 {
					r = '\ufffd'
				}
			}
		}

		if nDst+utf8.RuneLen(r) > len(dst) {
			err = transform.ErrShortDst
			break loop
		}
		nDst += utf8.EncodeRune(dst[nDst:], r)
	}
	// A sequence truncated at EOF can never be completed, so it is invalid.
	if atEOF && err == transform.ErrShortSrc {
		err = errInvalidHZGB2312
	}
	return nDst, nSrc, err
}
|
||||
|
||||
// hzGB2312Encoder holds the encoder's current output shift state
// (asciiState or gbState).
type hzGB2312Encoder int

// Reset returns the encoder to the initial ASCII shift state.
func (d *hzGB2312Encoder) Reset() {
	*d = asciiState
}
|
||||
|
||||
// Transform encodes UTF-8 src into HZ-GB2312 in dst, emitting "~{" / "~}"
// shift sequences when switching between GB and ASCII modes and doubling
// literal '~' bytes. Runes with no GB2312 encoding become encoding.ASCIISub.
func (e *hzGB2312Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	r, size := rune(0), 0
	for ; nSrc < len(src); nSrc += size {
		r = rune(src[nSrc])

		// Decode a 1-byte rune.
		if r < utf8.RuneSelf {
			size = 1
			if r == '~' {
				// A literal tilde must be escaped as "~~".
				if nDst+2 > len(dst) {
					err = transform.ErrShortDst
					break
				}
				dst[nDst+0] = '~'
				dst[nDst+1] = '~'
				nDst += 2
				continue
			}

		} else {
			// Decode a multi-byte rune.
			r, size = utf8.DecodeRune(src[nSrc:])
			if size == 1 {
				// All valid runes of size 1 (those below utf8.RuneSelf) were
				// handled above. We have invalid UTF-8 or we haven't seen the
				// full character yet.
				if !atEOF && !utf8.FullRune(src[nSrc:]) {
					err = transform.ErrShortSrc
					break
				}
			}

			// func init checks that the switch covers all tables.
			switch {
			case encode0Low <= r && r < encode0High:
				if r = rune(encode0[r-encode0Low]); r != 0 {
					goto writeGB
				}
			case encode1Low <= r && r < encode1High:
				if r = rune(encode1[r-encode1Low]); r != 0 {
					goto writeGB
				}
			case encode2Low <= r && r < encode2High:
				if r = rune(encode2[r-encode2Low]); r != 0 {
					goto writeGB
				}
			case encode3Low <= r && r < encode3High:
				if r = rune(encode3[r-encode3Low]); r != 0 {
					goto writeGB
				}
			case encode4Low <= r && r < encode4High:
				if r = rune(encode4[r-encode4Low]); r != 0 {
					goto writeGB
				}
			}
			// No GB mapping: substitute and fall through to ASCII output.
			r = encoding.ASCIISub
		}

	writeASCII:
		if *e != asciiState {
			// Shift back to ASCII mode ("~}") before the byte itself.
			if nDst+3 > len(dst) {
				err = transform.ErrShortDst
				break
			}
			*e = asciiState
			dst[nDst+0] = '~'
			dst[nDst+1] = '}'
			nDst += 2
		} else if nDst >= len(dst) {
			err = transform.ErrShortDst
			break
		}
		dst[nDst] = uint8(r)
		nDst++
		continue

	writeGB:
		// Convert the two-byte GBK code to 7-bit GB2312 bytes.
		c0 := uint8(r>>8) - 0x80
		c1 := uint8(r) - 0x80
		if c0 < 0x21 || 0x7e <= c0 || c1 < 0x21 || 0x7f <= c1 {
			// Representable in GBK but not in GB2312 proper; substitute.
			r = encoding.ASCIISub
			goto writeASCII
		}
		if *e == asciiState {
			// Shift into GB mode ("~{") before the code bytes.
			if nDst+4 > len(dst) {
				err = transform.ErrShortDst
				break
			}
			*e = gbState
			dst[nDst+0] = '~'
			dst[nDst+1] = '{'
			nDst += 2
		} else if nDst+2 > len(dst) {
			err = transform.ErrShortDst
			break
		}
		dst[nDst+0] = c0
		dst[nDst+1] = c1
		nDst += 2
		continue
	}
	return nDst, nSrc, err
}
|
161
vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
generated
vendored
Normal file
161
vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go
generated
vendored
Normal file
|
@ -0,0 +1,161 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
// This program generates tables.go:
|
||||
// go run maketables.go | gofmt > tables.go
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Printf("// generated by go run maketables.go; DO NOT EDIT\n\n")
|
||||
fmt.Printf("// Package simplifiedchinese provides Simplified Chinese encodings such as GBK.\n")
|
||||
fmt.Printf(`package simplifiedchinese // import "golang.org/x/text/encoding/simplifiedchinese"` + "\n\n")
|
||||
|
||||
printGB18030()
|
||||
printGBK()
|
||||
}
|
||||
|
||||
// printGB18030 downloads the WHATWG gb18030 index and prints the gb18030
// range table. Only entries whose linear index and code point both fit in a
// uint16 are emitted; the coders handle larger values arithmetically.
func printGB18030() {
	res, err := http.Get("http://encoding.spec.whatwg.org/index-gb18030.txt")
	if err != nil {
		log.Fatalf("Get: %v", err)
	}
	defer res.Body.Close()

	fmt.Printf("// gb18030 is the table from http://encoding.spec.whatwg.org/index-gb18030.txt\n")
	fmt.Printf("var gb18030 = [...][2]uint16{\n")
	scanner := bufio.NewScanner(res.Body)
	for scanner.Scan() {
		s := strings.TrimSpace(scanner.Text())
		if s == "" || s[0] == '#' {
			// Skip blank lines and comments.
			continue
		}
		x, y := uint32(0), uint32(0)
		if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
			log.Fatalf("could not parse %q", s)
		}
		if x < 0x10000 && y < 0x10000 {
			fmt.Printf("\t{0x%04x, 0x%04x},\n", x, y)
		}
	}
	// Fix: a scanner read error was previously ignored, which could silently
	// truncate the generated table. Check it, as printGBK already does.
	if err := scanner.Err(); err != nil {
		log.Fatalf("scanner error: %v", err)
	}
	fmt.Printf("}\n\n")
}
|
||||
|
||||
func printGBK() {
|
||||
res, err := http.Get("http://encoding.spec.whatwg.org/index-gbk.txt")
|
||||
if err != nil {
|
||||
log.Fatalf("Get: %v", err)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
mapping := [65536]uint16{}
|
||||
reverse := [65536]uint16{}
|
||||
|
||||
scanner := bufio.NewScanner(res.Body)
|
||||
for scanner.Scan() {
|
||||
s := strings.TrimSpace(scanner.Text())
|
||||
if s == "" || s[0] == '#' {
|
||||
continue
|
||||
}
|
||||
x, y := uint16(0), uint16(0)
|
||||
if _, err := fmt.Sscanf(s, "%d 0x%x", &x, &y); err != nil {
|
||||
log.Fatalf("could not parse %q", s)
|
||||
}
|
||||
if x < 0 || 126*190 <= x {
|
||||
log.Fatalf("GBK code %d is out of range", x)
|
||||
}
|
||||
mapping[x] = y
|
||||
if reverse[y] == 0 {
|
||||
c0, c1 := x/190, x%190
|
||||
if c1 >= 0x3f {
|
||||
c1++
|
||||
}
|
||||
reverse[y] = (0x81+c0)<<8 | (0x40 + c1)
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
log.Fatalf("scanner error: %v", err)
|
||||
}
|
||||
|
||||
fmt.Printf("// decode is the decoding table from GBK code to Unicode.\n")
|
||||
fmt.Printf("// It is defined at http://encoding.spec.whatwg.org/index-gbk.txt\n")
|
||||
fmt.Printf("var decode = [...]uint16{\n")
|
||||
for i, v := range mapping {
|
||||
if v != 0 {
|
||||
fmt.Printf("\t%d: 0x%04X,\n", i, v)
|
||||
}
|
||||
}
|
||||
fmt.Printf("}\n\n")
|
||||
|
||||
// Any run of at least separation continuous zero entries in the reverse map will
|
||||
// be a separate encode table.
|
||||
const separation = 1024
|
||||
|
||||
intervals := []interval(nil)
|
||||
low, high := -1, -1
|
||||
for i, v := range reverse {
|
||||
if v == 0 {
|
||||
continue
|
||||
}
|
||||
if low < 0 {
|
||||
low = i
|
||||
} else if i-high >= separation {
|
||||
if high >= 0 {
|
||||
intervals = append(intervals, interval{low, high})
|
||||
}
|
||||
low = i
|
||||
}
|
||||
high = i + 1
|
||||
}
|
||||
if high >= 0 {
|
||||
intervals = append(intervals, interval{low, high})
|
||||
}
|
||||
sort.Sort(byDecreasingLength(intervals))
|
||||
|
||||
fmt.Printf("const numEncodeTables = %d\n\n", len(intervals))
|
||||
fmt.Printf("// encodeX are the encoding tables from Unicode to GBK code,\n")
|
||||
fmt.Printf("// sorted by decreasing length.\n")
|
||||
for i, v := range intervals {
|
||||
fmt.Printf("// encode%d: %5d entries for runes in [%5d, %5d).\n", i, v.len(), v.low, v.high)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
for i, v := range intervals {
|
||||
fmt.Printf("const encode%dLow, encode%dHigh = %d, %d\n\n", i, i, v.low, v.high)
|
||||
fmt.Printf("var encode%d = [...]uint16{\n", i)
|
||||
for j := v.low; j < v.high; j++ {
|
||||
x := reverse[j]
|
||||
if x == 0 {
|
||||
continue
|
||||
}
|
||||
fmt.Printf("\t%d-%d: 0x%04X,\n", j, v.low, x)
|
||||
}
|
||||
fmt.Printf("}\n\n")
|
||||
}
|
||||
}
|
||||
|
||||
// interval is a half-open interval [low, high).
type interval struct {
	low, high int
}

// len returns the number of integers the interval covers.
func (i interval) len() int { return i.high - i.low }
|
||||
|
||||
// byDecreasingLength sorts intervals by decreasing length.
// It implements sort.Interface.
type byDecreasingLength []interval

func (b byDecreasingLength) Len() int           { return len(b) }
func (b byDecreasingLength) Less(i, j int) bool { return b[i].len() > b[j].len() }
func (b byDecreasingLength) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,616 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package transform provides reader and writer wrappers that transform the
|
||||
// bytes passing through as well as various transformations. Example
|
||||
// transformations provided by other packages include normalization and
|
||||
// conversion between character sets.
|
||||
package transform // import "golang.org/x/text/transform"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrShortDst means that the destination buffer was too short to
|
||||
// receive all of the transformed bytes.
|
||||
ErrShortDst = errors.New("transform: short destination buffer")
|
||||
|
||||
// ErrShortSrc means that the source buffer has insufficient data to
|
||||
// complete the transformation.
|
||||
ErrShortSrc = errors.New("transform: short source buffer")
|
||||
|
||||
// errInconsistentByteCount means that Transform returned success (nil
|
||||
// error) but also returned nSrc inconsistent with the src argument.
|
||||
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
|
||||
|
||||
// errShortInternal means that an internal buffer is not large enough
|
||||
// to make progress and the Transform operation must be aborted.
|
||||
errShortInternal = errors.New("transform: short internal buffer")
|
||||
)
|
||||
|
||||
// Transformer transforms bytes.
|
||||
type Transformer interface {
|
||||
// Transform writes to dst the transformed bytes read from src, and
|
||||
// returns the number of dst bytes written and src bytes read. The
|
||||
// atEOF argument tells whether src represents the last bytes of the
|
||||
// input.
|
||||
//
|
||||
// Callers should always process the nDst bytes produced and account
|
||||
// for the nSrc bytes consumed before considering the error err.
|
||||
//
|
||||
// A nil error means that all of the transformed bytes (whether freshly
|
||||
// transformed from src or left over from previous Transform calls)
|
||||
// were written to dst. A nil error can be returned regardless of
|
||||
// whether atEOF is true. If err is nil then nSrc must equal len(src);
|
||||
// the converse is not necessarily true.
|
||||
//
|
||||
// ErrShortDst means that dst was too short to receive all of the
|
||||
// transformed bytes. ErrShortSrc means that src had insufficient data
|
||||
// to complete the transformation. If both conditions apply, then
|
||||
// either error may be returned. Other than the error conditions listed
|
||||
// here, implementations are free to report other errors that arise.
|
||||
Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)
|
||||
|
||||
// Reset resets the state and allows a Transformer to be reused.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// NopResetter can be embedded by implementations of Transformer to add a nop
|
||||
// Reset method.
|
||||
type NopResetter struct{}
|
||||
|
||||
// Reset implements the Reset method of the Transformer interface.
|
||||
func (NopResetter) Reset() {}
|
||||
|
||||
// Reader wraps another io.Reader by transforming the bytes read.
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
t Transformer
|
||||
err error
|
||||
|
||||
// dst[dst0:dst1] contains bytes that have been transformed by t but
|
||||
// not yet copied out via Read.
|
||||
dst []byte
|
||||
dst0, dst1 int
|
||||
|
||||
// src[src0:src1] contains bytes that have been read from r but not
|
||||
// yet transformed through t.
|
||||
src []byte
|
||||
src0, src1 int
|
||||
|
||||
// transformComplete is whether the transformation is complete,
|
||||
// regardless of whether or not it was successful.
|
||||
transformComplete bool
|
||||
}
|
||||
|
||||
const defaultBufSize = 4096
|
||||
|
||||
// NewReader returns a new Reader that wraps r by transforming the bytes read
|
||||
// via t. It calls Reset on t.
|
||||
func NewReader(r io.Reader, t Transformer) *Reader {
|
||||
t.Reset()
|
||||
return &Reader{
|
||||
r: r,
|
||||
t: t,
|
||||
dst: make([]byte, defaultBufSize),
|
||||
src: make([]byte, defaultBufSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Read implements the io.Reader interface.
|
||||
// Read implements the io.Reader interface. It drains any already-transformed
// bytes into p first, then alternates between transforming buffered source
// bytes and refilling the source buffer from the wrapped reader.
func (r *Reader) Read(p []byte) (int, error) {
	n, err := 0, error(nil)
	for {
		// Copy out any transformed bytes and return the final error if we are done.
		if r.dst0 != r.dst1 {
			n = copy(p, r.dst[r.dst0:r.dst1])
			r.dst0 += n
			if r.dst0 == r.dst1 && r.transformComplete {
				return n, r.err
			}
			return n, nil
		} else if r.transformComplete {
			return 0, r.err
		}

		// Try to transform some source bytes, or to flush the transformer if we
		// are out of source bytes. We do this even if r.r.Read returned an error.
		// As the io.Reader documentation says, "process the n > 0 bytes returned
		// before considering the error".
		if r.src0 != r.src1 || r.err != nil {
			r.dst0 = 0
			// atEOF is true only once the wrapped reader has reported io.EOF.
			r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
			r.src0 += n

			switch {
			case err == nil:
				if r.src0 != r.src1 {
					r.err = errInconsistentByteCount
				}
				// The Transform call was successful; we are complete if we
				// cannot read more bytes into src.
				r.transformComplete = r.err != nil
				continue
			case err == ErrShortDst && (r.dst1 != 0 || n != 0):
				// Make room in dst by copying out, and try again.
				continue
			case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
				// Read more bytes into src via the code below, and try again.
			default:
				r.transformComplete = true
				// The reader error (r.err) takes precedence over the
				// transformer error (err) unless r.err is nil or io.EOF.
				if r.err == nil || r.err == io.EOF {
					r.err = err
				}
				continue
			}
		}

		// Move any untransformed source bytes to the start of the buffer
		// and read more bytes.
		if r.src0 != 0 {
			r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
		}
		n, r.err = r.r.Read(r.src[r.src1:])
		r.src1 += n
	}
}
|
||||
|
||||
// TODO: implement ReadByte (and ReadRune??).
|
||||
|
||||
// Writer wraps another io.Writer by transforming the bytes read.
|
||||
// The user needs to call Close to flush unwritten bytes that may
|
||||
// be buffered.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
t Transformer
|
||||
dst []byte
|
||||
|
||||
// src[:n] contains bytes that have not yet passed through t.
|
||||
src []byte
|
||||
n int
|
||||
}
|
||||
|
||||
// NewWriter returns a new Writer that wraps w by transforming the bytes written
|
||||
// via t. It calls Reset on t.
|
||||
func NewWriter(w io.Writer, t Transformer) *Writer {
|
||||
t.Reset()
|
||||
return &Writer{
|
||||
w: w,
|
||||
t: t,
|
||||
dst: make([]byte, defaultBufSize),
|
||||
src: make([]byte, defaultBufSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Write implements the io.Writer interface. If there are not enough
|
||||
// bytes available to complete a Transform, the bytes will be buffered
|
||||
// for the next write. Call Close to convert the remaining bytes.
|
||||
// Write implements the io.Writer interface. If there are not enough
// bytes available to complete a Transform, the bytes will be buffered
// for the next write. Call Close to convert the remaining bytes.
func (w *Writer) Write(data []byte) (n int, err error) {
	src := data
	if w.n > 0 {
		// Append bytes from data to the last remainder.
		// TODO: limit the amount copied on first try.
		n = copy(w.src[w.n:], data)
		w.n += n
		src = w.src[:w.n]
	}
	for {
		nDst, nSrc, err := w.t.Transform(w.dst, src, false)
		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
			return n, werr
		}
		src = src[nSrc:]
		if w.n > 0 && len(src) <= n {
			// Enough bytes from w.src have been consumed. We make src point
			// to data instead to reduce the copying.
			w.n = 0
			n -= len(src)
			src = data[n:]
			if n < len(data) && (err == nil || err == ErrShortSrc) {
				continue
			}
		} else {
			n += nSrc
		}
		switch {
		case err == ErrShortDst && (nDst > 0 || nSrc > 0):
			// Progress was made; loop to transform the remainder.
		case err == ErrShortSrc && len(src) < len(w.src):
			// Buffer the remaining input for the next Write call.
			m := copy(w.src, src)
			// If w.n > 0, bytes from data were already copied to w.src and n
			// was already set to the number of bytes consumed.
			if w.n == 0 {
				n += m
			}
			w.n = m
			return n, nil
		case err == nil && w.n > 0:
			return n, errInconsistentByteCount
		default:
			return n, err
		}
	}
}
|
||||
|
||||
// Close implements the io.Closer interface.
|
||||
// Close implements the io.Closer interface. It flushes bytes buffered by
// previous Writes through the transformer with atEOF set and writes the
// result to the wrapped writer.
func (w *Writer) Close() error {
	for src := w.src[:w.n]; len(src) > 0; {
		nDst, nSrc, err := w.t.Transform(w.dst, src, true)
		if nDst == 0 {
			// No output progress is possible; report the underlying cause.
			return err
		}
		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
			return werr
		}
		if err != ErrShortDst {
			// Either finished cleanly (nil) or hit a fatal transformer error.
			return err
		}
		// ErrShortDst: loop to flush the rest of the buffered input.
		src = src[nSrc:]
	}
	return nil
}
|
||||
|
||||
type nop struct{ NopResetter }
|
||||
|
||||
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
n := copy(dst, src)
|
||||
if n < len(src) {
|
||||
err = ErrShortDst
|
||||
}
|
||||
return n, n, err
|
||||
}
|
||||
|
||||
type discard struct{ NopResetter }
|
||||
|
||||
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
return 0, len(src), nil
|
||||
}
|
||||
|
||||
var (
|
||||
// Discard is a Transformer for which all Transform calls succeed
|
||||
// by consuming all bytes and writing nothing.
|
||||
Discard Transformer = discard{}
|
||||
|
||||
// Nop is a Transformer that copies src to dst.
|
||||
Nop Transformer = nop{}
|
||||
)
|
||||
|
||||
// chain is a sequence of links. A chain with N Transformers has N+1 links and
|
||||
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
|
||||
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
|
||||
// buffers owned by the chain. The i'th link transforms bytes from the i'th
|
||||
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
|
||||
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
|
||||
type chain struct {
|
||||
link []link
|
||||
err error
|
||||
// errStart is the index at which the error occurred plus 1. Processing
|
||||
// errStart at this level at the next call to Transform. As long as
|
||||
// errStart > 0, chain will not consume any more source bytes.
|
||||
errStart int
|
||||
}
|
||||
|
||||
func (c *chain) fatalError(errIndex int, err error) {
|
||||
if i := errIndex + 1; i > c.errStart {
|
||||
c.errStart = i
|
||||
c.err = err
|
||||
}
|
||||
}
|
||||
|
||||
type link struct {
|
||||
t Transformer
|
||||
// b[p:n] holds the bytes to be transformed by t.
|
||||
b []byte
|
||||
p int
|
||||
n int
|
||||
}
|
||||
|
||||
// src returns the bytes buffered in this link that have not yet been
// transformed (b[p:n]).
func (l *link) src() []byte {
	return l.b[l.p:l.n]
}

// dst returns the free space in this link's buffer that the previous link
// may write transformed bytes into (b[n:]).
func (l *link) dst() []byte {
	return l.b[l.n:]
}
|
||||
|
||||
// Chain returns a Transformer that applies t in sequence.
|
||||
func Chain(t ...Transformer) Transformer {
|
||||
if len(t) == 0 {
|
||||
return nop{}
|
||||
}
|
||||
c := &chain{link: make([]link, len(t)+1)}
|
||||
for i, tt := range t {
|
||||
c.link[i].t = tt
|
||||
}
|
||||
// Allocate intermediate buffers.
|
||||
b := make([][defaultBufSize]byte, len(t)-1)
|
||||
for i := range b {
|
||||
c.link[i+1].b = b[i][:]
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Reset resets the state of Chain. It calls Reset on all the Transformers.
|
||||
func (c *chain) Reset() {
|
||||
for i, l := range c.link {
|
||||
if l.t != nil {
|
||||
l.t.Reset()
|
||||
}
|
||||
c.link[i].p, c.link[i].n = 0, 0
|
||||
}
|
||||
}
|
||||
|
||||
// Transform applies the transformers of c in sequence, pumping bytes from
// src through each link's buffer until they reach dst.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	// Set up src and dst in the chain.
	srcL := &c.link[0]
	dstL := &c.link[len(c.link)-1]
	srcL.b, srcL.p, srcL.n = src, 0, len(src)
	dstL.b, dstL.n = dst, 0
	var lastFull, needProgress bool // for detecting progress

	// i is the index of the next Transformer to apply, for i in [low, high].
	// low is the lowest index for which c.link[low] may still produce bytes.
	// high is the highest index for which c.link[high] has a Transformer.
	// The error returned by Transform determines whether to increase or
	// decrease i. We try to completely fill a buffer before converting it.
	for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
		in, out := &c.link[i], &c.link[i+1]
		nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
		out.n += nDst
		in.p += nSrc
		if i > 0 && in.p == in.n {
			// Internal buffer fully consumed; rewind it for reuse.
			in.p, in.n = 0, 0
		}
		needProgress, lastFull = lastFull, false
		switch err0 {
		case ErrShortDst:
			// Process the destination buffer next. Return if we are already
			// at the high index.
			if i == high {
				return dstL.n, srcL.p, ErrShortDst
			}
			if out.n != 0 {
				i++
				// If the Transformer at the next index is not able to process any
				// source bytes there is nothing that can be done to make progress
				// and the bytes will remain unprocessed. lastFull is used to
				// detect this and break out of the loop with a fatal error.
				lastFull = true
				continue
			}
			// The destination buffer was too small, but is completely empty.
			// Return a fatal error as this transformation can never complete.
			c.fatalError(i, errShortInternal)
		case ErrShortSrc:
			if i == 0 {
				// Save ErrShortSrc in err. All other errors take precedence.
				err = ErrShortSrc
				break
			}
			// Source bytes were depleted before filling up the destination buffer.
			// Verify we made some progress, move the remaining bytes to the errStart
			// and try to get more source bytes.
			if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
				// There were not enough source bytes to proceed while the source
				// buffer cannot hold any more bytes. Return a fatal error as this
				// transformation can never complete.
				c.fatalError(i, errShortInternal)
				break
			}
			// in.b is an internal buffer and we can make progress.
			in.p, in.n = 0, copy(in.b, in.src())
			fallthrough
		case nil:
			// if i == low, we have depleted the bytes at index i or any lower levels.
			// In that case we increase low and i. In all other cases we decrease i to
			// fetch more bytes before proceeding to the next index.
			if i > low {
				i--
				continue
			}
		default:
			c.fatalError(i, err0)
		}
		// Exhausted level low or fatal error: increase low and continue
		// to process the bytes accepted so far.
		i++
		low = i
	}

	// If c.errStart > 0, this means we found a fatal error. We will clear
	// all upstream buffers. At this point, no more progress can be made
	// downstream, as Transform would have bailed while handling ErrShortDst.
	if c.errStart > 0 {
		for i := 1; i < c.errStart; i++ {
			c.link[i].p, c.link[i].n = 0, 0
		}
		err, c.errStart, c.err = c.err, 0, nil
	}
	return dstL.n, srcL.p, err
}
|
||||
|
||||
// RemoveFunc returns a Transformer that removes from the input all runes r for
// which f(r) is true. Illegal bytes in the input are replaced by RuneError.
func RemoveFunc(f func(r rune) bool) Transformer {
	return removeF(f)
}

// removeF adapts a rune predicate into a Transformer.
type removeF func(r rune) bool

// Reset is a no-op: removeF carries no state between Transform calls.
func (removeF) Reset() {}
|
||||
|
||||
// Transform implements the Transformer interface. It copies through every
// rune for which t(r) is false and drops the rest; invalid UTF-8 bytes are
// treated as utf8.RuneError (the value DecodeRune yields for them).
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {

		if r = rune(src[0]); r < utf8.RuneSelf {
			sz = 1
		} else {
			r, sz = utf8.DecodeRune(src)

			if sz == 1 {
				// Invalid rune.
				if !atEOF && !utf8.FullRune(src) {
					err = ErrShortSrc
					break
				}
				// We replace illegal bytes with RuneError. Not doing so might
				// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
				// The resulting byte sequence may subsequently contain runes
				// for which t(r) is true that were passed unnoticed.
				if !t(r) {
					if nDst+3 > len(dst) {
						err = ErrShortDst
						break
					}
					// "\uFFFD" encodes to exactly three bytes.
					nDst += copy(dst[nDst:], "\uFFFD")
				}
				nSrc++
				continue
			}
		}

		if !t(r) {
			if nDst+sz > len(dst) {
				err = ErrShortDst
				break
			}
			nDst += copy(dst[nDst:], src[:sz])
		}
		nSrc += sz
	}
	return
}
|
||||
|
||||
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice.
func grow(b []byte, n int) []byte {
	newLen := len(b)
	if newLen <= 256 {
		// Small buffers double in size.
		newLen *= 2
	} else {
		// Larger buffers grow by 50% to limit over-allocation.
		newLen += newLen >> 1
	}
	out := make([]byte, newLen)
	copy(out, b[:n])
	return out
}
|
||||
|
||||
const initialBufSize = 128
|
||||
|
||||
// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
//
// The implementation first scans in fixed-size chunks while the output equals
// the input, so a no-op transformation returns s itself without allocating;
// only after the first difference does it switch to building a new buffer.
func String(t Transformer, s string) (result string, n int, err error) {
	if s == "" {
		return "", 0, nil
	}

	t.Reset()

	// Allocate only once. Note that both dst and src escape when passed to
	// Transform.
	buf := [2 * initialBufSize]byte{}
	dst := buf[:initialBufSize:initialBufSize]
	src := buf[initialBufSize : 2*initialBufSize]

	// Avoid allocation if the transformed string is identical to the original.
	// After this loop, pDst will point to the furthest point in s for which it
	// could be detected that t gives equal results, src[:nSrc] will
	// indicate the last processed chunk of s for which the output is not equal
	// and dst[:nDst] will be the transform of this chunk.
	var nDst, nSrc int
	pDst := 0 // Used as index in both src and dst in this loop.
	for {
		n := copy(src, s[pDst:])
		nDst, nSrc, err = t.Transform(dst, src[:n], pDst+n == len(s))

		// Note 1: we will not enter the loop with pDst == len(s) and we will
		// not end the loop with it either. So if nSrc is 0, this means there is
		// some kind of error from which we cannot recover given the current
		// buffer sizes. We will give up in this case.
		// Note 2: it is not entirely correct to simply do a bytes.Equal as
		// a Transformer may buffer internally. It will work in most cases,
		// though, and no harm is done if it doesn't work.
		// TODO: let transformers implement an optional Spanner interface, akin
		// to norm's QuickSpan. This would even allow us to avoid any allocation.
		if nSrc == 0 || !bytes.Equal(dst[:nDst], src[:nSrc]) {
			break
		}

		if pDst += nDst; pDst == len(s) {
			// The entire input transformed to itself: return it unchanged.
			return s, pDst, nil
		}
	}

	// Move the bytes seen so far to dst.
	pSrc := pDst + nSrc
	if pDst+nDst <= initialBufSize {
		// The differing chunk still fits in the stack-side dst buffer; shift
		// it past the already-verified prefix.
		copy(dst[pDst:], dst[:nDst])
	} else {
		// Allocate a buffer sized for the whole result, assuming the rest of
		// the input transforms at roughly the same input/output ratio.
		b := make([]byte, len(s)+nDst-nSrc)
		copy(b[pDst:], dst[:nDst])
		dst = b
	}
	// Prepend the verified-equal prefix of s.
	copy(dst, s[:pDst])
	pDst += nDst

	if err != nil && err != ErrShortDst && err != ErrShortSrc {
		// Fatal error: return what was produced so far.
		return string(dst[:pDst]), pSrc, err
	}

	// Complete the string with the remainder.
	for {
		n := copy(src, s[pSrc:])
		nDst, nSrc, err = t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		switch err {
		case nil:
			if pSrc == len(s) {
				return string(dst[:pDst]), pSrc, nil
			}
		case ErrShortDst:
			// Do not grow as long as we can make progress. This may avoid
			// excessive allocations.
			if nDst == 0 {
				dst = grow(dst, pDst)
			}
		case ErrShortSrc:
			if nSrc == 0 {
				src = grow(src, 0)
			}
		default:
			return string(dst[:pDst]), pSrc, err
		}
	}
}
|
||||
|
||||
// Bytes returns a new byte slice with the result of converting b[:n] using t,
|
||||
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
|
||||
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
|
||||
t.Reset()
|
||||
dst := make([]byte, len(b))
|
||||
pDst, pSrc := 0, 0
|
||||
for {
|
||||
nDst, nSrc, err := t.Transform(dst[pDst:], b[pSrc:], true)
|
||||
pDst += nDst
|
||||
pSrc += nSrc
|
||||
if err != ErrShortDst {
|
||||
return dst[:pDst], pSrc, err
|
||||
}
|
||||
|
||||
// Grow the destination buffer, but do not grow as long as we can make
|
||||
// progress. This may avoid excessive allocations.
|
||||
if nDst == 0 {
|
||||
dst = grow(dst, pDst)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,6 +1,86 @@
|
|||
{
|
||||
"comment": "",
|
||||
"ignore": "test",
|
||||
"package": [
|
||||
{
|
||||
"checksumSHA1": "7NpGLW+EOhRLs5cDvi1S+LqUNQ8=",
|
||||
"path": "github.com/b3log/wide",
|
||||
"revision": "f96c8befdf3484ad4d3abdf7a11c7a3d6182d018",
|
||||
"revisionTime": "2018-03-13T04:09:30Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "83iEp3SqOoIkZUYyR7BOVP4vaGE=",
|
||||
"path": "github.com/bradfitz/goimports",
|
||||
"revision": "919f4f2bcea0744d4da4ae851fbf818ae11cba87",
|
||||
"revisionTime": "2014-12-11T23:42:42Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "GpJ9pCFYAiIZb68dQcGjpbXIKOs=",
|
||||
"path": "github.com/go-fsnotify/fsnotify",
|
||||
"revision": "6549b98005f3e4026ad9f50ef7d5011f40ba1397",
|
||||
"revisionTime": "2015-04-08T17:51:48Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "rk/QFSfmdW9TPNUWU6p5FUBIL3U=",
|
||||
"path": "github.com/gorilla/context",
|
||||
"revision": "215affda49addc4c8ef7e2534915df2c8c35c6cd",
|
||||
"revisionTime": "2014-12-17T16:02:51Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "h+XyrpEalNAO8hzMdYwYeYMpP6Y=",
|
||||
"path": "github.com/gorilla/securecookie",
|
||||
"revision": "8e98dd730fc43f1383f19615db6f2e702c9738e8",
|
||||
"revisionTime": "2015-03-27T15:58:05Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "lTK2wUc2cNL4G2ZGkYQa+RsB4l8=",
|
||||
"path": "github.com/gorilla/sessions",
|
||||
"revision": "f61c3ec2cf65d69e7efedfd4d060fe128882c951",
|
||||
"revisionTime": "2015-04-17T17:47:05Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "PtUilL8i32Zi0T4i/DpxMvLva7w=",
|
||||
"path": "github.com/gorilla/websocket",
|
||||
"revision": "a3ec486e6a7a41858210b0fc5d7b5df593b3c4a3",
|
||||
"revisionTime": "2015-05-30T03:03:52Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "5i+5zScV0FNuG3cmRzRPn6PFsfo=",
|
||||
"path": "github.com/nsf/gocode",
|
||||
"revision": "5070dacabf2a80deeaf4ddb0be3761d06fce7be5",
|
||||
"revisionTime": "2016-11-22T21:38:51Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "4fp/TH5nX7seO5p4qJfIj/BokbI=",
|
||||
"path": "github.com/visualfc/gotools",
|
||||
"revision": "b8348693492ca3791bccfa028f3c19634c11c5b5",
|
||||
"revisionTime": "2015-04-09T14:25:36Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "xe5hMqClV1HmKZ4GVg4bmSsVRE8=",
|
||||
"path": "golang.org/x/text/encoding",
|
||||
"revision": ""
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "d4g160kDwJesjZxpevi5Q41OG7o=",
|
||||
"path": "golang.org/x/text/encoding/internal",
|
||||
"revision": ""
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "wvwBoRLwt0iQqBP7tu0/M4vQCQo=",
|
||||
"path": "golang.org/x/text/encoding/internal/identifier",
|
||||
"revision": ""
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "YY+9reJJZdiidVtCfOGV2iWsXSw=",
|
||||
"path": "golang.org/x/text/encoding/simplifiedchinese",
|
||||
"revision": ""
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "baiYjEQuZN5kEgOBJPSsZCecTE8=",
|
||||
"path": "golang.org/x/text/transform",
|
||||
"revision": ""
|
||||
}
|
||||
],
|
||||
"rootPath": "github.com/b3log/wide"
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue