hack/update-vendor.sh

Jordan Liggitt
2019-11-05 14:11:10 -05:00
parent 9a5b7c24ad
commit 297570e06a
932 changed files with 77190 additions and 28219 deletions


@@ -1,21 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: api_proto/api.proto
/*
Package devtools_buildozer is a generated protocol buffer package.
package api_proto
It is generated from these files:
api_proto/api.proto
It has these top-level messages:
Output
RepeatedString
*/
package devtools_buildozer
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -26,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Output_Record_Field_ERROR int32
@@ -39,6 +31,7 @@ var Output_Record_Field_ERROR_name = map[int32]string{
0: "UNKNOWN",
1: "MISSING",
}
var Output_Record_Field_ERROR_value = map[string]int32{
"UNKNOWN": 0,
"MISSING": 1,
@@ -47,18 +40,42 @@ var Output_Record_Field_ERROR_value = map[string]int32{
func (x Output_Record_Field_ERROR) String() string {
return proto.EnumName(Output_Record_Field_ERROR_name, int32(x))
}
func (Output_Record_Field_ERROR) EnumDescriptor() ([]byte, []int) {
return fileDescriptor0, []int{0, 0, 0, 0}
return fileDescriptor_35e560d0f079cc1d, []int{0, 0, 0, 0}
}
type Output struct {
Records []*Output_Record `protobuf:"bytes,1,rep,name=records" json:"records,omitempty"`
Records []*Output_Record `protobuf:"bytes,1,rep,name=records,proto3" json:"records,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Output) Reset() { *m = Output{} }
func (m *Output) String() string { return proto.CompactTextString(m) }
func (*Output) ProtoMessage() {}
func (*Output) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Output) Reset() { *m = Output{} }
func (m *Output) String() string { return proto.CompactTextString(m) }
func (*Output) ProtoMessage() {}
func (*Output) Descriptor() ([]byte, []int) {
return fileDescriptor_35e560d0f079cc1d, []int{0}
}
func (m *Output) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Output.Unmarshal(m, b)
}
func (m *Output) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Output.Marshal(b, m, deterministic)
}
func (m *Output) XXX_Merge(src proto.Message) {
xxx_messageInfo_Output.Merge(m, src)
}
func (m *Output) XXX_Size() int {
return xxx_messageInfo_Output.Size(m)
}
func (m *Output) XXX_DiscardUnknown() {
xxx_messageInfo_Output.DiscardUnknown(m)
}
var xxx_messageInfo_Output proto.InternalMessageInfo
func (m *Output) GetRecords() []*Output_Record {
if m != nil {
@@ -68,13 +85,36 @@ func (m *Output) GetRecords() []*Output_Record {
}
type Output_Record struct {
Fields []*Output_Record_Field `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"`
Fields []*Output_Record_Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Output_Record) Reset() { *m = Output_Record{} }
func (m *Output_Record) String() string { return proto.CompactTextString(m) }
func (*Output_Record) ProtoMessage() {}
func (*Output_Record) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
func (m *Output_Record) Reset() { *m = Output_Record{} }
func (m *Output_Record) String() string { return proto.CompactTextString(m) }
func (*Output_Record) ProtoMessage() {}
func (*Output_Record) Descriptor() ([]byte, []int) {
return fileDescriptor_35e560d0f079cc1d, []int{0, 0}
}
func (m *Output_Record) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Output_Record.Unmarshal(m, b)
}
func (m *Output_Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Output_Record.Marshal(b, m, deterministic)
}
func (m *Output_Record) XXX_Merge(src proto.Message) {
xxx_messageInfo_Output_Record.Merge(m, src)
}
func (m *Output_Record) XXX_Size() int {
return xxx_messageInfo_Output_Record.Size(m)
}
func (m *Output_Record) XXX_DiscardUnknown() {
xxx_messageInfo_Output_Record.DiscardUnknown(m)
}
var xxx_messageInfo_Output_Record proto.InternalMessageInfo
func (m *Output_Record) GetFields() []*Output_Record_Field {
if m != nil {
@@ -89,36 +129,65 @@ type Output_Record_Field struct {
// *Output_Record_Field_Number
// *Output_Record_Field_Error
// *Output_Record_Field_List
Value isOutput_Record_Field_Value `protobuf_oneof:"value"`
// Used internally by Buildozer to decide whether a field should be quoted
// when printing. This does not affect the contents of 'value'.
QuoteWhenPrinting bool `protobuf:"varint,7,opt,name=quote_when_printing,json=quoteWhenPrinting" json:"quote_when_printing,omitempty"`
Value isOutput_Record_Field_Value `protobuf_oneof:"value"`
QuoteWhenPrinting bool `protobuf:"varint,7,opt,name=quote_when_printing,json=quoteWhenPrinting,proto3" json:"quote_when_printing,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Output_Record_Field) Reset() { *m = Output_Record_Field{} }
func (m *Output_Record_Field) String() string { return proto.CompactTextString(m) }
func (*Output_Record_Field) ProtoMessage() {}
func (*Output_Record_Field) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0, 0} }
func (m *Output_Record_Field) Reset() { *m = Output_Record_Field{} }
func (m *Output_Record_Field) String() string { return proto.CompactTextString(m) }
func (*Output_Record_Field) ProtoMessage() {}
func (*Output_Record_Field) Descriptor() ([]byte, []int) {
return fileDescriptor_35e560d0f079cc1d, []int{0, 0, 0}
}
type isOutput_Record_Field_Value interface{ isOutput_Record_Field_Value() }
func (m *Output_Record_Field) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Output_Record_Field.Unmarshal(m, b)
}
func (m *Output_Record_Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Output_Record_Field.Marshal(b, m, deterministic)
}
func (m *Output_Record_Field) XXX_Merge(src proto.Message) {
xxx_messageInfo_Output_Record_Field.Merge(m, src)
}
func (m *Output_Record_Field) XXX_Size() int {
return xxx_messageInfo_Output_Record_Field.Size(m)
}
func (m *Output_Record_Field) XXX_DiscardUnknown() {
xxx_messageInfo_Output_Record_Field.DiscardUnknown(m)
}
var xxx_messageInfo_Output_Record_Field proto.InternalMessageInfo
type isOutput_Record_Field_Value interface {
isOutput_Record_Field_Value()
}
type Output_Record_Field_Text struct {
Text string `protobuf:"bytes,1,opt,name=text,oneof"`
}
type Output_Record_Field_Number struct {
Number int32 `protobuf:"varint,2,opt,name=number,oneof"`
}
type Output_Record_Field_Error struct {
Error Output_Record_Field_ERROR `protobuf:"varint,3,opt,name=error,enum=devtools.buildozer.Output_Record_Field_ERROR,oneof"`
}
type Output_Record_Field_List struct {
List *RepeatedString `protobuf:"bytes,5,opt,name=list,oneof"`
Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"`
}
func (*Output_Record_Field_Text) isOutput_Record_Field_Value() {}
type Output_Record_Field_Number struct {
Number int32 `protobuf:"varint,2,opt,name=number,proto3,oneof"`
}
type Output_Record_Field_Error struct {
Error Output_Record_Field_ERROR `protobuf:"varint,3,opt,name=error,proto3,enum=devtools.buildozer.Output_Record_Field_ERROR,oneof"`
}
type Output_Record_Field_List struct {
List *RepeatedString `protobuf:"bytes,5,opt,name=list,proto3,oneof"`
}
func (*Output_Record_Field_Text) isOutput_Record_Field_Value() {}
func (*Output_Record_Field_Number) isOutput_Record_Field_Value() {}
func (*Output_Record_Field_Error) isOutput_Record_Field_Value() {}
func (*Output_Record_Field_List) isOutput_Record_Field_Value() {}
func (*Output_Record_Field_Error) isOutput_Record_Field_Value() {}
func (*Output_Record_Field_List) isOutput_Record_Field_Value() {}
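For orientation, the proto3-generated oneof wrapper types above are used by wrapping the chosen value in one of the Output_Record_Field_* structs. A minimal sketch of that usage follows; the import path for the generated api_proto package is an assumption, not part of this diff:

package main

import (
	"fmt"

	api "github.com/bazelbuild/buildtools/api_proto" // assumed import path for the generated package
)

func main() {
	// The oneof field Value is populated by choosing exactly one wrapper type.
	field := &api.Output_Record_Field{
		Value:             &api.Output_Record_Field_Text{Text: "cc_library"},
		QuoteWhenPrinting: true,
	}
	out := &api.Output{
		Records: []*api.Output_Record{{Fields: []*api.Output_Record_Field{field}}},
	}
	fmt.Println(out.String()) // compact text form of the message
}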
func (m *Output_Record_Field) GetValue() isOutput_Record_Field_Value {
if m != nil {
@@ -162,9 +231,9 @@ func (m *Output_Record_Field) GetQuoteWhenPrinting() bool {
return false
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Output_Record_Field) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Output_Record_Field_OneofMarshaler, _Output_Record_Field_OneofUnmarshaler, _Output_Record_Field_OneofSizer, []interface{}{
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Output_Record_Field) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Output_Record_Field_Text)(nil),
(*Output_Record_Field_Number)(nil),
(*Output_Record_Field_Error)(nil),
@@ -172,102 +241,37 @@ func (*Output_Record_Field) XXX_OneofFuncs() (func(msg proto.Message, b *proto.B
}
}
func _Output_Record_Field_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Output_Record_Field)
// value
switch x := m.Value.(type) {
case *Output_Record_Field_Text:
b.EncodeVarint(1<<3 | proto.WireBytes)
b.EncodeStringBytes(x.Text)
case *Output_Record_Field_Number:
b.EncodeVarint(2<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.Number))
case *Output_Record_Field_Error:
b.EncodeVarint(3<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.Error))
case *Output_Record_Field_List:
b.EncodeVarint(5<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.List); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Output_Record_Field.Value has unexpected type %T", x)
}
return nil
}
func _Output_Record_Field_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Output_Record_Field)
switch tag {
case 1: // value.text
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &Output_Record_Field_Text{x}
return true, err
case 2: // value.number
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Output_Record_Field_Number{int32(x)}
return true, err
case 3: // value.error
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Output_Record_Field_Error{Output_Record_Field_ERROR(x)}
return true, err
case 5: // value.list
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(RepeatedString)
err := b.DecodeMessage(msg)
m.Value = &Output_Record_Field_List{msg}
return true, err
default:
return false, nil
}
}
func _Output_Record_Field_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Output_Record_Field)
// value
switch x := m.Value.(type) {
case *Output_Record_Field_Text:
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.Text)))
n += len(x.Text)
case *Output_Record_Field_Number:
n += proto.SizeVarint(2<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.Number))
case *Output_Record_Field_Error:
n += proto.SizeVarint(3<<3 | proto.WireVarint)
n += proto.SizeVarint(uint64(x.Error))
case *Output_Record_Field_List:
s := proto.Size(x.List)
n += proto.SizeVarint(5<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type RepeatedString struct {
Strings []string `protobuf:"bytes,1,rep,name=strings" json:"strings,omitempty"`
Strings []string `protobuf:"bytes,1,rep,name=strings,proto3" json:"strings,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RepeatedString) Reset() { *m = RepeatedString{} }
func (m *RepeatedString) String() string { return proto.CompactTextString(m) }
func (*RepeatedString) ProtoMessage() {}
func (*RepeatedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *RepeatedString) Reset() { *m = RepeatedString{} }
func (m *RepeatedString) String() string { return proto.CompactTextString(m) }
func (*RepeatedString) ProtoMessage() {}
func (*RepeatedString) Descriptor() ([]byte, []int) {
return fileDescriptor_35e560d0f079cc1d, []int{1}
}
func (m *RepeatedString) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RepeatedString.Unmarshal(m, b)
}
func (m *RepeatedString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RepeatedString.Marshal(b, m, deterministic)
}
func (m *RepeatedString) XXX_Merge(src proto.Message) {
xxx_messageInfo_RepeatedString.Merge(m, src)
}
func (m *RepeatedString) XXX_Size() int {
return xxx_messageInfo_RepeatedString.Size(m)
}
func (m *RepeatedString) XXX_DiscardUnknown() {
xxx_messageInfo_RepeatedString.DiscardUnknown(m)
}
var xxx_messageInfo_RepeatedString proto.InternalMessageInfo
func (m *RepeatedString) GetStrings() []string {
if m != nil {
@@ -277,36 +281,37 @@ func (m *RepeatedString) GetStrings() []string {
}
func init() {
proto.RegisterEnum("devtools.buildozer.Output_Record_Field_ERROR", Output_Record_Field_ERROR_name, Output_Record_Field_ERROR_value)
proto.RegisterType((*Output)(nil), "devtools.buildozer.Output")
proto.RegisterType((*Output_Record)(nil), "devtools.buildozer.Output.Record")
proto.RegisterType((*Output_Record_Field)(nil), "devtools.buildozer.Output.Record.Field")
proto.RegisterType((*RepeatedString)(nil), "devtools.buildozer.RepeatedString")
proto.RegisterEnum("devtools.buildozer.Output_Record_Field_ERROR", Output_Record_Field_ERROR_name, Output_Record_Field_ERROR_value)
}
func init() { proto.RegisterFile("api_proto/api.proto", fileDescriptor0) }
func init() { proto.RegisterFile("api_proto/api.proto", fileDescriptor_35e560d0f079cc1d) }
var fileDescriptor0 = []byte{
// 336 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xc1, 0x4a, 0xf3, 0x40,
0x14, 0x85, 0x33, 0x6d, 0x93, 0xfc, 0xbd, 0x85, 0xd2, 0x7f, 0xea, 0x62, 0xe8, 0x2a, 0xcd, 0xc6,
0x20, 0x38, 0x42, 0xdd, 0x08, 0x2e, 0x04, 0xa1, 0xda, 0x22, 0xa6, 0x32, 0x45, 0xba, 0x2c, 0xa9,
0x19, 0xdb, 0x81, 0x98, 0x89, 0x93, 0x49, 0x15, 0x9f, 0xc3, 0x87, 0xf0, 0xb1, 0x7c, 0x14, 0xc9,
0x24, 0x15, 0x44, 0x41, 0x77, 0xf7, 0xdc, 0x7b, 0xbe, 0x73, 0x42, 0x06, 0xfa, 0x51, 0x26, 0x96,
0x99, 0x92, 0x5a, 0x1e, 0x45, 0x99, 0xa0, 0x66, 0xc2, 0x38, 0xe6, 0x5b, 0x2d, 0x65, 0x92, 0xd3,
0x55, 0x21, 0x92, 0x58, 0xbe, 0x70, 0xe5, 0xbf, 0x35, 0xc1, 0x99, 0x15, 0x3a, 0x2b, 0x34, 0x3e,
0x05, 0x57, 0xf1, 0x3b, 0xa9, 0xe2, 0x9c, 0x20, 0xaf, 0x19, 0x74, 0x46, 0x43, 0xfa, 0x1d, 0xa0,
0x95, 0x99, 0x32, 0xe3, 0x64, 0x3b, 0x62, 0xf0, 0xde, 0x00, 0xa7, 0xda, 0xe1, 0x33, 0x70, 0xee,
0x05, 0x4f, 0x3e, 0x63, 0xf6, 0x7f, 0x8d, 0xa1, 0x17, 0xa5, 0x9f, 0xd5, 0xd8, 0xe0, 0xb5, 0x01,
0xb6, 0xd9, 0xe0, 0x3d, 0x68, 0x69, 0xfe, 0xac, 0x09, 0xf2, 0x50, 0xd0, 0x9e, 0x58, 0xcc, 0x28,
0x4c, 0xc0, 0x49, 0x8b, 0x87, 0x15, 0x57, 0xa4, 0xe1, 0xa1, 0xc0, 0x9e, 0x58, 0xac, 0xd6, 0x78,
0x0c, 0x36, 0x57, 0x4a, 0x2a, 0xd2, 0xf4, 0x50, 0xd0, 0x1d, 0x1d, 0xfe, 0xb1, 0x99, 0x8e, 0x19,
0x9b, 0xb1, 0x89, 0xc5, 0x2a, 0x1a, 0x9f, 0x40, 0x2b, 0x11, 0xb9, 0x26, 0xb6, 0x87, 0x82, 0xce,
0xc8, 0xff, 0x29, 0x85, 0xf1, 0x8c, 0x47, 0x9a, 0xc7, 0x73, 0xad, 0x44, 0xba, 0x2e, 0x3f, 0xad,
0x24, 0x30, 0x85, 0xfe, 0x63, 0x21, 0x35, 0x5f, 0x3e, 0x6d, 0x78, 0xba, 0xcc, 0x94, 0x48, 0xb5,
0x48, 0xd7, 0xc4, 0xf5, 0x50, 0xf0, 0x8f, 0xfd, 0x37, 0xa7, 0xc5, 0x86, 0xa7, 0x37, 0xf5, 0xc1,
0x1f, 0x82, 0x6d, 0xba, 0x71, 0x07, 0xdc, 0xdb, 0xf0, 0x2a, 0x9c, 0x2d, 0xc2, 0x9e, 0x55, 0x8a,
0xeb, 0xe9, 0x7c, 0x3e, 0x0d, 0x2f, 0x7b, 0xe8, 0xdc, 0x05, 0x7b, 0x1b, 0x25, 0x05, 0xf7, 0x0f,
0xa0, 0xfb, 0xb5, 0x15, 0x13, 0x70, 0x73, 0x33, 0x55, 0xbf, 0xba, 0xcd, 0x76, 0x72, 0xe5, 0x98,
0x17, 0x3f, 0xfe, 0x08, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x62, 0x58, 0xc4, 0x08, 0x02, 0x00, 0x00,
var fileDescriptor_35e560d0f079cc1d = []byte{
// 346 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x6a, 0xe3, 0x30,
0x10, 0x86, 0xad, 0x24, 0xb6, 0x37, 0x63, 0x08, 0x59, 0x65, 0x0f, 0x22, 0x27, 0xc7, 0x97, 0x35,
0x0b, 0xab, 0x85, 0xec, 0xa5, 0xd0, 0x43, 0x21, 0x90, 0x36, 0xa1, 0xd4, 0x29, 0x0a, 0x25, 0xd0,
0x4b, 0x70, 0x6a, 0x35, 0x11, 0xb8, 0x96, 0x2b, 0xcb, 0x69, 0xe9, 0x73, 0xf4, 0x21, 0xfa, 0x58,
0x7d, 0x94, 0x62, 0x39, 0x09, 0x94, 0x16, 0xda, 0x93, 0xe6, 0x9f, 0x99, 0xef, 0xff, 0x85, 0x10,
0xf4, 0xe2, 0x5c, 0x2c, 0x73, 0x25, 0xb5, 0xfc, 0x17, 0xe7, 0x82, 0x9a, 0x0a, 0xe3, 0x84, 0x6f,
0xb5, 0x94, 0x69, 0x41, 0x57, 0xa5, 0x48, 0x13, 0xf9, 0xc4, 0x55, 0xf0, 0xd2, 0x04, 0x67, 0x56,
0xea, 0xbc, 0xd4, 0xf8, 0x18, 0x5c, 0xc5, 0x6f, 0xa4, 0x4a, 0x0a, 0x82, 0xfc, 0x66, 0xe8, 0x0d,
0x07, 0xf4, 0x23, 0x40, 0xeb, 0x65, 0xca, 0xcc, 0x26, 0xdb, 0x13, 0xfd, 0xd7, 0x06, 0x38, 0x75,
0x0f, 0x9f, 0x80, 0x73, 0x2b, 0x78, 0x7a, 0xb0, 0xf9, 0xfd, 0xa5, 0x0d, 0x3d, 0xad, 0xf6, 0xd9,
0x0e, 0xeb, 0x3f, 0x37, 0xc0, 0x36, 0x1d, 0xfc, 0x0b, 0x5a, 0x9a, 0x3f, 0x6a, 0x82, 0x7c, 0x14,
0xb6, 0x27, 0x16, 0x33, 0x0a, 0x13, 0x70, 0xb2, 0xf2, 0x6e, 0xc5, 0x15, 0x69, 0xf8, 0x28, 0xb4,
0x27, 0x16, 0xdb, 0x69, 0x3c, 0x06, 0x9b, 0x2b, 0x25, 0x15, 0x69, 0xfa, 0x28, 0xec, 0x0c, 0xff,
0x7e, 0x33, 0x99, 0x8e, 0x19, 0x9b, 0xb1, 0x89, 0xc5, 0x6a, 0x1a, 0x1f, 0x41, 0x2b, 0x15, 0x85,
0x26, 0xb6, 0x8f, 0x42, 0x6f, 0x18, 0x7c, 0xe6, 0xc2, 0x78, 0xce, 0x63, 0xcd, 0x93, 0xb9, 0x56,
0x22, 0x5b, 0x57, 0x57, 0xab, 0x08, 0x4c, 0xa1, 0x77, 0x5f, 0x4a, 0xcd, 0x97, 0x0f, 0x1b, 0x9e,
0x2d, 0x73, 0x25, 0x32, 0x2d, 0xb2, 0x35, 0x71, 0x7d, 0x14, 0xfe, 0x60, 0x3f, 0xcd, 0x68, 0xb1,
0xe1, 0xd9, 0xe5, 0x6e, 0x10, 0x0c, 0xc0, 0x36, 0xd9, 0xd8, 0x03, 0xf7, 0x2a, 0x3a, 0x8f, 0x66,
0x8b, 0xa8, 0x6b, 0x55, 0xe2, 0x62, 0x3a, 0x9f, 0x4f, 0xa3, 0xb3, 0x2e, 0x1a, 0xb9, 0x60, 0x6f,
0xe3, 0xb4, 0xe4, 0xc1, 0x1f, 0xe8, 0xbc, 0x4f, 0xc5, 0x04, 0xdc, 0xc2, 0x54, 0xf5, 0x53, 0xb7,
0xd9, 0x5e, 0x8e, 0xbc, 0xeb, 0xf6, 0xe1, 0x07, 0xac, 0x1c, 0x73, 0xfc, 0x7f, 0x0b, 0x00, 0x00,
0xff, 0xff, 0xa1, 0xc3, 0xf5, 0x19, 0x15, 0x02, 0x00, 0x00,
}


@@ -14,6 +14,8 @@ syntax = "proto3";
package devtools.buildozer;
option go_package = "api_proto";
message Output {
repeated Record records = 1;
message Record {


@@ -15,38 +15,119 @@ distributed under the License is distributed on an "AS IS" BASIS,
limitations under the License.
"""
load(
"@io_bazel_rules_go//go/private:providers.bzl",
"GoSource",
)
_GO_YACC_TOOL = "@org_golang_x_tools//cmd/goyacc"
def go_yacc(src, out, visibility=None):
"""Runs go tool yacc -o $out $src."""
native.genrule(
name = src + ".go_yacc",
srcs = [src],
outs = [out],
tools = [_GO_YACC_TOOL],
cmd = ("export GOROOT=$$(dirname $(location " + _GO_YACC_TOOL + "))/..;" +
" $(location " + _GO_YACC_TOOL + ") " +
" -o $(location " + out + ") $(SRCS)"),
visibility = visibility,
local = 1,
)
def go_yacc(src, out, visibility = None):
"""Runs go tool yacc -o $out $src."""
native.genrule(
name = src + ".go_yacc",
srcs = [src],
outs = [out],
tools = [_GO_YACC_TOOL],
cmd = ("export GOROOT=$$(dirname $(location " + _GO_YACC_TOOL + "))/..;" +
" $(location " + _GO_YACC_TOOL + ") " +
" -o $(location " + out + ") $(SRCS) > /dev/null"),
visibility = visibility,
)
def _extract_go_src(ctx):
"""Thin rule that exposes the GoSource from a go_library."""
return [DefaultInfo(files = depset(ctx.attr.library[GoSource].srcs))]
extract_go_src = rule(
implementation = _extract_go_src,
attrs = {
"library": attr.label(
providers = [GoSource],
),
},
)
def genfile_check_test(src, gen):
"""Asserts that any checked-in generated code matches regen."""
if not src:
fail("src is required", "src")
if not gen:
fail("gen is required", "gen")
native.genrule(
name = src + "_checksh",
outs = [src + "_check.sh"],
cmd = "echo 'diff $$@' > $@",
)
native.sh_test(
name = src + "_checkshtest",
size = "small",
srcs = [src + "_check.sh"],
data = [src, gen],
args = ["$(location " + src + ")", "$(location " + gen + ")"],
)
"""Asserts that any checked-in generated code matches bazel gen."""
if not src:
fail("src is required", "src")
if not gen:
fail("gen is required", "gen")
native.genrule(
name = src + "_checksh",
outs = [src + "_check.sh"],
cmd = r"""cat >$@ <<'eof'
#!/bin/bash
# Script generated by @com_github_bazelbuild_buildtools//build:build_defs.bzl
# --- begin runfiles.bash initialization ---
# Copy-pasted from Bazel's Bash runfiles library (tools/bash/runfiles/runfiles.bash).
set -euo pipefail
if [[ ! -d "$${RUNFILES_DIR:-/dev/null}" && ! -f "$${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
if [[ -f "$$0.runfiles_manifest" ]]; then
export RUNFILES_MANIFEST_FILE="$$0.runfiles_manifest"
elif [[ -f "$$0.runfiles/MANIFEST" ]]; then
export RUNFILES_MANIFEST_FILE="$$0.runfiles/MANIFEST"
elif [[ -f "$$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
export RUNFILES_DIR="$$0.runfiles"
fi
fi
if [[ -f "$${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
source "$${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"
elif [[ -f "$${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then
source "$$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \
"$$RUNFILES_MANIFEST_FILE" | cut -d ' ' -f 2-)"
else
echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"
exit 1
fi
# --- end runfiles.bash initialization ---
[[ "$$1" = external/* ]] && F1="$${1#external/}" || F1="$$TEST_WORKSPACE/$$1"
[[ "$$2" = external/* ]] && F2="$${2#external/}" || F2="$$TEST_WORKSPACE/$$2"
F1="$$(rlocation "$$F1")"
F2="$$(rlocation "$$F2")"
diff -q "$$F1" "$$F2"
eof
""",
)
native.sh_test(
name = src + "_checkshtest",
size = "small",
srcs = [src + "_check.sh"],
deps = ["@bazel_tools//tools/bash/runfiles"],
data = [src, gen],
args = ["$(location " + src + ")", "$(location " + gen + ")"],
)
# magic copy rule used to update the checked-in version
native.genrule(
name = src + "_copysh",
srcs = [gen],
outs = [src + "copy.sh"],
cmd = "echo 'cp $${BUILD_WORKSPACE_DIRECTORY}/$(location " + gen +
") $${BUILD_WORKSPACE_DIRECTORY}/" + native.package_name() + "/" + src + "' > $@",
)
native.sh_binary(
name = src + "_copy",
srcs = [src + "_copysh"],
data = [gen],
)
def go_proto_checkedin_test(src, proto = "go_default_library"):
"""Asserts that any checked-in .pb.go code matches bazel gen."""
genfile = src + "_genfile"
extract_go_src(
name = genfile + "go",
library = proto,
)
# TODO(pmbethe09): why is the extra copy needed?
native.genrule(
name = genfile,
srcs = [genfile + "go"],
outs = [genfile + ".go"],
cmd = "cp $< $@",
)
genfile_check_test(src, genfile)


@@ -20,18 +20,146 @@ package build
import (
"bytes"
"fmt"
"path/filepath"
"sort"
"strings"
"unicode/utf8"
"github.com/bazelbuild/buildtools/tables"
)
// FileType represents the type of a file: default (generic Starlark), BUILD, WORKSPACE, or .bzl.
// Certain formatting or refactoring rules can be applied to several file types, so they support
// bitwise operations: `type1 | type2` can represent a scope (e.g. BUILD and WORKSPACE files) and
// `scope & fileType` can be used to check whether a file type belongs to a scope.
type FileType int
const (
// TypeDefault represents general Starlark files
TypeDefault FileType = 1 << iota
// TypeBuild represents BUILD files
TypeBuild
// TypeWorkspace represents WORKSPACE files
TypeWorkspace
// TypeBzl represents .bzl files
TypeBzl
)
func (t FileType) String() string {
switch t {
case TypeDefault:
return "default"
case TypeBuild:
return "BUILD"
case TypeWorkspace:
return "WORKSPACE"
case TypeBzl:
return ".bzl"
}
return "unknown"
}
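As the comment above notes, FileType values are bit flags, so several types can be OR-ed into a scope and membership checked with a bitwise AND. A minimal sketch of that usage (the scope variable is illustrative; the import path assumes the vendored buildtools build package):

package main

import (
	"fmt"

	"github.com/bazelbuild/buildtools/build"
)

func main() {
	// A scope covering BUILD and WORKSPACE files, built with bitwise OR.
	scope := build.TypeBuild | build.TypeWorkspace

	// t&scope is non-zero exactly when file type t belongs to the scope.
	for _, t := range []build.FileType{build.TypeBuild, build.TypeBzl} {
		fmt.Printf("%s in scope: %v\n", t, t&scope != 0)
	}
}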
// ParseBuild parses a file, marks it as a BUILD file and returns the corresponding parse tree.
//
// The filename is used only for generating error messages.
func ParseBuild(filename string, data []byte) (*File, error) {
in := newInput(filename, data)
f, err := in.parse()
if f != nil {
f.Type = TypeBuild
}
return f, err
}
// ParseWorkspace parses a file, marks it as a WORKSPACE file and returns the corresponding parse tree.
//
// The filename is used only for generating error messages.
func ParseWorkspace(filename string, data []byte) (*File, error) {
in := newInput(filename, data)
f, err := in.parse()
if f != nil {
f.Type = TypeWorkspace
}
return f, err
}
// ParseBzl parses a file, marks it as a .bzl file and returns the corresponding parse tree.
//
// The filename is used only for generating error messages.
func ParseBzl(filename string, data []byte) (*File, error) {
in := newInput(filename, data)
f, err := in.parse()
if f != nil {
f.Type = TypeBzl
}
return f, err
}
// ParseDefault parses a file, marks it as a generic Starlark file and returns the corresponding parse tree.
//
// The filename is used only for generating error messages.
func ParseDefault(filename string, data []byte) (*File, error) {
in := newInput(filename, data)
f, err := in.parse()
if f != nil {
f.Type = TypeDefault
}
return f, err
}
func getFileType(filename string) FileType {
if filename == "" { // stdin
return TypeDefault
}
basename := strings.ToLower(filepath.Base(filename))
if strings.HasSuffix(basename, ".oss") {
basename = basename[:len(basename)-4]
}
ext := filepath.Ext(basename)
switch ext {
case ".bzl":
return TypeBzl
case ".sky":
return TypeDefault
}
base := basename[:len(basename)-len(ext)]
switch {
case ext == ".build" || base == "build" || strings.HasPrefix(base, "build."):
return TypeBuild
case ext == ".workspace" || base == "workspace" || strings.HasPrefix(base, "workspace."):
return TypeWorkspace
}
return TypeDefault
}
// Parse parses the input data and returns the corresponding parse tree.
//
// The filename is used only for generating error messages.
// Uses the filename to detect the formatting type (build, workspace, or default) and calls
// ParseBuild, ParseWorkspace, or ParseDefault correspondingly.
func Parse(filename string, data []byte) (*File, error) {
in := newInput(filename, data)
return in.parse()
switch getFileType(filename) {
case TypeBuild:
return ParseBuild(filename, data)
case TypeWorkspace:
return ParseWorkspace(filename, data)
case TypeBzl:
return ParseBzl(filename, data)
}
return ParseDefault(filename, data)
}
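Parse now dispatches on the file type detected from the filename instead of always using a single code path. A small usage sketch, assuming the vendored package is imported as github.com/bazelbuild/buildtools/build (the BUILD content below is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/bazelbuild/buildtools/build"
)

func main() {
	data := []byte(`go_library(name = "lib", srcs = ["lib.go"])`)

	// The filename decides which parser runs: BUILD.bazel -> ParseBuild.
	f, err := build.Parse("pkg/BUILD.bazel", data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(f.Type) // prints the detected file type (BUILD)
}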
// ParseError contains information about the error encountered during parsing.
type ParseError struct {
Message string
Filename string
Pos Position
}
// Error returns a string representation of the parse error.
func (e ParseError) Error() string {
filename := e.Filename
if filename == "" {
filename = "<stdin>"
}
return fmt.Sprintf("%s:%d:%d: %v", filename, e.Pos.Line, e.Pos.LineRune, e.Message)
}
// An input represents a single input file being parsed.
@@ -45,7 +173,6 @@ type input struct {
pos Position // current input position
lineComments []Comment // accumulated line comments
suffixComments []Comment // accumulated suffix comments
endStmt int // position of the end of the current statement
depth int // nesting of [ ] { } ( )
cleanLine bool // true if the current line only contains whitespace before the current position
indent int // current line indentation in spaces
@@ -73,7 +200,6 @@ func newInput(filename string, data []byte) *input {
pos: Position{Line: 1, LineRune: 1, Byte: 0},
cleanLine: true,
indents: []int{0},
endStmt: -1, // -1 denotes it's not inside a statement
}
}
@@ -92,7 +218,7 @@ func (in *input) parse() (f *File, err error) {
if e == in.parseError {
err = in.parseError
} else {
err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e)
err = ParseError{Message: fmt.Sprintf("internal error: %v", e), Filename: in.filename, Pos: in.pos}
}
}
}()
@@ -117,7 +243,7 @@ func (in *input) Error(s string) {
if s == "syntax error" && in.lastToken != "" {
s += " near " + in.lastToken
}
in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, s)
in.parseError = ParseError{Message: s, Filename: in.filename, Pos: in.pos}
panic(in.parseError)
}
@@ -187,25 +313,6 @@ func (in *input) Lex(val *yySymType) int {
// Skip past spaces, stopping at non-space or EOF.
countNL := 0 // number of newlines we've skipped past
for !in.eof() {
// If a single statement is split into multiple lines, we don't need
// to track indentations and unindentations within these lines. For example:
//
// def f(
// # This indentation should be ignored
// x):
// # This unindentation should be ignored
// # Actual indentation is from 0 to 2 spaces here
// return x
//
// To handle this case, when we reach the beginning of a statement we scan forward to see where
// it should end and record the number of input bytes remaining at that endpoint.
//
// If --format_bzl is set to false, top level blocks (e.g. an entire function definition)
// is considered as a single statement.
if in.endStmt != -1 && len(in.remaining) == in.endStmt {
in.endStmt = -1
}
// Skip over spaces. Count newlines so we can give the parser
// information about where top-level blank lines are,
// for top-level comment assignment.
@@ -214,7 +321,7 @@ func (in *input) Lex(val *yySymType) int {
if c == '\n' {
in.indent = 0
in.cleanLine = true
if in.endStmt == -1 {
if in.depth == 0 {
// Not in a statement. Tell parser about top-level blank line.
in.startToken(val)
in.readRune()
@@ -234,18 +341,23 @@ func (in *input) Lex(val *yySymType) int {
// If a line contains just a comment its indentation level doesn't matter.
// Reset it to zero.
in.indent = 0
isLineComment := in.cleanLine
in.cleanLine = true
// Is this comment the only thing on its line?
// Find the last \n before this # and see if it's all
// spaces from there to here.
// If it's a suffix comment but the last non-space symbol before
// it is one of (, [, or {, treat it as a line comment that should be
// it is one of (, [, or {, or it's a suffix comment to "):"
// (e.g. trailing closing bracket or a function definition),
// treat it as a line comment that should be
// put inside the corresponding block.
i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
prefix := bytes.TrimSpace(in.complete[i+1 : in.pos.Byte])
prefix = bytes.Replace(prefix, []byte{' '}, []byte{}, -1)
isSuffix := true
if len(prefix) == 0 ||
(len(prefix) == 2 && prefix[0] == ')' && prefix[1] == ':') ||
prefix[len(prefix)-1] == '[' ||
prefix[len(prefix)-1] == '(' ||
prefix[len(prefix)-1] == '{' {
@@ -266,7 +378,7 @@ func (in *input) Lex(val *yySymType) int {
// If we are at top level (not in a rule), hand the comment to
// the parser as a _COMMENT token. The grammar is written
// to handle top-level comments itself.
if in.endStmt == -1 {
if in.depth == 0 && isLineComment {
// Not in a statement. Tell parser about top-level comment.
return _COMMENT
}
@@ -296,9 +408,9 @@ func (in *input) Lex(val *yySymType) int {
}
// Check for changes in indentation
// Skip if --format_bzl is set to false, if we're inside a statement, or if there were non-space
// Skip if we're inside a statement, or if there were non-space
// characters before in the current line.
if tables.FormatBzlFiles && in.endStmt == -1 && in.cleanLine {
if in.depth == 0 && in.cleanLine {
if in.indent > in.currentIndent() {
// A new indentation block starts
in.indents = append(in.indents, in.indent)
@@ -340,11 +452,6 @@ func (in *input) Lex(val *yySymType) int {
return _EOF
}
// If endStmt is 0, we need to recompute where the end of the next statement is.
if in.endStmt == -1 {
in.endStmt = len(in.skipStmt(in.remaining))
}
// Punctuation tokens.
switch c := in.peekRune(); c {
case '[', '(', '{':
@@ -361,11 +468,35 @@ func (in *input) Lex(val *yySymType) int {
in.readRune()
return c
case '<', '>', '=', '!', '+', '-', '*', '/', '%': // possibly followed by =
case '<', '>', '=', '!', '+', '-', '*', '/', '%', '|', '&', '~', '^': // possibly followed by =
in.readRune()
if c == '/' && in.peekRune() == '/' {
// integer division
if c == '~' {
// unary bitwise not, shouldn't be followed by anything
return c
}
if c == '*' && in.peekRune() == '*' {
// double asterisk
in.readRune()
return _STAR_STAR
}
if c == in.peekRune() {
switch c {
case '/':
// integer division
in.readRune()
c = _INT_DIV
case '<':
// left shift
in.readRune()
c = _BIT_LSH
case '>':
// right shift
in.readRune()
c = _BIT_RSH
}
}
if in.peekRune() == '=' {
@@ -442,7 +573,7 @@ func (in *input) Lex(val *yySymType) int {
}
}
in.endToken(val)
s, triple, err := unquote(val.tok)
s, triple, err := Unquote(val.tok)
if err != nil {
in.Error(fmt.Sprint(err))
}
@@ -456,19 +587,6 @@ func (in *input) Lex(val *yySymType) int {
in.Error(fmt.Sprintf("unexpected input character %#q", c))
}
if !tables.FormatBzlFiles {
// Look for raw Python block (class, def, if, etc at beginning of line) and pass through.
if in.depth == 0 && in.pos.LineRune == 1 && hasPythonPrefix(in.remaining) {
// Find end of Python block and advance input beyond it.
// Have to loop calling readRune in order to maintain line number info.
rest := in.skipStmt(in.remaining)
for len(in.remaining) > len(rest) {
in.readRune()
}
return _PYTHON
}
}
// Scan over alphanumeric identifier.
for {
c := in.peekRune()
@@ -484,7 +602,17 @@ func (in *input) Lex(val *yySymType) int {
if k := keywordToken[val.tok]; k != 0 {
return k
}
switch val.tok {
case "pass":
return _PASS
case "break":
return _BREAK
case "continue":
return _CONTINUE
}
if len(val.tok) > 0 && val.tok[0] >= '0' && val.tok[0] <= '9' {
return _NUMBER
}
return _IDENT
}
@@ -516,164 +644,6 @@ var keywordToken = map[string]int{
"return": _RETURN,
}
// Python scanning.
// About 1% of BUILD files embed arbitrary Python into the file.
// We do not attempt to parse it. Instead, we lex just enough to scan
// beyond it, treating the Python block as an uninterpreted blob.
// hasPythonPrefix reports whether p begins with a keyword that would
// introduce an uninterpreted Python block.
func hasPythonPrefix(p []byte) bool {
if tables.FormatBzlFiles {
return false
}
for _, pre := range prefixes {
if hasPrefixSpace(p, pre) {
return true
}
}
return false
}
// These keywords introduce uninterpreted Python blocks.
var prefixes = []string{
"assert",
"class",
"def",
"del",
"for",
"if",
"try",
"else",
"elif",
"except",
}
// hasPrefixSpace reports whether p begins with pre followed by a space or colon.
func hasPrefixSpace(p []byte, pre string) bool {
if len(p) <= len(pre) || p[len(pre)] != ' ' && p[len(pre)] != '\t' && p[len(pre)] != ':' {
return false
}
for i := range pre {
if p[i] != pre[i] {
return false
}
}
return true
}
// A utility function for the legacy formatter.
// Returns whether the given code starts with a top-level statement (maybe with some preceding
// comments and blank lines)
func isOutsideBlock(b []byte) bool {
isBlankLine := true
isComment := false
for _, c := range b {
switch {
case c == ' ' || c == '\t' || c == '\r':
isBlankLine = false
case c == '#':
isBlankLine = false
isComment = true
case c == '\n':
isBlankLine = true
isComment = false
default:
if !isComment {
return isBlankLine
}
}
}
return true
}
// skipStmt returns the data remaining after the statement beginning at p.
// It does not advance the input position.
// (The only reason for the input receiver is to be able to call in.Error.)
func (in *input) skipStmt(p []byte) []byte {
quote := byte(0) // if non-zero, the kind of quote we're in
tripleQuote := false // if true, the quote is a triple quote
depth := 0 // nesting depth for ( ) [ ] { }
var rest []byte // data after the Python block
defer func() {
if quote != 0 {
in.Error("EOF scanning Python quoted string")
}
}()
// Scan over input one byte at a time until we find
// an unindented, non-blank, non-comment line
// outside quoted strings and brackets.
for i := 0; i < len(p); i++ {
c := p[i]
if quote != 0 && c == quote && !tripleQuote {
quote = 0
continue
}
if quote != 0 && c == quote && tripleQuote && i+2 < len(p) && p[i+1] == quote && p[i+2] == quote {
i += 2
quote = 0
tripleQuote = false
continue
}
if quote != 0 {
if c == '\\' {
i++ // skip escaped char
}
continue
}
if c == '\'' || c == '"' {
if i+2 < len(p) && p[i+1] == c && p[i+2] == c {
quote = c
tripleQuote = true
i += 2
continue
}
quote = c
continue
}
if depth == 0 && i > 0 && p[i] == '\n' && p[i-1] != '\\' {
// Possible stopping point. Save the earliest one we find.
if rest == nil {
rest = p[i:]
}
if tables.FormatBzlFiles {
// In the bzl files mode we only care about the end of the statement, we've found it.
return rest
}
// In the legacy mode we need to find where the current block ends
if isOutsideBlock(p[i+1:]) {
return rest
}
// Not a stopping point after all.
rest = nil
}
switch c {
case '#':
// Skip comment.
for i < len(p) && p[i] != '\n' {
i++
}
// Rewind 1 position back because \n should be handled at the next iteration
i--
case '(', '[', '{':
depth++
case ')', ']', '}':
depth--
}
}
return rest
}
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
@@ -707,12 +677,21 @@ func (in *input) order(v Expr) {
in.order(x)
}
in.order(&v.End)
case *PythonBlock:
// nothing
case *LoadStmt:
in.order(v.Module)
for i := range v.From {
in.order(v.To[i])
in.order(v.From[i])
}
in.order(&v.Rparen)
case *LiteralExpr:
// nothing
case *StringExpr:
// nothing
case *Ident:
// nothing
case *BranchStmt:
// nothing
case *DotExpr:
in.order(v.X)
case *ListExpr:
@@ -720,9 +699,9 @@ func (in *input) order(v Expr) {
in.order(x)
}
in.order(&v.End)
case *ListForExpr:
in.order(v.X)
for _, c := range v.For {
case *Comprehension:
in.order(v.Body)
for _, c := range v.Clauses {
in.order(c)
}
in.order(&v.End)
@@ -731,16 +710,9 @@ func (in *input) order(v Expr) {
in.order(x)
}
in.order(&v.End)
case *ForClauseWithIfClausesOpt:
in.order(v.For)
for _, c := range v.Ifs {
in.order(c)
}
case *ForClause:
for _, name := range v.Var {
in.order(name)
}
in.order(v.Expr)
in.order(v.Vars)
in.order(v.X)
case *IfClause:
in.order(v.Cond)
case *KeyValueExpr:
@@ -755,12 +727,17 @@ func (in *input) order(v Expr) {
for _, x := range v.List {
in.order(x)
}
in.order(&v.End)
if !v.NoBrackets {
in.order(&v.End)
}
case *UnaryExpr:
in.order(v.X)
case *BinaryExpr:
in.order(v.X)
in.order(v.Y)
case *AssignExpr:
in.order(v.LHS)
in.order(v.RHS)
case *ConditionalExpr:
in.order(v.Then)
in.order(v.Test)
@@ -777,35 +754,39 @@ func (in *input) order(v Expr) {
in.order(v.X)
in.order(v.Y)
case *LambdaExpr:
for _, name := range v.Var {
in.order(name)
for _, param := range v.Params {
in.order(param)
}
in.order(v.Expr)
case *ReturnExpr:
if v.X != nil {
in.order(v.X)
for _, expr := range v.Body {
in.order(expr)
}
case *FuncDef:
for _, x := range v.Args {
case *ReturnStmt:
if v.Result != nil {
in.order(v.Result)
}
case *DefStmt:
for _, x := range v.Params {
in.order(x)
}
for _, x := range v.Body.Statements {
for _, x := range v.Body {
in.order(x)
}
case *ForLoop:
for _, x := range v.LoopVars {
case *ForStmt:
in.order(v.Vars)
in.order(v.X)
for _, x := range v.Body {
in.order(x)
}
in.order(v.Iterable)
for _, x := range v.Body.Statements {
in.order(x)
case *IfStmt:
in.order(v.Cond)
for _, s := range v.True {
in.order(s)
}
case *IfElse:
for _, condition := range v.Conditions {
in.order(condition.If)
for _, x := range condition.Then.Statements {
in.order(x)
}
if len(v.False) > 0 {
in.order(&v.ElsePos)
}
for _, s := range v.False {
in.order(s)
}
}
if v != nil {
@@ -817,29 +798,19 @@ func (in *input) order(v Expr) {
func (in *input) assignComments() {
// Generate preorder and postorder lists.
in.order(in.file)
in.assignSuffixComments()
in.assignLineComments()
}
// Assign line comments to syntax immediately following.
line := in.lineComments
for _, x := range in.pre {
start, _ := x.Span()
xcom := x.Comment()
for len(line) > 0 && start.Byte >= line[0].Start.Byte {
xcom.Before = append(xcom.Before, line[0])
line = line[1:]
}
}
// Remaining line comments go at end of file.
in.file.After = append(in.file.After, line...)
func (in *input) assignSuffixComments() {
// Assign suffix comments to syntax immediately before.
suffix := in.suffixComments
for i := len(in.post) - 1; i >= 0; i-- {
x := in.post[i]
// Do not assign suffix comments to file
// Do not assign suffix comments to file or to block statements
switch x.(type) {
case *File:
case *File, *DefStmt, *IfStmt, *ForStmt, *CommentBlock:
continue
}
@@ -862,6 +833,27 @@ func (in *input) assignComments() {
in.file.Before = append(in.file.Before, suffix...)
}
func (in *input) assignLineComments() {
// Assign line comments to syntax immediately following.
line := in.lineComments
for _, x := range in.pre {
start, _ := x.Span()
xcom := x.Comment()
for len(line) > 0 && start.Byte >= line[0].Start.Byte {
xcom.Before = append(xcom.Before, line[0])
line = line[1:]
}
// Line comments can be sorted in the wrong order because they get assigned from different
// parts of the lexer and the parser. Restore the original order.
sort.SliceStable(xcom.Before, func(i, j int) bool {
return xcom.Before[i].Start.Byte < xcom.Before[j].Start.Byte
})
}
// Remaining line comments go at end of file.
in.file.After = append(in.file.After, line...)
}
// reverseComments reverses the []Comment list.
func reverseComments(list []Comment) {
for i, j := 0, len(list)-1; i < j; i, j = i+1, j-1 {

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -23,19 +23,27 @@ import (
"strings"
)
const nestedIndentation = 2 // Indentation of nested blocks
const listIndentation = 4 // Indentation of multiline expressions
const (
nestedIndentation = 4 // Indentation of nested blocks
listIndentation = 4 // Indentation of multiline expressions
defIndentation = 8 // Indentation of multiline function definitions
)
// Format returns the formatted form of the given BUILD file.
// Format returns the formatted form of the given BUILD or bzl file.
func Format(f *File) []byte {
pr := &printer{}
pr := &printer{fileType: f.Type}
pr.file(f)
return pr.Bytes()
}
// FormatString returns the string form of the given expression.
func FormatString(x Expr) string {
pr := &printer{}
fileType := TypeBuild // for compatibility
if file, ok := x.(*File); ok {
fileType = file.Type
}
pr := &printer{fileType: fileType}
switch x := x.(type) {
case *File:
pr.file(x)
@@ -47,10 +55,13 @@ func FormatString(x Expr) string {
// A printer collects the state during printing of a file or expression.
type printer struct {
fileType FileType // different rules can be applied to different file types.
bytes.Buffer // output buffer
comment []Comment // pending end-of-line comments
margin int // left margin (indent), a number of spaces
depth int // nesting depth inside ( ) [ ] { }
level int // nesting level of def-, if-else- and for-blocks
needsNewLine bool // true if the next statement needs a new line before it
}
// printf prints to the buffer.
@@ -74,6 +85,7 @@ func (p *printer) indent() int {
// To break a line inside an expression that might not be enclosed
// in brackets of some kind, use breakline instead.
func (p *printer) newline() {
p.needsNewLine = false
if len(p.comment) > 0 {
p.printf(" ")
for i, com := range p.comment {
@@ -90,6 +102,28 @@ func (p *printer) newline() {
p.printf("\n%*s", p.margin, "")
}
// softNewline postpones a call to newline to the next call of p.newlineIfNeeded()
// If softNewline is called several times, just one newline is printed.
// Use case: if several nested blocks end at the same time, for instance
//
// if True:
// for a in b:
// pass
// foo()
//
// the last statement (`pass`) doesn't end with a newline; each block ends with a lazy newline
// that actually gets printed only once, right before the next statement (`foo()`) is printed.
func (p *printer) softNewline() {
p.needsNewLine = true
}
// newlineIfNeeded calls newline if softNewline() has previously been called
func (p *printer) newlineIfNeeded() {
if p.needsNewLine == true {
p.newline()
}
}
// breakline breaks the current line, inserting a continuation \ if needed.
// If no continuation \ is needed, breakline flushes end-of-line comments.
func (p *printer) breakline() {
@@ -128,40 +162,59 @@ func (p *printer) file(f *File) {
p.newline()
}
// If the last expression is in an indented code block there can be spaces in the last line.
p.trim()
p.newlineIfNeeded()
}
func (p *printer) statements(stmts []Expr) {
func (p *printer) nestedStatements(stmts []Expr) {
p.margin += nestedIndentation
p.level++
p.newline()
p.statements(stmts)
p.margin -= nestedIndentation
p.level--
}
func (p *printer) statements(rawStmts []Expr) {
// rawStmts may contain nils if a refactoring tool replaces an actual statement with nil.
// It means the statements don't exist anymore, just ignore them.
stmts := []Expr{}
for _, stmt := range rawStmts {
if stmt != nil {
stmts = append(stmts, stmt)
}
}
for i, stmt := range stmts {
switch stmt := stmt.(type) {
case *CommentBlock:
// comments already handled
case *PythonBlock:
for _, com := range stmt.Before {
p.printf("%s", strings.TrimSpace(com.Token))
p.newline()
}
p.printf("%s", stmt.Token)
p.newline()
default:
p.expr(stmt, precLow)
}
// Print an empty line break after the expression unless it's a code block.
// For a code block, the line break is generated by its last statement.
if !isCodeBlock(stmt) {
p.newline()
}
// A CommentBlock is an empty statement without a body;
// it doesn't need a line break after the body
if _, ok := stmt.(*CommentBlock); !ok {
p.softNewline()
}
for _, com := range stmt.Comment().After {
p.newlineIfNeeded()
p.printf("%s", strings.TrimSpace(com.Token))
p.softNewline()
}
// Print an empty line break after the statement unless it's the last statement in the sequence.
// In that case a line break should be printed when the block or the file ends.
if i < len(stmts)-1 {
p.newline()
}
if i+1 < len(stmts) && !compactStmt(stmt, stmts[i+1], p.margin == 0) {
if i+1 < len(stmts) && !p.compactStmt(stmt, stmts[i+1]) {
p.newline()
}
}
@@ -171,43 +224,58 @@ func (p *printer) statements(stmts []Expr) {
// should be printed without an intervening blank line.
// We omit the blank line when both are subinclude statements
// and the second one has no leading comments.
func compactStmt(s1, s2 Expr, isTopLevel bool) bool {
func (p *printer) compactStmt(s1, s2 Expr) bool {
if len(s2.Comment().Before) > 0 {
return false
}
if isTopLevel {
return isCall(s1, "load") && isCall(s2, "load")
} else if isLoad(s1) && isLoad(s2) {
// Load statements should be compact
return true
} else if isLoad(s1) || isLoad(s2) {
// Load statements should be separated from anything else
return false
} else if isCommentBlock(s1) || isCommentBlock(s2) {
// Standalone comment blocks shouldn't be attached to other statements
return false
} else if (p.fileType == TypeBuild || p.fileType == TypeWorkspace) && p.level == 0 {
// Top-level statements in a BUILD or WORKSPACE file
return false
} else if isFunctionDefinition(s1) || isFunctionDefinition(s2) {
// One of the statements is a function definition
return false
} else {
return !(isCodeBlock(s1) || isCodeBlock(s2))
// Depend on how the statements have been printed in the original file
_, end := s1.Span()
start, _ := s2.Span()
return start.Line-end.Line <= 1
}
}
// isCall reports whether x is a call to a function with the given name.
func isCall(x Expr, name string) bool {
c, ok := x.(*CallExpr)
if !ok {
return false
}
nam, ok := c.X.(*LiteralExpr)
if !ok {
return false
}
return nam.Token == name
// isLoad reports whether x is a load statement.
func isLoad(x Expr) bool {
_, ok := x.(*LoadStmt)
return ok
}
// isCodeBlock checks if the statement is a code block (def, if, for, etc.)
func isCodeBlock(x Expr) bool {
switch x.(type) {
case *FuncDef:
return true
case *ForLoop:
return true
case *IfElse:
return true
default:
// isCommentBlock reports whether x is a comment block node.
func isCommentBlock(x Expr) bool {
_, ok := x.(*CommentBlock)
return ok
}
// isFunctionDefinition checks if the statement is a def code block
func isFunctionDefinition(x Expr) bool {
_, ok := x.(*DefStmt)
return ok
}
// isDifferentLines reports whether two positions belong to different lines.
// If one of the positions is null (Line == 0), it's not a real position but probably an indicator
// of a manually inserted node. Return false in this case
func isDifferentLines(p1, p2 *Position) bool {
if p1.Line == 0 || p2.Line == 0 {
return false
}
return p1.Line != p2.Line
}
// Expression formatting.
@@ -236,42 +304,44 @@ func isCodeBlock(x Expr) bool {
const (
precLow = iota
precAssign
precComma
precColon
precIn
precIfElse
precOr
precAnd
precCmp
precBitwiseOr
precBitwiseXor
precBitwiseAnd
precBitwiseShift
precAdd
precMultiply
precSuffix
precUnary
precConcat
precSuffix
)
// opPrec gives the precedence for operators found in a BinaryExpr.
var opPrec = map[string]int{
"=": precAssign,
"+=": precAssign,
"-=": precAssign,
"*=": precAssign,
"/=": precAssign,
"//=": precAssign,
"%=": precAssign,
"or": precOr,
"and": precAnd,
"<": precCmp,
">": precCmp,
"==": precCmp,
"!=": precCmp,
"<=": precCmp,
">=": precCmp,
"+": precAdd,
"-": precAdd,
"*": precMultiply,
"/": precMultiply,
"//": precMultiply,
"%": precMultiply,
"or": precOr,
"and": precAnd,
"in": precCmp,
"not in": precCmp,
"<": precCmp,
">": precCmp,
"==": precCmp,
"!=": precCmp,
"<=": precCmp,
">=": precCmp,
"+": precAdd,
"-": precAdd,
"*": precMultiply,
"/": precMultiply,
"//": precMultiply,
"%": precMultiply,
"|": precBitwiseOr,
"&": precBitwiseAnd,
"^": precBitwiseXor,
"<<": precBitwiseShift,
">>": precBitwiseShift,
}
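The opPrec table drives re-parenthesization when printing: a subexpression whose operator binds less tightly than its context gets wrapped in parentheses. A hedged sketch, assuming FormatString accepts a standalone expression as its doc comment says:

package main

import (
	"fmt"

	"github.com/bazelbuild/buildtools/build"
)

func main() {
	// (a + b) * c: "+" has lower precedence than "*", so the printer
	// keeps parentheses around the sum when it appears under the product.
	sum := &build.BinaryExpr{
		X:  &build.Ident{Name: "a"},
		Op: "+",
		Y:  &build.Ident{Name: "b"},
	}
	product := &build.BinaryExpr{X: sum, Op: "*", Y: &build.Ident{Name: "c"}}

	fmt.Println(build.FormatString(product)) // expected: (a + b) * c
}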
// expr prints the expression v to the print buffer.
@@ -291,6 +361,8 @@ func (p *printer) expr(v Expr, outerPrec int) {
// TODO(bazel-team): Check whether it is valid to emit comments right now,
// and if not, insert them earlier in the output instead, at the most
// recent \n not following a \ line.
p.newlineIfNeeded()
if before := v.Comment().Before; len(before) > 0 {
// Want to print a line comment.
// Line comments must be at the current margin.
@@ -330,14 +402,19 @@ func (p *printer) expr(v Expr, outerPrec int) {
case *LiteralExpr:
p.printf("%s", v.Token)
case *Ident:
p.printf("%s", v.Name)
case *BranchStmt:
p.printf("%s", v.Token)
case *StringExpr:
// If the Token is a correct quoting of Value, use it.
// This preserves the specific escaping choices that
// BUILD authors have made, and it also works around
// b/7272572.
if strings.HasPrefix(v.Token, `"`) {
s, triple, err := unquote(v.Token)
if s == v.Value && triple == v.TripleQuote && err == nil {
// If the Token is a correct quoting of Value and has double quotes, use it,
// also use it if it has single quotes and the value itself contains a double quote symbol.
// This preserves the specific escaping choices that BUILD authors have made.
s, triple, err := Unquote(v.Token)
if s == v.Value && triple == v.TripleQuote && err == nil {
if strings.HasPrefix(v.Token, `"`) || strings.ContainsRune(v.Value, '"') {
p.printf("%s", v.Token)
break
}
@@ -348,7 +425,16 @@ func (p *printer) expr(v Expr, outerPrec int) {
case *DotExpr:
addParen(precSuffix)
p.expr(v.X, precSuffix)
_, xEnd := v.X.Span()
isMultiline := isDifferentLines(&v.NamePos, &xEnd)
if isMultiline {
p.margin += listIndentation
p.breakline()
}
p.printf(".%s", v.Name)
if isMultiline {
p.margin -= listIndentation
}
case *IndexExpr:
addParen(precSuffix)
@@ -388,19 +474,23 @@ func (p *printer) expr(v Expr, outerPrec int) {
} else {
p.printf("%s", v.Op)
}
p.expr(v.X, precUnary)
// Use the next precedence level (precSuffix), so that nested unary expressions are parenthesized,
// for example: `not (-(+(~foo)))` instead of `not -+~foo`
if v.X != nil {
p.expr(v.X, precSuffix)
}
case *LambdaExpr:
addParen(precColon)
p.printf("lambda ")
for i, name := range v.Var {
for i, param := range v.Params {
if i > 0 {
p.printf(", ")
}
p.expr(name, precLow)
p.expr(param, precLow)
}
p.printf(": ")
p.expr(v.Expr, precColon)
p.expr(v.Body[0], precLow) // lambdas should have exactly one statement
case *BinaryExpr:
// Precedence: use the precedence of the operator.
@@ -423,9 +513,6 @@ func (p *printer) expr(v Expr, outerPrec int) {
m := p.margin
if v.LineBreak {
p.margin = p.indent()
if v.Op == "=" {
p.margin += listIndentation
}
}
p.expr(v.X, prec)
@@ -438,95 +525,165 @@ func (p *printer) expr(v Expr, outerPrec int) {
p.expr(v.Y, prec+1)
p.margin = m
case *AssignExpr:
addParen(precAssign)
m := p.margin
if v.LineBreak {
p.margin = p.indent() + listIndentation
}
p.expr(v.LHS, precAssign)
p.printf(" %s", v.Op)
if v.LineBreak {
p.breakline()
} else {
p.printf(" ")
}
p.expr(v.RHS, precAssign+1)
p.margin = m
case *ParenExpr:
p.seq("()", []Expr{v.X}, &v.End, modeParen, false, v.ForceMultiLine)
p.seq("()", &v.Start, &[]Expr{v.X}, &v.End, modeParen, false, v.ForceMultiLine)
case *CallExpr:
addParen(precSuffix)
p.expr(v.X, precSuffix)
p.seq("()", v.List, &v.End, modeCall, v.ForceCompact, v.ForceMultiLine)
p.seq("()", &v.ListStart, &v.List, &v.End, modeCall, v.ForceCompact, v.ForceMultiLine)
case *LoadStmt:
addParen(precSuffix)
p.printf("load")
args := []Expr{v.Module}
for i := range v.From {
from := v.From[i]
to := v.To[i]
var arg Expr
if from.Name == to.Name {
// Suffix comments are attached to the `to` token,
// Before comments are attached to the `from` token,
// they need to be combined.
arg = from.asString()
arg.Comment().Before = to.Comment().Before
} else {
arg = &AssignExpr{
LHS: to,
Op: "=",
RHS: from.asString(),
}
}
args = append(args, arg)
}
p.seq("()", &v.Load, &args, &v.Rparen, modeLoad, v.ForceCompact, false)
case *ListExpr:
p.seq("[]", v.List, &v.End, modeList, false, v.ForceMultiLine)
p.seq("[]", &v.Start, &v.List, &v.End, modeList, false, v.ForceMultiLine)
case *SetExpr:
p.seq("{}", v.List, &v.End, modeList, false, v.ForceMultiLine)
p.seq("{}", &v.Start, &v.List, &v.End, modeList, false, v.ForceMultiLine)
case *TupleExpr:
p.seq("()", v.List, &v.End, modeTuple, v.ForceCompact, v.ForceMultiLine)
mode := modeTuple
if v.NoBrackets {
mode = modeSeq
}
p.seq("()", &v.Start, &v.List, &v.End, mode, v.ForceCompact, v.ForceMultiLine)
case *DictExpr:
var list []Expr
for _, x := range v.List {
list = append(list, x)
}
p.seq("{}", list, &v.End, modeDict, false, v.ForceMultiLine)
p.seq("{}", &v.Start, &list, &v.End, modeDict, false, v.ForceMultiLine)
case *ListForExpr:
case *Comprehension:
p.listFor(v)
case *ConditionalExpr:
addParen(precSuffix)
p.expr(v.Then, precSuffix)
p.expr(v.Then, precIfElse)
p.printf(" if ")
p.expr(v.Test, precSuffix)
p.expr(v.Test, precIfElse)
p.printf(" else ")
p.expr(v.Else, precSuffix)
p.expr(v.Else, precIfElse)
case *ReturnExpr:
case *ReturnStmt:
p.printf("return")
if v.X != nil {
if v.Result != nil {
p.printf(" ")
p.expr(v.X, precSuffix)
p.expr(v.Result, precLow)
}
case *FuncDef:
case *DefStmt:
p.printf("def ")
p.printf(v.Name)
p.seq("()", v.Args, &v.End, modeCall, v.ForceCompact, v.ForceMultiLine)
p.seq("()", &v.StartPos, &v.Params, nil, modeDef, v.ForceCompact, v.ForceMultiLine)
p.printf(":")
p.margin += nestedIndentation
p.newline()
p.statements(v.Body.Statements)
p.margin -= nestedIndentation
p.nestedStatements(v.Body)
case *ForLoop:
case *ForStmt:
p.printf("for ")
for i, loopVar := range v.LoopVars {
if i > 0 {
p.printf(", ")
}
p.expr(loopVar, precLow)
}
p.expr(v.Vars, precLow)
p.printf(" in ")
p.expr(v.Iterable, precLow)
p.expr(v.X, precLow)
p.printf(":")
p.margin += nestedIndentation
p.newline()
p.statements(v.Body.Statements)
p.margin -= nestedIndentation
p.nestedStatements(v.Body)
case *IfElse:
for i, block := range v.Conditions {
if i == 0 {
p.printf("if ")
} else if block.If == nil {
p.newline()
p.printf("else")
} else {
p.newline()
p.printf("elif ")
}
if block.If != nil {
p.expr(block.If, precLow)
case *IfStmt:
block := v
isFirst := true
needsEmptyLine := false
for {
p.newlineIfNeeded()
if !isFirst {
if needsEmptyLine {
p.newline()
}
p.printf("el")
}
p.printf("if ")
p.expr(block.Cond, precLow)
p.printf(":")
p.margin += nestedIndentation
p.newline()
p.statements(block.Then.Statements)
p.margin -= nestedIndentation
p.nestedStatements(block.True)
isFirst = false
_, end := block.True[len(block.True)-1].Span()
needsEmptyLine = block.ElsePos.Pos.Line-end.Line > 1
// If the else-block contains just one statement which is an IfStmt, flatten it as part
// of an if-elif chain.
// Don't do it if the "else" statement has a suffix comment or if the next "if" statement
// has a before-comment.
if len(block.False) != 1 {
break
}
next, ok := block.False[0].(*IfStmt)
if !ok {
break
}
if len(block.ElsePos.Comment().Suffix) == 0 && len(next.Comment().Before) == 0 {
block = next
continue
}
break
}
if len(block.False) > 0 {
p.newlineIfNeeded()
if needsEmptyLine {
p.newline()
}
p.printf("else:")
p.comment = append(p.comment, block.ElsePos.Comment().Suffix...)
p.nestedStatements(block.False)
}
case *ForClause:
p.printf("for ")
p.expr(v.Vars, precLow)
p.printf(" in ")
p.expr(v.X, precLow)
case *IfClause:
p.printf("if ")
p.expr(v.Cond, precLow)
}
// Add closing parenthesis if needed.
@@ -553,87 +710,159 @@ const (
modeParen // (x)
modeDict // {x:y}
modeSeq // x, y
modeDef // def f(x, y)
modeLoad // load(a, b, c)
)
// useCompactMode reports whether a sequence should be formatted in a compact mode
func (p *printer) useCompactMode(start *Position, list *[]Expr, end *End, mode seqMode, forceCompact, forceMultiLine bool) bool {
// If there are line comments, use multiline
// so we can print the comments before the closing bracket.
for _, x := range *list {
if len(x.Comment().Before) > 0 || (len(x.Comment().Suffix) > 0 && mode != modeDef) {
return false
}
}
if end != nil && len(end.Before) > 0 {
return false
}
// Implicit tuples are always compact
if mode == modeSeq {
return true
}
// In the Default and .bzl printing modes try to keep the original printing style.
// Non-top-level statements and lists of arguments of a function definition
// should also keep the original style regardless of the mode.
if (p.level != 0 || p.fileType == TypeDefault || p.fileType == TypeBzl || mode == modeDef) && mode != modeLoad {
// If every element (including the brackets) ends on the same line where the next element starts,
// use the compact mode, otherwise use multiline mode.
// If a node's line number is 0, it means it doesn't appear in the original file,
// its position shouldn't be taken into account. Unless a sequence is new,
// then use multiline mode if ForceMultiLine mode was set.
previousEnd := start
isNewSeq := start.Line == 0
for _, x := range *list {
start, end := x.Span()
isNewSeq = isNewSeq && start.Line == 0
if isDifferentLines(&start, previousEnd) {
return false
}
if end.Line != 0 {
previousEnd = &end
}
}
if end != nil {
isNewSeq = isNewSeq && end.Pos.Line == 0
if isDifferentLines(previousEnd, &end.Pos) {
return false
}
}
if !isNewSeq {
return true
}
// Use the forceMultiline value for new sequences.
return !forceMultiLine
}
// In Build mode, use the forceMultiline and forceCompact values
if forceMultiLine {
return false
}
if forceCompact {
return true
}
// If neither of the flags are set, use compact mode only for empty or 1-element sequences
return len(*list) <= 1
}
// seq formats a list of values inside a given bracket pair (brack = "()", "[]", "{}").
// The end node holds any trailing comments to be printed just before the
// closing bracket.
// The mode parameter specifies the sequence mode (see above).
// If forceMultiLine is true, seq avoids the compact form even
// for 0- and 1-element sequences.
func (p *printer) seq(brack string, list []Expr, end *End, mode seqMode, forceCompact, forceMultiLine bool) {
p.printf("%s", brack[:1])
func (p *printer) seq(brack string, start *Position, list *[]Expr, end *End, mode seqMode, forceCompact, forceMultiLine bool) {
if mode != modeSeq {
p.printf("%s", brack[:1])
}
p.depth++
// If there are line comments, force multiline
// so we can print the comments before the closing bracket.
for _, x := range list {
if len(x.Comment().Before) > 0 {
forceMultiLine = true
defer func() {
p.depth--
if mode != modeSeq {
p.printf("%s", brack[1:])
}
}
if len(end.Before) > 0 {
forceMultiLine = true
}
}()
// Resolve possibly ambiguous call arguments explicitly
// instead of depending on implicit resolution in logic below.
if forceMultiLine {
forceCompact = false
}
switch {
case len(list) == 0 && !forceMultiLine:
// Compact form: print nothing.
case len(list) == 1 && !forceMultiLine:
// Compact form.
p.expr(list[0], precLow)
// Tuple must end with comma, to mark it as a tuple.
if mode == modeTuple {
p.printf(",")
}
case forceCompact:
// Compact form but multiple elements.
for i, x := range list {
if p.useCompactMode(start, list, end, mode, forceCompact, forceMultiLine) {
for i, x := range *list {
if i > 0 {
p.printf(", ")
}
p.expr(x, precLow)
}
default:
// Multi-line form.
p.margin += listIndentation
for i, x := range list {
// If we are about to break the line before the first
// element and there are trailing end-of-line comments
// waiting to be printed, delay them and print them as
// whole-line comments preceding that element.
// Do this by printing a newline ourselves and positioning
// so that the end-of-line comment, with the two spaces added,
// will line up with the current margin.
if i == 0 && len(p.comment) > 0 {
p.printf("\n%*s", p.margin-2, "")
}
p.newline()
p.expr(x, precLow)
if mode != modeParen || i+1 < len(list) {
p.printf(",")
}
// Single-element tuple must end with comma, to mark it as a tuple.
if len(*list) == 1 && mode == modeTuple {
p.printf(",")
}
// Final comments.
return
}
// Multi-line form.
indentation := listIndentation
if mode == modeDef {
indentation = defIndentation
}
p.margin += indentation
for i, x := range *list {
// If we are about to break the line before the first
// element and there are trailing end-of-line comments
// waiting to be printed, delay them and print them as
// whole-line comments preceding that element.
// Do this by printing a newline ourselves and positioning
// so that the end-of-line comment, with the two spaces added,
// will line up with the current margin.
if i == 0 && len(p.comment) > 0 {
p.printf("\n%*s", p.margin-2, "")
}
p.newline()
p.expr(x, precLow)
if i+1 < len(*list) || needsTrailingComma(mode, x) {
p.printf(",")
}
}
// Final comments.
if end != nil {
for _, com := range end.Before {
p.newline()
p.printf("%s", strings.TrimSpace(com.Token))
}
p.margin -= listIndentation
}
p.margin -= indentation
// in modeDef print the closing bracket on the same line
if mode != modeDef {
p.newline()
}
p.depth--
p.printf("%s", brack[1:])
}
func needsTrailingComma(mode seqMode, v Expr) bool {
switch mode {
case modeDef:
return false
case modeParen:
return false
case modeCall:
// *args and **kwargs in fn calls
switch v := v.(type) {
case *UnaryExpr:
if v.Op == "*" || v.Op == "**" {
return false
}
}
}
return true
}
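// (Editor's note, not part of the vendored change.) Combined with the
// multi-line branch of seq above, this means the last element of a
// multi-line call normally gets a trailing comma, except when it is a
// *args or **kwargs argument; def parameter lists (modeDef) and
// parenthesized expressions (modeParen) never get one.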
// listFor formats a ListForExpr (list comprehension).
@@ -647,7 +876,7 @@ func (p *printer) seq(brack string, list []Expr, end *End, mode seqMode, forceCo
// if c
// ]
//
func (p *printer) listFor(v *ListForExpr) {
func (p *printer) listFor(v *Comprehension) {
multiLine := v.ForceMultiLine || len(v.End.Before) > 0
// space breaks the line in multiline mode
@@ -660,41 +889,23 @@ func (p *printer) listFor(v *ListForExpr) {
}
}
if v.Brack != "" {
p.depth++
p.printf("%s", v.Brack[:1])
open, close := "[", "]"
if v.Curly {
open, close = "{", "}"
}
p.depth++
p.printf("%s", open)
if multiLine {
if v.Brack != "" {
p.margin += listIndentation
}
p.margin += listIndentation
p.newline()
}
p.expr(v.X, precLow)
p.expr(v.Body, precLow)
for _, c := range v.For {
for _, c := range v.Clauses {
space()
p.printf("for ")
for i, name := range c.For.Var {
if i > 0 {
p.printf(", ")
}
p.expr(name, precLow)
}
p.printf(" in ")
p.expr(c.For.Expr, precLow)
p.comment = append(p.comment, c.For.Comment().Suffix...)
for _, i := range c.Ifs {
space()
p.printf("if ")
p.expr(i.Cond, precLow)
p.comment = append(p.comment, i.Comment().Suffix...)
}
p.comment = append(p.comment, c.Comment().Suffix...)
p.expr(c, precLow)
}
if multiLine {
@@ -702,16 +913,12 @@ func (p *printer) listFor(v *ListForExpr) {
p.newline()
p.printf("%s", strings.TrimSpace(com.Token))
}
if v.Brack != "" {
p.margin -= listIndentation
}
p.margin -= listIndentation
p.newline()
}
if v.Brack != "" {
p.printf("%s", v.Brack[1:])
p.depth--
}
p.printf("%s", close)
p.depth--
}
func (p *printer) isTopLevel() bool {

View File

@@ -59,10 +59,10 @@ var esc = [256]byte{
// being used as shell arguments containing regular expressions.
const notEsc = " !#$%&()*+,-./:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~"
// unquote unquotes the quoted string, returning the actual
// Unquote unquotes the quoted string, returning the actual
// string value, whether the original was triple-quoted, and
// an error describing invalid input.
func unquote(quoted string) (s string, triple bool, err error) {
func Unquote(quoted string) (s string, triple bool, err error) {
// Check for raw prefix: means don't interpret the inner \.
raw := false
if strings.HasPrefix(quoted, "r") {
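// (Editor's note, not part of the vendored change.) A minimal sketch of
// calling the newly exported Unquote; the literal is illustrative only:
//
//    s, triple, err := Unquote(`"hello\nworld"`)
//    // s == "hello" + "\n" + "world", triple == false, err == nil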

View File

@@ -18,12 +18,12 @@ distributed under the License is distributed on an "AS IS" BASIS,
package build
import (
"github.com/bazelbuild/buildtools/tables"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"github.com/bazelbuild/buildtools/tables"
)
// For debugging: flag to disable certain rewrites.
@@ -62,55 +62,78 @@ func Rewrite(f *File, info *RewriteInfo) {
for _, r := range rewrites {
if !disabled(r.name) {
r.fn(f, info)
if f.Type&r.scope != 0 {
r.fn(f, info)
}
}
}
}
// RewriteInfo collects information about what Rewrite did.
type RewriteInfo struct {
EditLabel int // number of label strings edited
NameCall int // number of calls with argument names added
SortCall int // number of call argument lists sorted
SortStringList int // number of string lists sorted
UnsafeSort int // number of unsafe string lists sorted
Log []string // log entries - may change
EditLabel int // number of label strings edited
NameCall int // number of calls with argument names added
SortCall int // number of call argument lists sorted
SortStringList int // number of string lists sorted
UnsafeSort int // number of unsafe string lists sorted
SortLoad int // number of load argument lists sorted
FormatDocstrings int // number of reindented docstrings
ReorderArguments int // number of reordered function call arguments
EditOctal int // number of edited octals
Log []string // log entries - may change
}
func (info *RewriteInfo) String() string {
s := ""
if info.EditLabel > 0 {
s += " label"
// Stats returns a map with statistics about applied rewrites
func (info *RewriteInfo) Stats() map[string]int {
return map[string]int{
"label": info.EditLabel,
"callname": info.NameCall,
"callsort": info.SortCall,
"listsort": info.SortStringList,
"unsafesort": info.UnsafeSort,
"sortload": info.SortLoad,
"formatdocstrings": info.FormatDocstrings,
"reorderarguments": info.ReorderArguments,
"editoctal": info.EditOctal,
}
if info.NameCall > 0 {
s += " callname"
}
if info.SortCall > 0 {
s += " callsort"
}
if info.SortStringList > 0 {
s += " listsort"
}
if info.UnsafeSort > 0 {
s += " unsafesort"
}
if s != "" {
s = s[1:]
}
return s
}
// Each rewrite function can be applied either to BUILD files, to other files (such as .bzl),
// or to all files.
const (
scopeDefault = TypeDefault | TypeBzl // .bzl and generic Starlark files
scopeBuild = TypeBuild | TypeWorkspace // BUILD and WORKSPACE files
scopeBoth = scopeDefault | scopeBuild
)
// rewrites is the list of all Buildifier rewrites, in the order in which they are applied.
// The order here matters: for example, label canonicalization must happen
// before sorting lists of strings.
var rewrites = []struct {
name string
fn func(*File, *RewriteInfo)
name string
fn func(*File, *RewriteInfo)
scope FileType
}{
{"callsort", sortCallArgs},
{"label", fixLabels},
{"listsort", sortStringLists},
{"multiplus", fixMultilinePlus},
{"callsort", sortCallArgs, scopeBuild},
{"label", fixLabels, scopeBuild},
{"listsort", sortStringLists, scopeBoth},
{"multiplus", fixMultilinePlus, scopeBuild},
{"loadsort", sortAllLoadArgs, scopeBoth},
{"formatdocstrings", formatDocstrings, scopeBoth},
{"reorderarguments", reorderArguments, scopeBoth},
{"editoctal", editOctals, scopeBoth},
}
// DisableLoadSortForBuildFiles disables the loadsort transformation for BUILD files.
// This is a temporary function for backward compatibility; it can be called when there are many
// already formatted BUILD files that shouldn't be changed by the transformation.
func DisableLoadSortForBuildFiles() {
for i := range rewrites {
if rewrites[i].name == "loadsort" {
rewrites[i].scope = scopeDefault
break
}
}
}
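// exampleRewriteStats is an editor-added sketch (not part of the vendored
// change) showing how a caller might drive Rewrite and read the new Stats
// map. The file is built by hand from AST nodes defined in this package;
// the rule name and attribute values are illustrative only.
func exampleRewriteStats() map[string]int {
    f := &File{
        Path: "pkg/BUILD",
        Type: TypeBuild,
        Stmt: []Expr{
            &CallExpr{
                X: &Ident{Name: "cc_library"},
                List: []Expr{
                    &AssignExpr{LHS: &Ident{Name: "name"}, Op: "=", RHS: &StringExpr{Value: "lib"}},
                    &AssignExpr{LHS: &Ident{Name: "srcs"}, Op: "=", RHS: &ListExpr{List: []Expr{
                        &StringExpr{Value: "b.cc"},
                        &StringExpr{Value: "a.cc"},
                    }}},
                },
            },
        },
    }
    var info RewriteInfo
    Rewrite(f, &info)
    // The srcs list above is expected to be sorted by the "listsort" rewrite,
    // in which case info.Stats()["listsort"] reports it.
    return info.Stats()
}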
// leaveAlone reports whether any of the nodes on the stack are marked
@@ -212,14 +235,19 @@ func fixLabels(f *File, info *RewriteInfo) {
editPerformed := false
if tables.StripLabelLeadingSlashes && strings.HasPrefix(str.Value, "//") {
if path.Dir(f.Path) == "." || !strings.HasPrefix(str.Value, "//:") {
if filepath.Dir(f.Path) == "." || !strings.HasPrefix(str.Value, "//:") {
editPerformed = true
str.Value = str.Value[2:]
}
}
if tables.ShortenAbsoluteLabelsToRelative {
thisPackage := labelPrefix + path.Dir(f.Path)
thisPackage := labelPrefix + filepath.Dir(f.Path)
// filepath.Dir on Windows uses backslashes as separators, while labels always have slashes.
if filepath.Separator != '/' {
thisPackage = strings.Replace(thisPackage, string(filepath.Separator), "/", -1)
}
if str.Value == thisPackage {
editPerformed = true
str.Value = ":" + path.Base(str.Value)
@@ -255,18 +283,18 @@ func fixLabels(f *File, info *RewriteInfo) {
if leaveAlone1(v.List[i]) {
continue
}
as, ok := v.List[i].(*BinaryExpr)
if !ok || as.Op != "=" {
as, ok := v.List[i].(*AssignExpr)
if !ok {
continue
}
key, ok := as.X.(*LiteralExpr)
if !ok || !tables.IsLabelArg[key.Token] || tables.LabelBlacklist[callName(v)+"."+key.Token] {
key, ok := as.LHS.(*Ident)
if !ok || !tables.IsLabelArg[key.Name] || tables.LabelBlacklist[callName(v)+"."+key.Name] {
continue
}
if leaveAlone1(as.Y) {
if leaveAlone1(as.RHS) {
continue
}
if list, ok := as.Y.(*ListExpr); ok {
if list, ok := as.RHS.(*ListExpr); ok {
for i := range list.List {
if leaveAlone1(list.List[i]) {
continue
@@ -275,7 +303,7 @@ func fixLabels(f *File, info *RewriteInfo) {
shortenLabel(list.List[i])
}
}
if set, ok := as.Y.(*SetExpr); ok {
if set, ok := as.RHS.(*SetExpr); ok {
for i := range set.List {
if leaveAlone1(set.List[i]) {
continue
@@ -284,8 +312,8 @@ func fixLabels(f *File, info *RewriteInfo) {
shortenLabel(set.List[i])
}
} else {
joinLabel(&as.Y)
shortenLabel(as.Y)
joinLabel(&as.RHS)
shortenLabel(as.RHS)
}
}
}
@@ -295,11 +323,11 @@ func fixLabels(f *File, info *RewriteInfo) {
// callName returns the name of the rule being called by call.
// If the call is not to a literal rule name, callName returns "".
func callName(call *CallExpr) string {
rule, ok := call.X.(*LiteralExpr)
rule, ok := call.X.(*Ident)
if !ok {
return ""
}
return rule.Token
return rule.Name
}
// sortCallArgs sorts lists of named arguments to a call.
@@ -368,9 +396,9 @@ func ruleNamePriority(rule, arg string) int {
// If x is of the form key=value, argName returns the string key.
// Otherwise argName returns "".
func argName(x Expr) string {
if as, ok := x.(*BinaryExpr); ok && as.Op == "=" {
if id, ok := as.X.(*LiteralExpr); ok {
return id.Token
if as, ok := x.(*AssignExpr); ok {
if id, ok := as.LHS.(*Ident); ok {
return id.Name
}
}
return ""
@@ -416,31 +444,31 @@ func sortStringLists(f *File, info *RewriteInfo) {
if leaveAlone1(arg) {
continue
}
as, ok := arg.(*BinaryExpr)
if !ok || as.Op != "=" || leaveAlone1(as) || doNotSort(as) {
as, ok := arg.(*AssignExpr)
if !ok || leaveAlone1(as) || doNotSort(as) {
continue
}
key, ok := as.X.(*LiteralExpr)
key, ok := as.LHS.(*Ident)
if !ok {
continue
}
context := rule + "." + key.Token
if !tables.IsSortableListArg[key.Token] || tables.SortableBlacklist[context] {
context := rule + "." + key.Name
if !tables.IsSortableListArg[key.Name] || tables.SortableBlacklist[context] || f.Type == TypeDefault || f.Type == TypeBzl {
continue
}
if disabled("unsafesort") && !tables.SortableWhitelist[context] && !allowedSort(context) {
continue
}
sortStringList(as.Y, info, context)
sortStringList(as.RHS, info, context)
}
case *BinaryExpr:
case *AssignExpr:
if disabled("unsafesort") {
return
}
// "keep sorted" comment on x = list forces sorting of list.
as := v
if as.Op == "=" && keepSorted(as) {
sortStringList(as.Y, info, "?")
if keepSorted(as) {
sortStringList(as.RHS, info, "?")
}
case *KeyValueExpr:
if disabled("unsafesort") {
@@ -455,7 +483,7 @@ func sortStringLists(f *File, info *RewriteInfo) {
return
}
// "keep sorted" comment above first list element also forces sorting of list.
if len(v.List) > 0 && keepSorted(v.List[0]) {
if len(v.List) > 0 && (keepSorted(v) || keepSorted(v.List[0])) {
sortStringList(v, info, "?")
}
}
@@ -476,7 +504,7 @@ func sortStringList(x Expr, info *RewriteInfo, context string) {
return
}
forceSort := keepSorted(list.List[0])
forceSort := keepSorted(list) || keepSorted(list.List[0])
// TODO(bazel-team): Decide how to recognize lists that cannot
// be sorted. Avoiding all lists with comments avoids sorting
@@ -569,11 +597,11 @@ func callArgName(stk []Expr) string {
if !ok {
return ""
}
rule, ok := call.X.(*LiteralExpr)
rule, ok := call.X.(*Ident)
if !ok {
return ""
}
return rule.Token + "." + arg
return rule.Name + "." + arg
}
// A stringSortKey records information about a single string literal to be
@@ -794,6 +822,17 @@ func fixMultilinePlus(f *File, info *RewriteInfo) {
})
}
// sortAllLoadArgs sorts all load arguments in the file
func sortAllLoadArgs(f *File, info *RewriteInfo) {
Walk(f, func(v Expr, stk []Expr) {
if load, ok := v.(*LoadStmt); ok {
if SortLoadArgs(load) {
info.SortLoad++
}
}
})
}
// hasComments reports whether any comments are associated with
// the list or its elements.
func hasComments(list *ListExpr) (line, suffix bool) {
@@ -815,3 +854,149 @@ func hasComments(list *ListExpr) (line, suffix bool) {
}
return
}
// A wrapper for a LoadStmt's From and To slices for consistent sorting of their contents.
// It's assumed that the following slices have the same length. The contents are sorted by
// the `To` attribute, but all items with equal "From" and "To" parts are placed before the items
// with different parts.
type loadArgs struct {
From []*Ident
To []*Ident
modified bool
}
func (args loadArgs) Len() int {
return len(args.From)
}
func (args loadArgs) Swap(i, j int) {
args.From[i], args.From[j] = args.From[j], args.From[i]
args.To[i], args.To[j] = args.To[j], args.To[i]
args.modified = true
}
func (args loadArgs) Less(i, j int) bool {
// Arguments with equal "from" and "to" parts are prioritized
equalI := args.From[i].Name == args.To[i].Name
equalJ := args.From[j].Name == args.To[j].Name
if equalI != equalJ {
// If equalI and !equalJ, return true, otherwise false.
// Equivalently, return equalI.
return equalI
}
return args.To[i].Name < args.To[j].Name
}
// SortLoadArgs sorts a load statement's arguments (lexicographically, but positional first)
func SortLoadArgs(load *LoadStmt) bool {
args := loadArgs{From: load.From, To: load.To}
sort.Sort(args)
return args.modified
}
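// exampleSortLoadArgs is an editor-added sketch (not part of the vendored
// change) of the ordering described above: entries whose From and To names
// match sort before aliased entries, and ties are broken by the To name.
// The module and symbol names are illustrative only.
func exampleSortLoadArgs() *LoadStmt {
    load := &LoadStmt{
        Module: &StringExpr{Value: ":defs.bzl"},
        To:     []*Ident{{Name: "z"}, {Name: "a"}, {Name: "b"}},
        From:   []*Ident{{Name: "z"}, {Name: "impl_a"}, {Name: "b"}},
    }
    SortLoadArgs(load)
    // Expected To order afterwards: "b", "z", "a" ("b" and "z" keep their
    // original names, while "a" is an alias for "impl_a" and sorts last).
    return load
}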
// formatDocstrings fixes the indentation and trailing whitespace of docstrings
func formatDocstrings(f *File, info *RewriteInfo) {
Walk(f, func(v Expr, stk []Expr) {
def, ok := v.(*DefStmt)
if !ok || len(def.Body) == 0 {
return
}
docstring, ok := def.Body[0].(*StringExpr)
if !ok || !docstring.TripleQuote {
return
}
oldIndentation := docstring.Start.LineRune - 1 // LineRune starts with 1
newIndentation := nestedIndentation * len(stk)
// Operate on Token, not Value, because their line breaks can be different if a line ends with
// a backslash.
updatedToken := formatString(docstring.Token, oldIndentation, newIndentation)
if updatedToken != docstring.Token {
docstring.Token = updatedToken
// Update the value to keep it consistent with Token
docstring.Value, _, _ = Unquote(updatedToken)
info.FormatDocstrings++
}
})
}
// formatString modifies a string value of a docstring to match the new indentation level and
// to remove trailing whitespace from its lines.
func formatString(value string, oldIndentation, newIndentation int) string {
difference := newIndentation - oldIndentation
lines := strings.Split(value, "\n")
for i, line := range lines {
if i == 0 {
// The first line shouldn't be touched because it starts right after ''' or """
continue
}
if difference > 0 {
line = strings.Repeat(" ", difference) + line
} else {
for i, rune := range line {
if i == -difference || rune != ' ' {
line = line[i:]
break
}
}
}
if i != len(lines)-1 {
// Remove trailing space from the line unless it's the last line that's responsible
// for the indentation of the closing `"""`
line = strings.TrimRight(line, " ")
}
lines[i] = line
}
return strings.Join(lines, "\n")
}
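// (Editor's note, not part of the vendored change.) Illustration of the
// re-indentation above: with oldIndentation = 2 and newIndentation = 4
// (difference = +2), a docstring body line "  Arg: x " becomes
// "    Arg: x": two spaces are prepended and the trailing blank is removed.
// With a negative difference, up to -difference leading spaces are stripped
// from each line instead.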
// argumentType returns an integer by which funcall arguments can be sorted:
// 1 for positional, 2 for named, 3 for *args, 4 for **kwargs
func argumentType(expr Expr) int {
switch expr := expr.(type) {
case *UnaryExpr:
switch expr.Op {
case "**":
return 4
case "*":
return 3
}
case *AssignExpr:
return 2
}
return 1
}
// reorderArguments fixes the order of arguments of a function call
// (positional, named, *args, **kwargs)
func reorderArguments(f *File, info *RewriteInfo) {
Walk(f, func(expr Expr, stack []Expr) {
call, ok := expr.(*CallExpr)
if !ok {
return
}
compare := func(i, j int) bool {
return argumentType(call.List[i]) < argumentType(call.List[j])
}
if !sort.SliceIsSorted(call.List, compare) {
sort.SliceStable(call.List, compare)
info.ReorderArguments++
}
})
}
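// (Editor's note, not part of the vendored change.) In terms of the
// argumentType buckets above, a call such as
//
//    f(b = 2, "pos", **kw, *args)
//
// is reordered to
//
//    f("pos", b = 2, *args, **kw)
//
// The sort is stable, so arguments within the same bucket keep their
// original relative order (named arguments are not alphabetized here).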
// editOctals inserts 'o' into octal numbers to make it more obvious they are octal
// 0123 -> 0o123
func editOctals(f *File, info *RewriteInfo) {
Walk(f, func(expr Expr, stack []Expr) {
l, ok := expr.(*LiteralExpr)
if !ok {
return
}
if len(l.Token) > 1 && l.Token[0] == '0' && l.Token[1] >= '0' && l.Token[1] <= '9' {
l.Token = "0o" + l.Token[1:]
info.EditOctal++
}
})
}

View File

@@ -19,8 +19,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
package build
import (
"strings"
"path/filepath"
"strings"
)
// A Rule represents a single BUILD rule.
@@ -29,6 +29,11 @@ type Rule struct {
ImplicitName string // The name which should be used if the name attribute is not set. See the comment on File.implicitRuleName.
}
// NewRule is a simple constructor for Rule.
func NewRule(call *CallExpr) *Rule {
return &Rule{call, ""}
}
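// exampleRule is an editor-added sketch (not part of the vendored change)
// showing the new NewRule constructor together with the AssignExpr-based
// attribute helpers defined below. The rule kind and attribute values are
// illustrative only.
func exampleRule() *Rule {
    r := NewRule(&CallExpr{X: &Ident{Name: "go_library"}})
    r.SetAttr("name", &StringExpr{Value: "foo"})
    r.SetAttr("visibility", &ListExpr{List: []Expr{&StringExpr{Value: "//visibility:public"}}})
    _ = r.ExplicitName() // "foo"
    _ = r.AttrKeys()     // ["name", "visibility"]
    return r
}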
func (f *File) Rule(call *CallExpr) *Rule {
r := &Rule{call, ""}
if r.AttrString("name") == "" {
@@ -43,15 +48,26 @@ func (f *File) Rules(kind string) []*Rule {
var all []*Rule
for _, stmt := range f.Stmt {
call, ok := stmt.(*CallExpr)
if !ok {
continue
}
rule := f.Rule(call)
if kind != "" && rule.Kind() != kind {
continue
}
all = append(all, rule)
Walk(stmt, func(x Expr, stk []Expr) {
call, ok := x.(*CallExpr)
if !ok {
return
}
// Skip nested calls.
for _, frame := range stk {
if _, ok := frame.(*CallExpr); ok {
return
}
}
// Check if the rule kind is correct.
rule := f.Rule(call)
if kind != "" && rule.Kind() != kind {
return
}
all = append(all, rule)
})
}
return all
@@ -145,11 +161,11 @@ func (r *Rule) Kind() string {
names = append(names, x.Name)
expr = x.X
}
x, ok := expr.(*LiteralExpr)
x, ok := expr.(*Ident)
if !ok {
return ""
}
names = append(names, x.Token)
names = append(names, x.Name)
// Reverse the elements since the deepest expression contains the leading literal
for l, r := 0, len(names)-1; l < r; l, r = l+1, r-1 {
names[l], names[r] = names[r], names[l]
@@ -161,18 +177,23 @@ func (r *Rule) Kind() string {
func (r *Rule) SetKind(kind string) {
names := strings.Split(kind, ".")
var expr Expr
expr = &LiteralExpr{Token: names[0]}
expr = &Ident{Name: names[0]}
for _, name := range names[1:] {
expr = &DotExpr{X: expr, Name: name}
}
r.Call.X = expr
}
// ExplicitName returns the rule's target name if it's explicitly provided as a string value, "" otherwise.
func (r *Rule) ExplicitName() string {
return r.AttrString("name")
}
// Name returns the rule's target name.
// If the rule has no explicit target name, Name returns the implicit name if there is one, else the empty string.
func (r *Rule) Name() string {
explicitName := r.AttrString("name")
if explicitName == "" {
explicitName := r.ExplicitName()
if explicitName == "" && r.Kind() != "package" {
return r.ImplicitName
}
return explicitName
@@ -182,26 +203,25 @@ func (r *Rule) Name() string {
func (r *Rule) AttrKeys() []string {
var keys []string
for _, expr := range r.Call.List {
if binExpr, ok := expr.(*BinaryExpr); ok && binExpr.Op == "=" {
if keyExpr, ok := binExpr.X.(*LiteralExpr); ok {
keys = append(keys, keyExpr.Token)
if as, ok := expr.(*AssignExpr); ok {
if keyExpr, ok := as.LHS.(*Ident); ok {
keys = append(keys, keyExpr.Name)
}
}
}
return keys
}
// AttrDefn returns the BinaryExpr defining the rule's attribute with the given key.
// That is, the result is a *BinaryExpr with Op == "=".
// AttrDefn returns the AssignExpr defining the rule's attribute with the given key.
// If the rule has no such attribute, AttrDefn returns nil.
func (r *Rule) AttrDefn(key string) *BinaryExpr {
func (r *Rule) AttrDefn(key string) *AssignExpr {
for _, kv := range r.Call.List {
as, ok := kv.(*BinaryExpr)
if !ok || as.Op != "=" {
as, ok := kv.(*AssignExpr)
if !ok {
continue
}
k, ok := as.X.(*LiteralExpr)
if !ok || k.Token != key {
k, ok := as.LHS.(*Ident)
if !ok || k.Name != key {
continue
}
return as
@@ -217,7 +237,7 @@ func (r *Rule) Attr(key string) Expr {
if as == nil {
return nil
}
return as.Y
return as.RHS
}
// DelAttr deletes the rule's attribute with the named key.
@@ -225,17 +245,17 @@ func (r *Rule) Attr(key string) Expr {
func (r *Rule) DelAttr(key string) Expr {
list := r.Call.List
for i, kv := range list {
as, ok := kv.(*BinaryExpr)
if !ok || as.Op != "=" {
as, ok := kv.(*AssignExpr)
if !ok {
continue
}
k, ok := as.X.(*LiteralExpr)
if !ok || k.Token != key {
k, ok := as.LHS.(*Ident)
if !ok || k.Name != key {
continue
}
copy(list[i:], list[i+1:])
r.Call.List = list[:len(list)-1]
return as.Y
return as.RHS
}
return nil
}
@@ -246,15 +266,15 @@ func (r *Rule) DelAttr(key string) Expr {
func (r *Rule) SetAttr(key string, val Expr) {
as := r.AttrDefn(key)
if as != nil {
as.Y = val
as.RHS = val
return
}
r.Call.List = append(r.Call.List,
&BinaryExpr{
X: &LiteralExpr{Token: key},
Op: "=",
Y: val,
&AssignExpr{
LHS: &Ident{Name: key},
Op: "=",
RHS: val,
},
)
}
@@ -265,11 +285,14 @@ func (r *Rule) SetAttr(key string, val Expr) {
// If the rule has no such attribute or the attribute is not an identifier or number,
// AttrLiteral returns "".
func (r *Rule) AttrLiteral(key string) string {
lit, ok := r.Attr(key).(*LiteralExpr)
if !ok {
return ""
value := r.Attr(key)
if ident, ok := value.(*Ident); ok {
return ident.Name
}
return lit.Token
if literal, ok := value.(*LiteralExpr); ok {
return literal.Token
}
return ""
}
// AttrString returns the value of the rule's attribute

View File

@@ -79,19 +79,41 @@ func (c *Comments) Comment() *Comments {
return c
}
// A File represents an entire BUILD file.
// stmtsEnd returns the end position of the last non-nil statement
func stmtsEnd(stmts []Expr) Position {
for i := len(stmts) - 1; i >= 0; i-- {
if stmts[i] != nil {
_, end := stmts[i].Span()
return end
}
}
return Position{}
}
// A File represents an entire BUILD or .bzl file.
type File struct {
Path string // file path, relative to workspace directory
Pkg string // optional; the package of the file
Type FileType
Comments
Stmt []Expr
}
// DisplayPath returns the filename if it's not empty, "<stdin>" otherwise
func (f *File) DisplayPath() string {
if f.Path == "" {
return "<stdin>"
}
return f.Path
}
func (f *File) Span() (start, end Position) {
if len(f.Stmt) == 0 {
return
p := Position{Line: 1, LineRune: 1}
return p, p
}
start, _ = f.Stmt[0].Span()
_, end = f.Stmt[len(f.Stmt)-1].Span()
start = Position{}
end = stmtsEnd(f.Stmt)
return start, end
}
@@ -106,18 +128,39 @@ func (x *CommentBlock) Span() (start, end Position) {
return x.Start, x.Start
}
// A PythonBlock represents a blob of Python code, typically a def or for loop.
type PythonBlock struct {
// An Ident represents an identifier.
type Ident struct {
Comments
Start Position
Token string // raw Python code, including final newline
NamePos Position
Name string
}
func (x *PythonBlock) Span() (start, end Position) {
return x.Start, x.Start.add(x.Token)
func (x *Ident) Span() (start, end Position) {
return x.NamePos, x.NamePos.add(x.Name)
}
// A LiteralExpr represents a literal identifier or number.
// BranchStmt represents a `pass`, `break`, or `continue` statement.
type BranchStmt struct {
Comments
Token string // pass, break, continue
TokenPos Position
}
func (x *BranchStmt) Span() (start, end Position) {
return x.TokenPos, x.TokenPos.add(x.Token)
}
func (x *Ident) asString() *StringExpr {
_, end := x.Span()
return &StringExpr{
Comments: x.Comments,
Start: x.NamePos,
Value: x.Name,
End: end,
}
}
// A LiteralExpr represents a literal number.
type LiteralExpr struct {
Comments
Start Position
@@ -188,32 +231,32 @@ func (x *DotExpr) Span() (start, end Position) {
return start, x.NamePos.add(x.Name)
}
// A ListForExpr represents a list comprehension expression: [X for ... if ...].
type ListForExpr struct {
// A Comprehension represents a list comprehension expression: [X for ... if ...].
type Comprehension struct {
Comments
Curly bool // curly braces (as opposed to square brackets)
Lbrack Position
Body Expr
Clauses []Expr // = *ForClause | *IfClause
ForceMultiLine bool // split expression across multiple lines
Brack string // "", "()", or "[]"
Start Position
X Expr
For []*ForClauseWithIfClausesOpt
End
}
func (x *ListForExpr) Span() (start, end Position) {
return x.Start, x.End.Pos.add("]")
func (x *Comprehension) Span() (start, end Position) {
return x.Lbrack, x.End.Pos.add("]")
}
// A ForClause represents a for clause in a list comprehension: for Var in Expr.
type ForClause struct {
Comments
For Position
Var []Expr
Vars Expr
In Position
Expr Expr
X Expr
}
func (x *ForClause) Span() (start, end Position) {
_, end = x.Expr.Span()
_, end = x.X.Span()
return x.For, end
}
@@ -229,23 +272,6 @@ func (x *IfClause) Span() (start, end Position) {
return x.If, end
}
// A ForClauseWithIfClausesOpt represents a for clause in a list comprehension followed by optional
// if expressions: for ... in ... [if ... if ...]
type ForClauseWithIfClausesOpt struct {
Comments
For *ForClause
Ifs []*IfClause
}
func (x *ForClauseWithIfClausesOpt) Span() (start, end Position) {
start, end = x.For.Span()
if len(x.Ifs) > 0 {
_, end = x.Ifs[len(x.Ifs)-1].Span()
}
return start, end
}
// A KeyValueExpr represents a dictionary entry: Key: Value.
type KeyValueExpr struct {
Comments
@@ -264,8 +290,7 @@ func (x *KeyValueExpr) Span() (start, end Position) {
type DictExpr struct {
Comments
Start Position
List []Expr // all *KeyValueExprs
Comma Position // position of trailing comma, if any
List []Expr // all *KeyValueExprs
End
ForceMultiLine bool // force multiline form when printing
}
@@ -279,7 +304,6 @@ type ListExpr struct {
Comments
Start Position
List []Expr
Comma Position // position of trailing comma, if any
End
ForceMultiLine bool // force multiline form when printing
}
@@ -293,7 +317,6 @@ type SetExpr struct {
Comments
Start Position
List []Expr
Comma Position // position of trailing comma, if any
End
ForceMultiLine bool // force multiline form when printing
}
@@ -305,16 +328,21 @@ func (x *SetExpr) Span() (start, end Position) {
// A TupleExpr represents a tuple literal: (List)
type TupleExpr struct {
Comments
Start Position
List []Expr
Comma Position // position of trailing comma, if any
NoBrackets bool // true if a tuple has no brackets, e.g. `a, b = x`
Start Position
List []Expr
End
ForceCompact bool // force compact (non-multiline) form when printing
ForceMultiLine bool // force multiline form when printing
}
func (x *TupleExpr) Span() (start, end Position) {
return x.Start, x.End.Pos.add(")")
if !x.NoBrackets {
return x.Start, x.End.Pos.add(")")
}
start, _ = x.List[0].Span()
_, end = x.List[len(x.List)-1].Span()
return start, end
}
// A UnaryExpr represents a unary expression: Op X.
@@ -326,6 +354,9 @@ type UnaryExpr struct {
}
func (x *UnaryExpr) Span() (start, end Position) {
if x.X == nil {
return x.OpStart, x.OpStart
}
_, end = x.X.Span()
return x.OpStart, end
}
@@ -346,6 +377,22 @@ func (x *BinaryExpr) Span() (start, end Position) {
return start, end
}
// An AssignExpr represents a binary expression with `=`: LHS = RHS.
type AssignExpr struct {
Comments
LHS Expr
OpPos Position
Op string
LineBreak bool // insert line break between Op and RHS
RHS Expr
}
func (x *AssignExpr) Span() (start, end Position) {
start, _ = x.LHS.Span()
_, end = x.RHS.Span()
return start, end
}
// A ParenExpr represents a parenthesized expression: (X).
type ParenExpr struct {
Comments
@@ -374,7 +421,7 @@ type SliceExpr struct {
func (x *SliceExpr) Span() (start, end Position) {
start, _ = x.X.Span()
return start, x.End
return start, x.End.add("]")
}
// An IndexExpr represents an index expression: X[Y].
@@ -388,21 +435,30 @@ type IndexExpr struct {
func (x *IndexExpr) Span() (start, end Position) {
start, _ = x.X.Span()
return start, x.End
return start, x.End.add("]")
}
// A Function represents the common parts of LambdaExpr and DefStmt
type Function struct {
Comments
StartPos Position // position of DEF or LAMBDA token
Params []Expr
Body []Expr
}
func (x *Function) Span() (start, end Position) {
_, end = x.Body[len(x.Body)-1].Span()
return x.StartPos, end
}
// A LambdaExpr represents a lambda expression: lambda Var: Expr.
type LambdaExpr struct {
Comments
Lambda Position
Var []Expr
Colon Position
Expr Expr
Function
}
func (x *LambdaExpr) Span() (start, end Position) {
_, end = x.Expr.Span()
return x.Lambda, end
return x.Function.Span()
}
// ConditionalExpr represents the conditional: X if TEST else ELSE.
@@ -423,73 +479,93 @@ func (x *ConditionalExpr) Span() (start, end Position) {
return start, end
}
// A CodeBlock represents an indented code block.
type CodeBlock struct {
Statements []Expr
Start Position
End
}
func (x *CodeBlock) Span() (start, end Position) {
return x.Start, x.End.Pos
}
// A FuncDef represents a function definition expression: def foo(List):.
type FuncDef struct {
// A LoadStmt loads another module and binds names from it:
// load(Module, "x", y="foo").
//
// The AST is slightly unfaithful to the concrete syntax here because
// Skylark's load statement, so that it can be implemented in Python,
// binds some names (like y above) with an identifier and some (like x)
// without. For consistency we create fake identifiers for all the
// strings.
type LoadStmt struct {
Comments
Start Position // position of def
Load Position
Module *StringExpr
From []*Ident // name as exported by the loaded module
To []*Ident // name bound in the file that contains the load
Rparen End
ForceCompact bool // force compact (non-multiline) form when printing
}
func (x *LoadStmt) Span() (start, end Position) {
return x.Load, x.Rparen.Pos.add(")")
}
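// (Editor's note, not part of the vendored change.) For a statement such as
//
//    load(":f.bzl", "x", y = "z")
//
// the printer in this package emits To[i] = From[i] pairs, so To holds the
// names bound in the file containing the load ("x" and "y") and From holds
// the corresponding names exported by the loaded module ("x" and "z").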
// A DefStmt represents a function definition expression: def foo(List):.
type DefStmt struct {
Comments
Function
Name string
ListStart Position // position of (
Args []Expr
Body CodeBlock
End // position of the end
ForceCompact bool // force compact (non-multiline) form when printing
ForceMultiLine bool // force multiline form when printing
ColonPos Position // position of the ":"
ForceCompact bool // force compact (non-multiline) form when printing the arguments
ForceMultiLine bool // force multiline form when printing the arguments
}
func (x *FuncDef) Span() (start, end Position) {
return x.Start, x.End.Pos
func (x *DefStmt) Span() (start, end Position) {
return x.Function.Span()
}
// A ReturnExpr represents a return statement: return f(x).
type ReturnExpr struct {
// HeaderSpan returns the span of the function header `def f(...):`
func (x *DefStmt) HeaderSpan() (start, end Position) {
return x.Function.StartPos, x.ColonPos
}
// A ReturnStmt represents a return statement: return f(x).
type ReturnStmt struct {
Comments
Start Position
X Expr
End Position
Return Position
Result Expr // may be nil
}
func (x *ReturnExpr) Span() (start, end Position) {
return x.Start, x.End
func (x *ReturnStmt) Span() (start, end Position) {
if x.Result == nil {
return x.Return, x.Return.add("return")
}
_, end = x.Result.Span()
return x.Return, end
}
// A ForLoop represents a for loop block: for x in range(10):.
type ForLoop struct {
// A ForStmt represents a for loop block: for x in range(10):.
type ForStmt struct {
Comments
Start Position // position of for
LoopVars []Expr
Iterable Expr
Body CodeBlock
End // position of the end
Function
For Position // position of for
Vars Expr
X Expr
Body []Expr
}
func (x *ForLoop) Span() (start, end Position) {
return x.Start, x.End.Pos
func (x *ForStmt) Span() (start, end Position) {
end = stmtsEnd(x.Body)
return x.For, end
}
// An IfElse represents an if-else blocks sequence: if x: ... elif y: ... else: ... .
type IfElse struct {
// An IfStmt represents an if-else block: if x: ... else: ... .
// `elif`s are treated as a chain of `IfStmt`s.
type IfStmt struct {
Comments
Start Position // position of if
Conditions []Condition
End // position of the end
If Position // position of if
Cond Expr
True []Expr
ElsePos End // position of else or elif
False []Expr // optional
}
type Condition struct {
If Expr
Then CodeBlock
}
func (x *IfElse) Span() (start, end Position) {
return x.Start, x.End.Pos
func (x *IfStmt) Span() (start, end Position) {
body := x.False
if body == nil {
body = x.True
}
end = stmtsEnd(body)
return x.If, end
}

View File

@@ -24,13 +24,22 @@ package build
//
func Walk(v Expr, f func(x Expr, stk []Expr)) {
var stack []Expr
walk1(&v, &stack, func(x Expr, stk []Expr) Expr {
walk1(&v, &stack, func(x *Expr, stk []Expr) Expr {
f(*x, stk)
return nil
})
}
// WalkPointers is the same as Walk but calls the callback function with pointers to nodes.
func WalkPointers(v Expr, f func(x *Expr, stk []Expr)) {
var stack []Expr
walk1(&v, &stack, func(x *Expr, stk []Expr) Expr {
f(x, stk)
return nil
})
}
// WalkAndUpdate walks the expression tree v, calling f on all subexpressions
// Edit walks the expression tree v, calling f on all subexpressions
// in a preorder traversal. If f returns a non-nil value, the tree is mutated.
// The new value replaces the old one.
//
@@ -39,97 +48,199 @@ func Walk(v Expr, f func(x Expr, stk []Expr)) {
//
func Edit(v Expr, f func(x Expr, stk []Expr) Expr) Expr {
var stack []Expr
return walk1(&v, &stack, f)
return walk1(&v, &stack, func(x *Expr, stk []Expr) Expr {
return f(*x, stk)
})
}
// walk1 is the actual implementation of Walk and WalkAndUpdate.
// It has the same signature and meaning as Walk,
// except that it maintains in *stack the current stack
// of nodes. Using a pointer to a slice here ensures that
// as the stack grows and shrinks the storage can be
// reused for the next growth.
func walk1(v *Expr, stack *[]Expr, f func(x Expr, stk []Expr) Expr) Expr {
// EditChildren is similar to Edit but doesn't visit the initial node, instead goes
// directly to its children.
func EditChildren(v Expr, f func(x Expr, stk []Expr) Expr) {
stack := []Expr{v}
WalkOnce(v, func(x *Expr) {
walk1(x, &stack, func(x *Expr, stk []Expr) Expr {
return f(*x, stk)
})
})
}
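// exampleWalkEdit is an editor-added sketch (not part of the vendored
// change) combining the Walk and Edit entry points: Walk counts string
// literals, Edit rewrites one illustrative value in place.
func exampleWalkEdit(f *File) int {
    count := 0
    Walk(f, func(x Expr, stk []Expr) {
        if _, ok := x.(*StringExpr); ok {
            count++
        }
    })
    Edit(f, func(x Expr, stk []Expr) Expr {
        if s, ok := x.(*StringExpr); ok && s.Value == "old" {
            return &StringExpr{Value: "new"}
        }
        return nil
    })
    return count
}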
// walk1 is a helper function for Walk, WalkWithPostfix, and Edit.
func walk1(v *Expr, stack *[]Expr, f func(x *Expr, stk []Expr) Expr) Expr {
if v == nil {
return nil
}
if res := f(*v, *stack); res != nil {
if res := f(v, *stack); res != nil {
*v = res
}
*stack = append(*stack, *v)
switch v := (*v).(type) {
case *File:
for _, stmt := range v.Stmt {
walk1(&stmt, stack, f)
}
case *DotExpr:
walk1(&v.X, stack, f)
case *IndexExpr:
walk1(&v.X, stack, f)
walk1(&v.Y, stack, f)
case *KeyValueExpr:
walk1(&v.Key, stack, f)
walk1(&v.Value, stack, f)
case *SliceExpr:
walk1(&v.X, stack, f)
if v.From != nil {
walk1(&v.From, stack, f)
}
if v.To != nil {
walk1(&v.To, stack, f)
}
if v.Step != nil {
walk1(&v.Step, stack, f)
}
case *ParenExpr:
walk1(&v.X, stack, f)
case *UnaryExpr:
walk1(&v.X, stack, f)
case *BinaryExpr:
walk1(&v.X, stack, f)
walk1(&v.Y, stack, f)
case *LambdaExpr:
for i := range v.Var {
walk1(&v.Var[i], stack, f)
}
walk1(&v.Expr, stack, f)
case *CallExpr:
walk1(&v.X, stack, f)
for i := range v.List {
walk1(&v.List[i], stack, f)
}
case *ListExpr:
for i := range v.List {
walk1(&v.List[i], stack, f)
}
case *SetExpr:
for i := range v.List {
walk1(&v.List[i], stack, f)
}
case *TupleExpr:
for i := range v.List {
walk1(&v.List[i], stack, f)
}
case *DictExpr:
for i := range v.List {
walk1(&v.List[i], stack, f)
}
case *ListForExpr:
walk1(&v.X, stack, f)
for _, c := range v.For {
for j := range c.For.Var {
walk1(&c.For.Var[j], stack, f)
}
walk1(&c.For.Expr, stack, f)
for _, i := range c.Ifs {
walk1(&i.Cond, stack, f)
}
}
case *ConditionalExpr:
walk1(&v.Then, stack, f)
walk1(&v.Test, stack, f)
walk1(&v.Else, stack, f)
}
WalkOnce(*v, func(x *Expr) {
walk1(x, stack, f)
})
*stack = (*stack)[:len(*stack)-1]
return *v
}
// WalkOnce calls f on every child of v.
func WalkOnce(v Expr, f func(x *Expr)) {
switch v := v.(type) {
case *File:
for i := range v.Stmt {
f(&v.Stmt[i])
}
case *DotExpr:
f(&v.X)
case *IndexExpr:
f(&v.X)
f(&v.Y)
case *KeyValueExpr:
f(&v.Key)
f(&v.Value)
case *SliceExpr:
f(&v.X)
if v.From != nil {
f(&v.From)
}
if v.To != nil {
f(&v.To)
}
if v.Step != nil {
f(&v.Step)
}
case *ParenExpr:
f(&v.X)
case *UnaryExpr:
f(&v.X)
case *BinaryExpr:
f(&v.X)
f(&v.Y)
case *AssignExpr:
f(&v.LHS)
f(&v.RHS)
case *LambdaExpr:
for i := range v.Params {
f(&v.Params[i])
}
for i := range v.Body {
f(&v.Body[i])
}
case *CallExpr:
f(&v.X)
for i := range v.List {
f(&v.List[i])
}
case *ListExpr:
for i := range v.List {
f(&v.List[i])
}
case *SetExpr:
for i := range v.List {
f(&v.List[i])
}
case *TupleExpr:
for i := range v.List {
f(&v.List[i])
}
case *DictExpr:
for i := range v.List {
f(&v.List[i])
}
case *Comprehension:
f(&v.Body)
for _, c := range v.Clauses {
f(&c)
}
case *IfClause:
f(&v.Cond)
case *ForClause:
f(&v.Vars)
f(&v.X)
case *ConditionalExpr:
f(&v.Then)
f(&v.Test)
f(&v.Else)
case *LoadStmt:
module := (Expr)(v.Module)
f(&module)
v.Module = module.(*StringExpr)
for i := range v.From {
from := (Expr)(v.From[i])
f(&from)
v.From[i] = from.(*Ident)
to := (Expr)(v.To[i])
f(&to)
v.To[i] = to.(*Ident)
}
case *DefStmt:
for i := range v.Params {
f(&v.Params[i])
}
for i := range v.Body {
f(&v.Body[i])
}
case *IfStmt:
f(&v.Cond)
for i := range v.True {
f(&v.True[i])
}
for i := range v.False {
f(&v.False[i])
}
case *ForStmt:
f(&v.Vars)
f(&v.X)
for i := range v.Body {
f(&v.Body[i])
}
case *ReturnStmt:
if v.Result != nil {
f(&v.Result)
}
}
}
// walkStatements is a helper function for WalkStatements
func walkStatements(v Expr, stack *[]Expr, f func(x Expr, stk []Expr)) {
if v == nil {
return
}
f(v, *stack)
*stack = append(*stack, v)
traverse := func(x Expr) {
walkStatements(x, stack, f)
}
switch expr := v.(type) {
case *File:
for _, s := range expr.Stmt {
traverse(s)
}
case *DefStmt:
for _, s := range expr.Body {
traverse(s)
}
case *IfStmt:
for _, s := range expr.True {
traverse(s)
}
for _, s := range expr.False {
traverse(s)
}
case *ForStmt:
for _, s := range expr.Body {
traverse(s)
}
}
*stack = (*stack)[:len(*stack)-1]
}
// WalkStatements traverses sub statements (not all nodes)
func WalkStatements(v Expr, f func(x Expr, stk []Expr)) {
var stack []Expr
walkStatements(v, &stack, f)
}
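// (Editor's note, not part of the vendored change.) A small sketch of
// WalkStatements, which visits statement-level nodes only:
//
//    defs := 0
//    WalkStatements(f, func(x Expr, stk []Expr) {
//        if _, ok := x.(*DefStmt); ok {
//            defs++
//        }
//    })
//
// Unlike Walk, this never descends into the expressions inside a statement,
// so call arguments, list elements and other nested expressions are not visited.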

File diff suppressed because it is too large

View File

@@ -0,0 +1,565 @@
// Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file contains the protocol buffer representation of a build
// file or 'blaze query --output=proto' call.
syntax = "proto2";
package blaze_query;
// option cc_api_version = 2;
// option java_api_version = 1;
option java_package = "com.google.devtools.build.lib.query2.proto.proto2api";
message License {
repeated string license_type = 1;
repeated string exception = 2;
}
message StringDictEntry {
required string key = 1;
required string value = 2;
}
message LabelDictUnaryEntry {
required string key = 1;
required string value = 2;
}
message LabelListDictEntry {
required string key = 1;
repeated string value = 2;
}
message LabelKeyedStringDictEntry {
required string key = 1;
required string value = 2;
}
message StringListDictEntry {
required string key = 1;
repeated string value = 2;
}
// Represents an entry attribute of a Fileset rule in a build file.
message FilesetEntry {
// Indicates what to do when a source file is actually a symlink.
enum SymlinkBehavior {
COPY = 1;
DEREFERENCE = 2;
}
// The label pointing to the source target where files are copied from.
required string source = 1;
// The relative path within the fileset rule where files will be mapped.
required string destination_directory = 2;
// Whether the files= attribute was specified. This is necessary because
// no files= attribute and files=[] mean different things.
optional bool files_present = 7;
// A list of file labels to include from the source directory.
repeated string file = 3;
// If this is a fileset entry representing files within the rule
// package, this lists relative paths to files that should be excluded from
// the set. This cannot contain values if 'file' also has values.
repeated string exclude = 4;
// This field is optional because there will be some time when the new
// PB is used by tools depending on blaze query, but the new blaze version
// is not yet released.
// TODO(bazel-team): Make this field required once a version of Blaze is
// released that outputs this field.
optional SymlinkBehavior symlink_behavior = 5 [ default=COPY ];
// The prefix to strip from the path of the files in this FilesetEntry. Note
// that no value and the empty string as the value mean different things here.
optional string strip_prefix = 6;
}
// A rule attribute. Each attribute must have a type and one of the various
// value fields populated - for the most part.
//
// Attributes of BOOLEAN and TRISTATE type may set all of the int, bool, and
// string values for backwards compatibility with clients that expect them to
// be set.
//
// Attributes of INTEGER, STRING, LABEL, LICENSE, BOOLEAN, and TRISTATE type
// may set *none* of the values. This can happen if the Attribute message is
// prepared for a client that doesn't support SELECTOR_LIST, but the rule has
// a selector list value for the attribute. (Selector lists for attributes of
// other types--the collection types--are handled differently when prepared
// for such a client. The possible collection values are gathered together
// and flattened.)
//
// By checking the type, the appropriate value can be extracted - see the
// comments on each type for the associated value. The order of lists comes
// from the blaze parsing. If an attribute is of a list type, the associated
// list should never be empty.
message Attribute {
// Indicates the type of attribute.
enum Discriminator {
INTEGER = 1; // int_value
STRING = 2; // string_value
LABEL = 3; // string_value
OUTPUT = 4; // string_value
STRING_LIST = 5; // string_list_value
LABEL_LIST = 6; // string_list_value
OUTPUT_LIST = 7; // string_list_value
DISTRIBUTION_SET = 8; // string_list_value - order is unimportant
LICENSE = 9; // license
STRING_DICT = 10; // string_dict_value
FILESET_ENTRY_LIST = 11; // fileset_list_value
LABEL_LIST_DICT = 12; // label_list_dict_value
STRING_LIST_DICT = 13; // string_list_dict_value
BOOLEAN = 14; // int, bool and string value
TRISTATE = 15; // tristate, int and string value
INTEGER_LIST = 16; // int_list_value
UNKNOWN = 18; // unknown type, use only for build extensions
LABEL_DICT_UNARY = 19; // label_dict_unary_value
SELECTOR_LIST = 20; // selector_list
LABEL_KEYED_STRING_DICT = 21; // label_keyed_string_dict
DEPRECATED_STRING_DICT_UNARY = 17;
}
// Values for the TriState field type.
enum Tristate {
NO = 0;
YES = 1;
AUTO = 2;
}
message SelectorEntry {
// The key of the selector entry. At this time, this is the label of a
// config_setting rule, or the pseudo-label "//conditions:default".
optional string label = 1;
// True if the entry's value is the default value for the type as a
// result of the condition value being specified as None (ie:
// {"//condition": None}).
optional bool is_default_value = 16;
// Exactly one of the following fields (except for glob_criteria) must be
// populated - note that the BOOLEAN and TRISTATE caveat in Attribute's
// comment does not apply here. The type field in the SelectorList
// containing this entry indicates which of these fields is populated,
// in accordance with the comments on Discriminator enum values above.
// (To be explicit: BOOLEAN populates the boolean_value field and TRISTATE
// populates the tristate_value field.)
optional int32 int_value = 2;
optional string string_value = 3;
optional bool boolean_value = 4;
optional Tristate tristate_value = 5;
repeated string string_list_value = 6;
optional License license = 7;
repeated StringDictEntry string_dict_value = 8;
repeated FilesetEntry fileset_list_value = 9;
repeated LabelListDictEntry label_list_dict_value = 10;
repeated StringListDictEntry string_list_dict_value = 11;
repeated int32 int_list_value = 13;
repeated LabelDictUnaryEntry label_dict_unary_value = 15;
repeated LabelKeyedStringDictEntry label_keyed_string_dict_value = 17;
repeated DEPRECATED_GlobCriteria DEPRECATED_glob_criteria = 12;
repeated bytes DEPRECATED_string_dict_unary_value = 14;
}
message Selector {
// The list of (label, value) pairs in the map that defines the selector.
// At this time, this cannot be empty, i.e. a selector has at least one
// entry.
repeated SelectorEntry entries = 1;
// Whether or not this has any default values.
optional bool has_default_value = 2;
// The error message when no condition matches.
optional string no_match_error = 3;
}
message SelectorList {
// The type that this selector list evaluates to, and the type that each
// selector in the list evaluates to. At this time, this cannot be
// SELECTOR_LIST, i.e. selector lists do not nest.
optional Discriminator type = 1;
// The list of selector elements in this selector list. At this time, this
// cannot be empty, i.e. a selector list is never empty.
repeated Selector elements = 2;
}
// The name of the attribute
required string name = 1;
// The location of the target in the BUILD file in a machine-parseable form.
optional Location DEPRECATED_parseable_location = 12;
// Whether the attribute was explicitly specified
optional bool explicitly_specified = 13;
// If this attribute has a string value or a string list value, then this
// may be set to indicate that the value may be treated as a label that
// isn't a dependency of this attribute's rule.
optional bool nodep = 20;
// The type of attribute. This message is used for all of the different
// attribute types so the discriminator helps for figuring out what is
// stored in the message.
required Discriminator type = 2;
// If this attribute has an integer value this will be populated.
// Boolean and TriState also use this field as [0,1] and [-1,0,1]
// for [false, true] and [auto, no, yes] respectively.
optional int32 int_value = 3;
// If the attribute has a string value this will be populated. Label and
// path attributes use this field as the value even though the type may
// be LABEL or something else other than STRING.
optional string string_value = 5;
// If the attribute has a boolean value this will be populated.
optional bool boolean_value = 14;
// If the attribute is a Tristate value, this will be populated.
optional Tristate tristate_value = 15;
// The value of the attribute has a list of string values (label and path
// note from STRING applies here as well).
repeated string string_list_value = 6;
// If this is a license attribute, the license information is stored here.
optional License license = 7;
// If this is a string dict, each entry will be stored here.
repeated StringDictEntry string_dict_value = 8;
// If the attribute is part of a Fileset, the fileset entries are stored in
// this field.
repeated FilesetEntry fileset_list_value = 9;
// If this is a label list dict, each entry will be stored here.
repeated LabelListDictEntry label_list_dict_value = 10;
// If this is a string list dict, each entry will be stored here.
repeated StringListDictEntry string_list_dict_value = 11;
// The value of the attribute has a list of int32 values
repeated int32 int_list_value = 17;
// If this is a label dict unary, each entry will be stored here.
repeated LabelDictUnaryEntry label_dict_unary_value = 19;
// If this is a label-keyed string dict, each entry will be stored here.
repeated LabelKeyedStringDictEntry label_keyed_string_dict_value = 22;
// If this attribute's value is an expression containing one or more select
// expressions, then its type is SELECTOR_LIST and a SelectorList will be
// stored here.
optional SelectorList selector_list = 21;
repeated DEPRECATED_GlobCriteria DEPRECATED_glob_criteria = 16;
repeated bytes DEPRECATED_string_dict_unary_value = 18;
}
// A rule instance (e.g., cc_library foo, java_binary bar).
message Rule {
// The name of the rule (formatted as an absolute label, e.g. //foo/bar:baz).
required string name = 1;
// The rule class (e.g., java_library)
required string rule_class = 2;
// The BUILD file and line number of the location (formatted as
// <absolute_path>:<line_number>) in the rule's package's BUILD file where the
// rule instance was instantiated. The line number will be that of a rule
// invocation or macro call (that in turn invoked a rule). See
// https://docs.bazel.build/versions/master/skylark/macros.html#macro-creation
optional string location = 3;
// All of the attributes that describe the rule.
repeated Attribute attribute = 4;
// All of the inputs to the rule (formatted as absolute labels). These are
// predecessors in the dependency graph.
repeated string rule_input = 5;
// All of the outputs of the rule (formatted as absolute labels). These are
// successors in the dependency graph.
repeated string rule_output = 6;
// The set of all default settings affecting this rule. The name of a default
// setting is "<setting type>_<setting name>". The currently defined setting
// types are:
//
// - 'blaze': settings implemented in Blaze itself
repeated string default_setting = 7;
// The location of the target in the BUILD file in a machine-parseable form.
optional Location DEPRECATED_parseable_location = 8;
// The rule's class's public by default value.
optional bool public_by_default = 9;
// If this rule is of a skylark-defined RuleClass.
optional bool is_skylark = 10;
// List of Skylark aspects that this rule applies.
repeated AttributeAspect skylark_attribute_aspects = 11;
// Hash encapsulating the behavior of this Skylark rule. Any change to this
// rule's definition that could change its behavior will be reflected here.
optional string skylark_environment_hash_code = 12;
}
// A pairing of attribute name and a Skylark aspect that is applied to that
// attribute.
message AttributeAspect {
required string attribute_name = 1;
required SkylarkAspect aspect = 2;
}
// Aspect defined in Skylark.
message SkylarkAspect {
required string extension_file_label = 1;
required string exported_name = 2;
repeated Attribute attribute = 3;
}
// Summary of all transitive dependencies of 'rule,' where each dependent
// rule is included only once in the 'dependency' field. Gives complete
// information to analyze the single build target labeled rule.name,
// including optional location of target in BUILD file.
message RuleSummary {
required Rule rule = 1;
repeated Rule dependency = 2;
optional string location = 3;
}
// A package group. Aside from the name, it contains the list of packages
// present in the group (as specified in the BUILD file).
message PackageGroup {
// The name of the package group
required string name = 1;
// The list of packages as specified in the BUILD file. Currently this is
// only a list of packages, but some time in the future, there might be
// some type of wildcard mechanism.
repeated string contained_package = 2;
// The list of sub package groups included in this one.
repeated string included_package_group = 3;
// The location of the target in the BUILD file in a machine-parseable form.
optional Location DEPRECATED_parseable_location = 4;
}
// An environment group.
message EnvironmentGroup {
// The name of the environment group.
required string name = 1;
// The environments that belong to this group (as labels).
repeated string environment = 2;
// The member environments that rules implicitly support if not otherwise
// specified.
repeated string default = 3;
}
// A file that is an input into the build system.
// Next-Id: 10
message SourceFile {
// The name of the source file (a label).
required string name = 1;
// The location of the source file. This is a path with line numbers, not
// a label in the build system.
optional string location = 2;
// The location of the corresponding label in the BUILD file in a
// machine-parseable form.
optional Location DEPRECATED_parseable_location = 7;
// Labels of .bzl (Skylark) files that are transitively loaded in this BUILD
// file. This is present only when the SourceFile represents a BUILD file that
// loaded .bzl files.
// TODO(bazel-team): Rename this field.
repeated string subinclude = 3;
// Labels of package groups that are mentioned in the visibility declaration
// for this source file.
repeated string package_group = 4;
// Labels mentioned in the visibility declaration (including :__pkg__ and
// //visibility: ones)
repeated string visibility_label = 5;
// The package-level features enabled for this package. Only present if the
// SourceFile represents a BUILD file.
repeated string feature = 6;
// License attribute for the file.
optional License license = 8;
// True if the package contains an error. Only present if the SourceFile
// represents a BUILD file.
optional bool package_contains_errors = 9;
}
// A file that is the output of a build rule.
message GeneratedFile {
// The name of the generated file (a label).
required string name = 1;
// The label of the target that generates the file.
required string generating_rule = 2;
// The path of the output file (not a label).
optional string location = 3;
}
// A target from a blaze query execution. Similar to the Attribute message,
// the Discriminator is used to determine which field contains information.
// For any given type, only one of these can be populated in a single Target.
message Target {
enum Discriminator {
RULE = 1;
SOURCE_FILE = 2;
GENERATED_FILE = 3;
PACKAGE_GROUP = 4;
ENVIRONMENT_GROUP = 5;
}
// The type of target contained in the message.
required Discriminator type = 1;
// If this target represents a rule, the rule is stored here.
optional Rule rule = 2;
// A file that is not generated by the build system (version controlled
// or created by the test harness).
optional SourceFile source_file = 3;
// A generated file that is the output of a rule.
optional GeneratedFile generated_file = 4;
// A package group.
optional PackageGroup package_group = 5;
// An environment group.
optional EnvironmentGroup environment_group = 6;
}
// Container for all of the blaze query results.
message QueryResult {
// All of the targets returned by the blaze query.
repeated Target target = 1;
}
////////////////////////////////////////////////////////////////////////////
// Messages dealing with querying the BUILD language itself. For now, this is
// quite simplistic: Blaze can only tell the names of the rule classes and their
// attributes, together with their types.
// Information about allowed rule classes for a specific attribute of a rule.
message AllowedRuleClassInfo {
enum AllowedRuleClasses {
ANY = 1; // Any rule is allowed to be in this attribute
SPECIFIED = 2; // Only the explicitly listed rules are allowed
}
required AllowedRuleClasses policy = 1;
// Rule class names of rules allowed in this attribute, e.g. "cc_library",
// "py_binary". Only present if the allowed_rule_classes field is set to
// SPECIFIED.
repeated string allowed_rule_class = 2;
}
// This message represents a single attribute of a single rule.
message AttributeDefinition {
// Attribute name, e.g. "name", "srcs", "deps"
required string name = 1;
required Attribute.Discriminator type = 2;
required bool mandatory = 3;
// Only present for attributes of type LABEL and LABEL_LIST.
optional AllowedRuleClassInfo allowed_rule_classes = 4;
optional string documentation = 5;
}
message RuleDefinition {
required string name = 1;
// Only contains documented attributes
repeated AttributeDefinition attribute = 2;
optional string documentation = 3;
// Only for build extensions: label to file that defines the extension
optional string label = 4;
}
message BuildLanguage {
// Only contains documented rule definitions
repeated RuleDefinition rule = 1;
}
message Location {
optional int32 start_offset = 1;
optional int32 start_line = 2;
optional int32 start_column = 3;
optional int32 end_offset = 4;
optional int32 end_line = 5;
optional int32 end_column = 6;
}
message MakeVarBinding {
required string value = 1;
required string platform_set_regexp = 2;
}
message MakeVar {
required string name = 1;
repeated MakeVarBinding binding = 2;
}
message DEPRECATED_GlobCriteria {
// List of includes (or items if these criteria did not come from a glob)
repeated string include = 1;
// List of exclude expressions
repeated string exclude = 2;
// Whether this message came from a glob
optional bool glob = 3;
}
message Event {
enum EventKind {
ERROR = 1;
WARNING = 2;
INFO = 3;
PROGRESS = 4;
}
required EventKind kind = 1;
optional Location DEPRECATED_location = 2;
optional string message = 3;
}
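
As with the Attribute message, exactly one of the per-type fields in Target is set, selected by the Discriminator. Below is a minimal sketch, not part of this commit, of how a consumer might walk a QueryResult; it assumes the generated Go bindings at github.com/bazelbuild/buildtools/build_proto (the import path used elsewhere in this change) and standard protoc-gen-go getter and constant names.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	buildpb "github.com/bazelbuild/buildtools/build_proto"
	"github.com/golang/protobuf/proto"
)

func main() {
	// Output of something like `bazel query --output=proto //...` saved to a file.
	data, err := ioutil.ReadFile("query_result.pb")
	if err != nil {
		log.Fatal(err)
	}
	var result buildpb.QueryResult
	if err := proto.Unmarshal(data, &result); err != nil {
		log.Fatal(err)
	}
	for _, t := range result.GetTarget() {
		// Only the field matching the Discriminator is populated.
		switch t.GetType() {
		case buildpb.Target_RULE:
			fmt.Println("rule:", t.GetRule().GetName())
		case buildpb.Target_SOURCE_FILE:
			fmt.Println("source file:", t.GetSourceFile().GetName())
		case buildpb.Target_GENERATED_FILE:
			fmt.Println("generated file:", t.GetGeneratedFile().GetName())
		}
	}
}
```
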

View File

@@ -7,6 +7,7 @@ go_library(
importpath = "github.com/bazelbuild/buildtools/buildozer",
visibility = ["//visibility:private"],
deps = [
"//vendor/github.com/bazelbuild/buildtools/build:go_default_library",
"//vendor/github.com/bazelbuild/buildtools/edit:go_default_library",
"//vendor/github.com/bazelbuild/buildtools/tables:go_default_library",
],

View File

@@ -4,25 +4,12 @@ Buildozer is a command line tool to rewrite multiple
[Bazel](https://github.com/bazelbuild/bazel) BUILD files using
standard commands.
## Dependencies
1. Protobuf go runtime: to download
`go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
## Installation
1. Change directory to the buildifier/buildozer
1. Build a binary and put it into your $GOPATH/bin:
```bash
gopath=$(go env GOPATH)
cd $gopath/src/github.com/bazelbuild/buildtools/buildozer
```
2. Install
```bash
go install
go get github.com/bazelbuild/buildtools/buildozer
```
## Usage
@@ -77,7 +64,7 @@ Buildozer supports the following commands(`'command args'`):
* `add <attr> <value(s)>`: Adds value(s) to a list attribute of a rule. If a
value is already present in the list, it is not added.
* `new_load <path> <symbol(s)>`: Add a load statement for the given path,
* `new_load <path> <[to=]from(s)>`: Add a load statement for the given path,
importing the symbols. Before using this, make sure to run
`buildozer 'fix movePackageToTop'`. Afterwards, consider running
`buildozer 'fix unusedLoads'`.
@@ -96,6 +83,8 @@ Buildozer supports the following commands(`'command args'`):
* `remove <attr> <value(s)>`: Removes `value(s)` from the list `attr`. The
wildcard `*` matches all attributes. Lists containing none of the `value(s)` are
not modified.
* `remove_comment <attr>? <value>?`: Removes the comment attached to the rule,
an attribute, or a specific value in a list.
* `rename <old_attr> <new_attr>`: Rename the `old_attr` to `new_attr` which must
not yet exist.
* `replace <attr> <old_value> <new_value>`: Replaces `old_value` with `new_value`
@@ -117,6 +106,11 @@ Buildozer supports the following commands(`'command args'`):
exists in the `to_rule`, it will be overwritten.
* `copy_no_overwrite <attr> <from_rule>`: Copies the value of `attr` between
rules. If it exists in the `to_rule`, no action is taken.
* `dict_add <attr> <(key:value)(s)>`: Sets the value of a key for the dict
attribute `attr`. If the key was already present, it will _not_ be overwritten
* `dict_set <attr> <(key:value)(s)>`: Sets the value of a key for the dict
attribute `attr`. If the key was already present, its old value is replaced.
* `dict_delete <attr> <key(s)>`: Deletes the key for the dict attribute `attr`.
Here, `<attr>` represents an attribute (being `add`ed/`rename`d/`delete`d etc.),
e.g. `srcs`; `<value(s)>` represents values of the attribute, and so on.
@@ -124,7 +118,7 @@ A '?' indicates that the preceding argument is optional.
The fix command without a fix specified applies all eligible fixes.
Use `//path/to/pkg:__pkg__` as label for file level changes like `new_load` and
`new_rule`.
`new`.
A transformation can be applied to all rules of a particular kind by using
`%rule_kind` at the end of the label (see examples below).
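
The `to=from` form of `new_load` loads a symbol under a different local name. As a rough illustration, the sketch below (not part of this commit) builds such a load with the NewLoad helper introduced later in this change and prints it with the buildtools formatter; the exact output layout is an assumption.

```go
package main

import (
	"fmt"

	"github.com/bazelbuild/buildtools/build"
	"github.com/bazelbuild/buildtools/edit"
)

func main() {
	// `new_load //foo:defs.bzl my_macro renamed=original` corresponds to:
	load := edit.NewLoad("//foo:defs.bzl",
		[]string{"my_macro", "original"}, // from: names exported by the .bzl file
		[]string{"my_macro", "renamed"})  // to: names bound in this BUILD file
	f := &build.File{Type: build.TypeBuild, Stmt: []build.Expr{load}}
	fmt.Print(string(build.Format(f)))
	// Roughly expected: load("//foo:defs.bzl", "my_macro", renamed = "original")
}
```
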

View File

@@ -20,11 +20,16 @@ import (
"os"
"strings"
"github.com/bazelbuild/buildtools/build"
"github.com/bazelbuild/buildtools/edit"
"github.com/bazelbuild/buildtools/tables"
)
var (
buildVersion = "redacted"
buildScmRevision = "redacted"
version = flag.Bool("version", false, "Print the version of buildozer")
stdout = flag.Bool("stdout", false, "write changed BUILD file to stdout")
buildifier = flag.String("buildifier", "", "format output using a specific buildifier binary. If empty, use built-in formatter")
parallelism = flag.Int("P", 0, "number of cores to use for concurrent actions")
@@ -61,6 +66,12 @@ func stringList(name, help string) func() []string {
func main() {
flag.Parse()
if *version {
fmt.Printf("buildozer version: %s \n", buildVersion)
fmt.Printf("buildozer scm revision: %s \n", buildScmRevision)
os.Exit(0)
}
if *tablesPath != "" {
if err := tables.ParseAndUpdateJSONDefinitions(*tablesPath, false); err != nil {
fmt.Fprintf(os.Stderr, "buildifier: failed to parse %s for -tables: %s\n", *tablesPath, err)
@@ -75,6 +86,9 @@ func main() {
}
}
if !(*shortenLabelsFlag) {
build.DisableRewrites = []string{"label"}
}
edit.ShortenLabelsFlag = *shortenLabelsFlag
edit.DeleteWithComments = *deleteWithComments
opts := &edit.Options{

View File

@@ -10,7 +10,8 @@ distributed under the License is distributed on an "AS IS" BASIS,
See the License for the specific language governing permissions and
limitations under the License.
*/
// Buildozer is a tool for programatically editing BUILD files.
// Buildozer is a tool for programmatically editing BUILD files.
package edit
@@ -58,7 +59,7 @@ func NewOpts() *Options {
return &Options{NumIO: 200, PreferEOLComments: true}
}
// Usage is a user-overriden func to print the program usage.
// Usage is a user-overridden func to print the program usage.
var Usage = func() {}
var fileModified = false // set to true when a file has been fixed
@@ -69,7 +70,7 @@ const stdinPackageName = "-" // the special package name to represent stdin
type CmdEnvironment struct {
File *build.File // the AST
Rule *build.Rule // the rule to modify
Vars map[string]*build.BinaryExpr // global variables set in the build file
Vars map[string]*build.AssignExpr // global variables set in the build file
Pkg string // the full package name
Args []string // the command-line arguments
output *apipb.Output_Record // output proto, stores whatever a command wants to print
@@ -84,9 +85,10 @@ func cmdAdd(opts *Options, env CmdEnvironment) (*build.File, error) {
AddValueToListAttribute(env.Rule, attr, env.Pkg, &build.LiteralExpr{Token: val}, &env.Vars)
continue
}
strVal := &build.StringExpr{Value: ShortenLabel(val, env.Pkg)}
strVal := getStringExpr(val, env.Pkg)
AddValueToListAttribute(env.Rule, attr, env.Pkg, strVal, &env.Vars)
}
ResolveAttr(env.Rule, attr, env.Pkg)
return env.File, nil
}
@@ -96,8 +98,10 @@ func cmdComment(opts *Options, env CmdEnvironment) (*build.File, error) {
str = strings.Replace(str, "\\n", "\n", -1)
// Multiline comments should go on a separate line.
fullLine := !opts.PreferEOLComments || strings.Contains(str, "\n")
str = strings.Replace("# "+str, "\n", "\n# ", -1)
comment := []build.Comment{{Token: str}}
comment := []build.Comment{}
for _, line := range strings.Split(str, "\n") {
comment = append(comment, build.Comment{Token: "# " + line})
}
// The comment might be attached to a rule, an attribute, or a value in a list,
// depending on how many arguments are passed.
@@ -107,9 +111,9 @@ func cmdComment(opts *Options, env CmdEnvironment) (*build.File, error) {
case 2: // Attach to an attribute
if attr := env.Rule.AttrDefn(env.Args[0]); attr != nil {
if fullLine {
attr.X.Comment().Before = comment
attr.LHS.Comment().Before = comment
} else {
attr.Y.Comment().Suffix = comment
attr.RHS.Comment().Suffix = comment
}
}
case 3: // Attach to a specific value in a list
@@ -218,7 +222,7 @@ func cmdNew(opts *Options, env CmdEnvironment) (*build.File, error) {
return nil, fmt.Errorf("rule '%s' already exists", name)
}
call := &build.CallExpr{X: &build.LiteralExpr{Token: kind}}
call := &build.CallExpr{X: &build.Ident{Name: kind}}
rule := &build.Rule{call, ""}
rule.SetAttr("name", &build.StringExpr{Value: name})
@@ -253,7 +257,16 @@ func findInsertionIndex(env CmdEnvironment) (bool, int, error) {
}
func cmdNewLoad(opts *Options, env CmdEnvironment) (*build.File, error) {
env.File.Stmt = InsertLoad(env.File.Stmt, env.Args)
from := env.Args[1:]
to := append([]string{}, from...)
for i := range from {
if s := strings.SplitN(from[i], "=", 2); len(s) == 2 {
to[i] = s[0]
from[i] = s[1]
}
}
env.File.Stmt = InsertLoad(env.File.Stmt, env.Args[0], from, to)
return env.File, nil
}
@@ -290,6 +303,8 @@ func cmdPrint(opts *Options, env CmdEnvironment) (*build.File, error) {
fields[i] = &apipb.Output_Record_Field{Value: &apipb.Output_Record_Field_Error{Error: apipb.Output_Record_Field_MISSING}}
} else if lit, ok := value.(*build.LiteralExpr); ok {
fields[i] = &apipb.Output_Record_Field{Value: &apipb.Output_Record_Field_Text{lit.Token}}
} else if lit, ok := value.(*build.Ident); ok {
fields[i] = &apipb.Output_Record_Field{Value: &apipb.Output_Record_Field_Text{lit.Name}}
} else if string, ok := value.(*build.StringExpr); ok {
fields[i] = &apipb.Output_Record_Field{
Value: &apipb.Output_Record_Field_Text{string.Value},
@@ -326,6 +341,7 @@ func cmdRemove(opts *Options, env CmdEnvironment) (*build.File, error) {
ListAttributeDelete(env.Rule, key, val, env.Pkg)
fixed = true
}
ResolveAttr(env.Rule, key, env.Pkg)
}
if fixed {
return env.File, nil
@@ -334,6 +350,38 @@ func cmdRemove(opts *Options, env CmdEnvironment) (*build.File, error) {
return nil, nil
}
func cmdRemoveComment(opts *Options, env CmdEnvironment) (*build.File, error) {
switch len(env.Args) {
case 0: // Remove comment attached to rule
env.Rule.Call.Comments.Before = nil
env.Rule.Call.Comments.Suffix = nil
env.Rule.Call.Comments.After = nil
case 1: // Remove comment attached to attr
if attr := env.Rule.AttrDefn(env.Args[0]); attr != nil {
attr.Comments.Before = nil
attr.Comments.Suffix = nil
attr.Comments.After = nil
attr.LHS.Comment().Before = nil
attr.LHS.Comment().Suffix = nil
attr.LHS.Comment().After = nil
attr.RHS.Comment().Before = nil
attr.RHS.Comment().Suffix = nil
attr.RHS.Comment().After = nil
}
case 2: // Remove comment attached to value
if attr := env.Rule.Attr(env.Args[0]); attr != nil {
if expr := ListFind(attr, env.Args[1], env.Pkg); expr != nil {
expr.Comments.Before = nil
expr.Comments.Suffix = nil
expr.Comments.After = nil
}
}
default:
panic("cmdRemoveComment")
}
return env.File, nil
}
func cmdRename(opts *Options, env CmdEnvironment) (*build.File, error) {
oldAttr := env.Args[0]
newAttr := env.Args[1]
@@ -350,7 +398,7 @@ func cmdReplace(opts *Options, env CmdEnvironment) (*build.File, error) {
attr := env.Rule.Attr(key)
if e, ok := attr.(*build.StringExpr); ok {
if LabelsEqual(e.Value, oldV, env.Pkg) {
env.Rule.SetAttr(key, getAttrValueExpr(key, []string{newV}))
env.Rule.SetAttr(key, getAttrValueExpr(key, []string{newV}, env))
}
} else {
ListReplace(attr, oldV, newV, env.Pkg)
@@ -373,7 +421,7 @@ func cmdSubstitute(opts *Options, env CmdEnvironment) (*build.File, error) {
continue
}
if newValue, ok := stringSubstitute(e.Value, oldRegexp, newTemplate); ok {
env.Rule.SetAttr(key, getAttrValueExpr(key, []string{newValue}))
env.Rule.SetAttr(key, getAttrValueExpr(key, []string{newValue}, env))
}
}
return env.File, nil
@@ -385,7 +433,7 @@ func cmdSet(opts *Options, env CmdEnvironment) (*build.File, error) {
if attr == "kind" {
env.Rule.SetKind(args[0])
} else {
env.Rule.SetAttr(attr, getAttrValueExpr(attr, args))
env.Rule.SetAttr(attr, getAttrValueExpr(attr, args, env))
}
return env.File, nil
}
@@ -397,12 +445,12 @@ func cmdSetIfAbsent(opts *Options, env CmdEnvironment) (*build.File, error) {
return nil, fmt.Errorf("setting 'kind' is not allowed for set_if_absent. Got %s", env.Args)
}
if env.Rule.Attr(attr) == nil {
env.Rule.SetAttr(attr, getAttrValueExpr(attr, args))
env.Rule.SetAttr(attr, getAttrValueExpr(attr, args, env))
}
return env.File, nil
}
func getAttrValueExpr(attr string, args []string) build.Expr {
func getAttrValueExpr(attr string, args []string, env CmdEnvironment) build.Expr {
switch {
case attr == "kind":
return nil
@@ -414,17 +462,28 @@ func getAttrValueExpr(attr string, args []string) build.Expr {
return &build.ListExpr{List: list}
case IsList(attr) && !(len(args) == 1 && strings.HasPrefix(args[0], "glob(")):
var list []build.Expr
for _, i := range args {
list = append(list, &build.StringExpr{Value: i})
for _, arg := range args {
list = append(list, getStringExpr(arg, env.Pkg))
}
return &build.ListExpr{List: list}
case len(args) == 0:
// Expected a non-list argument, nothing provided
return &build.Ident{Name: "None"}
case IsString(attr):
return &build.StringExpr{Value: args[0]}
return getStringExpr(args[0], env.Pkg)
default:
return &build.LiteralExpr{Token: args[0]}
return &build.Ident{Name: args[0]}
}
}
func getStringExpr(value, pkg string) build.Expr {
unquoted, triple, err := build.Unquote(value)
if err == nil {
return &build.StringExpr{Value: ShortenLabel(unquoted, pkg), TripleQuote: triple}
}
return &build.StringExpr{Value: ShortenLabel(value, pkg)}
}
func cmdCopy(opts *Options, env CmdEnvironment) (*build.File, error) {
attrName := env.Args[0]
from := env.Args[1]
@@ -443,6 +502,77 @@ func cmdCopyNoOverwrite(opts *Options, env CmdEnvironment) (*build.File, error)
return copyAttributeBetweenRules(env, attrName, from)
}
// cmdDictAdd adds a key to a dict, if that key does _not_ exist already.
func cmdDictAdd(opts *Options, env CmdEnvironment) (*build.File, error) {
attr := env.Args[0]
args := env.Args[1:]
dict := &build.DictExpr{}
currDict, ok := env.Rule.Attr(attr).(*build.DictExpr)
if ok {
dict = currDict
}
for _, x := range args {
kv := strings.Split(x, ":")
expr := getStringExpr(kv[1], env.Pkg)
prev := DictionaryGet(dict, kv[0])
if prev == nil {
// Only set the value if the value is currently unset.
DictionarySet(dict, kv[0], expr)
}
}
env.Rule.SetAttr(attr, dict)
return env.File, nil
}
// cmdDictSet adds a key to a dict, overwriting any previous values.
func cmdDictSet(opts *Options, env CmdEnvironment) (*build.File, error) {
attr := env.Args[0]
args := env.Args[1:]
dict := &build.DictExpr{}
currDict, ok := env.Rule.Attr(attr).(*build.DictExpr)
if ok {
dict = currDict
}
for _, x := range args {
kv := strings.Split(x, ":")
expr := getStringExpr(kv[1], env.Pkg)
// Set overwrites previous values.
DictionarySet(dict, kv[0], expr)
}
env.Rule.SetAttr(attr, dict)
return env.File, nil
}
// cmdDictRemove removes a key from a dict.
func cmdDictRemove(opts *Options, env CmdEnvironment) (*build.File, error) {
attr := env.Args[0]
args := env.Args[1:]
thing := env.Rule.Attr(attr)
dictAttr, ok := thing.(*build.DictExpr)
if !ok {
return env.File, nil
}
for _, x := range args {
// should errors here be flagged?
DictionaryDelete(dictAttr, x)
env.Rule.SetAttr(attr, dictAttr)
}
// If the removal results in the dict having no contents, delete the attribute (stay clean!)
if dictAttr == nil || len(dictAttr.List) == 0 {
env.Rule.DelAttr(attr)
}
return env.File, nil
}
func copyAttributeBetweenRules(env CmdEnvironment, attrName string, from string) (*build.File, error) {
fromRule := FindRuleByName(env.File, from)
if fromRule == nil {
@@ -453,7 +583,7 @@ func copyAttributeBetweenRules(env CmdEnvironment, attrName string, from string)
return nil, fmt.Errorf("rule '%s' does not have attribute '%s'", from, attrName)
}
ast, err := build.Parse("" /* filename */, []byte(build.FormatString(attr)))
ast, err := build.ParseBuild("" /* filename */, []byte(build.FormatString(attr)))
if err != nil {
return nil, fmt.Errorf("could not parse attribute value %v", build.FormatString(attr))
}
@@ -474,6 +604,7 @@ func cmdFix(opts *Options, env CmdEnvironment) (*build.File, error) {
// CommandInfo provides a command function and info on incoming arguments.
type CommandInfo struct {
Fn func(*Options, CmdEnvironment) (*build.File, error)
PerRule bool
MinArg int
MaxArg int
Template string
@@ -482,23 +613,27 @@ type CommandInfo struct {
// AllCommands associates the command names with their function and number
// of arguments.
var AllCommands = map[string]CommandInfo{
"add": {cmdAdd, 2, -1, "<attr> <value(s)>"},
"new_load": {cmdNewLoad, 1, -1, "<path> <symbol(s)>"},
"comment": {cmdComment, 1, 3, "<attr>? <value>? <comment>"},
"print_comment": {cmdPrintComment, 0, 2, "<attr>? <value>?"},
"delete": {cmdDelete, 0, 0, ""},
"fix": {cmdFix, 0, -1, "<fix(es)>?"},
"move": {cmdMove, 3, -1, "<old_attr> <new_attr> <value(s)>"},
"new": {cmdNew, 2, 4, "<rule_kind> <rule_name> [(before|after) <relative_rule_name>]"},
"print": {cmdPrint, 0, -1, "<attribute(s)>"},
"remove": {cmdRemove, 1, -1, "<attr> <value(s)>"},
"rename": {cmdRename, 2, 2, "<old_attr> <new_attr>"},
"replace": {cmdReplace, 3, 3, "<attr> <old_value> <new_value>"},
"substitute": {cmdSubstitute, 3, 3, "<attr> <old_regexp> <new_template>"},
"set": {cmdSet, 2, -1, "<attr> <value(s)>"},
"set_if_absent": {cmdSetIfAbsent, 2, -1, "<attr> <value(s)>"},
"copy": {cmdCopy, 2, 2, "<attr> <from_rule>"},
"copy_no_overwrite": {cmdCopyNoOverwrite, 2, 2, "<attr> <from_rule>"},
"add": {cmdAdd, true, 2, -1, "<attr> <value(s)>"},
"new_load": {cmdNewLoad, false, 1, -1, "<path> <[to=]from(s)>"},
"comment": {cmdComment, true, 1, 3, "<attr>? <value>? <comment>"},
"print_comment": {cmdPrintComment, true, 0, 2, "<attr>? <value>?"},
"delete": {cmdDelete, true, 0, 0, ""},
"fix": {cmdFix, true, 0, -1, "<fix(es)>?"},
"move": {cmdMove, true, 3, -1, "<old_attr> <new_attr> <value(s)>"},
"new": {cmdNew, false, 2, 4, "<rule_kind> <rule_name> [(before|after) <relative_rule_name>]"},
"print": {cmdPrint, true, 0, -1, "<attribute(s)>"},
"remove": {cmdRemove, true, 1, -1, "<attr> <value(s)>"},
"remove_comment": {cmdRemoveComment, true, 0, 2, "<attr>? <value>?"},
"rename": {cmdRename, true, 2, 2, "<old_attr> <new_attr>"},
"replace": {cmdReplace, true, 3, 3, "<attr> <old_value> <new_value>"},
"substitute": {cmdSubstitute, true, 3, 3, "<attr> <old_regexp> <new_template>"},
"set": {cmdSet, true, 1, -1, "<attr> <value(s)>"},
"set_if_absent": {cmdSetIfAbsent, true, 1, -1, "<attr> <value(s)>"},
"copy": {cmdCopy, true, 2, 2, "<attr> <from_rule>"},
"copy_no_overwrite": {cmdCopyNoOverwrite, true, 2, 2, "<attr> <from_rule>"},
"dict_add": {cmdDictAdd, true, 2, -1, "<attr> <(key:value)(s)>"},
"dict_set": {cmdDictSet, true, 2, -1, "<attr> <(key:value)(s)>"},
"dict_remove": {cmdDictRemove, true, 2, -1, "<attr> <key(s)>"},
}
func expandTargets(f *build.File, rule string) ([]*build.Rule, error) {
@@ -530,16 +665,12 @@ func filterRules(opts *Options, rules []*build.Rule) (result []*build.Rule) {
return rules
}
for _, rule := range rules {
acceptableType := false
for _, filterType := range opts.FilterRuleTypes {
if rule.Kind() == filterType {
acceptableType = true
result = append(result, rule)
break
}
}
if acceptableType || rule.Kind() == "package" {
result = append(result, rule)
}
}
return
}
@@ -564,6 +695,7 @@ func checkCommandUsage(name string, cmd CommandInfo, count int) {
name, cmd.MaxArg)
}
Usage()
os.Exit(1)
}
// Match text that only contains spaces if they're escaped with '\'.
@@ -630,16 +762,13 @@ type rewriteResult struct {
// getGlobalVariables returns the global variable assignments in the provided list of expressions.
// That is, for each variable assignment of the form
// a = v
// vars["a"] will contain the BinaryExpr whose Y value is the assignment "a = v".
func getGlobalVariables(exprs []build.Expr) (vars map[string]*build.BinaryExpr) {
vars = make(map[string]*build.BinaryExpr)
// vars["a"] will contain the AssignExpr whose RHS value is the assignment "a = v".
func getGlobalVariables(exprs []build.Expr) (vars map[string]*build.AssignExpr) {
vars = make(map[string]*build.AssignExpr)
for _, expr := range exprs {
if binExpr, ok := expr.(*build.BinaryExpr); ok {
if binExpr.Op != "=" {
continue
}
if lhs, ok := binExpr.X.(*build.LiteralExpr); ok {
vars[lhs.Token] = binExpr
if as, ok := expr.(*build.AssignExpr); ok {
if lhs, ok := as.LHS.(*build.Ident); ok {
vars[lhs.Name] = as
}
}
}
@@ -695,12 +824,12 @@ func rewrite(opts *Options, commandsForFile commandsForFile) *rewriteResult {
}
}
f, err := build.Parse(name, data)
f, err := build.ParseBuild(name, data)
if err != nil {
return &rewriteResult{file: name, errs: []error{err}}
}
vars := map[string]*build.BinaryExpr{}
vars := map[string]*build.AssignExpr{}
if opts.EditVariables {
vars = getGlobalVariables(f.Stmt)
}
@@ -726,8 +855,14 @@ func rewrite(opts *Options, commandsForFile commandsForFile) *rewriteResult {
}
targets = filterRules(opts, targets)
for _, cmd := range commands {
for _, r := range targets {
cmdInfo := AllCommands[cmd.tokens[0]]
cmdInfo := AllCommands[cmd.tokens[0]]
// Depending on whether a transformation is rule-specific or not, it should be applied to
// every rule that satisfies the filter or just once to the file.
cmdTargets := targets
if !cmdInfo.PerRule {
cmdTargets = []*build.Rule{nil}
}
for _, r := range cmdTargets {
record := &apipb.Output_Record{}
newf, err := cmdInfo.Fn(opts, CmdEnvironment{f, r, vars, absPkg, cmd.tokens[1:], record})
if len(record.Fields) != 0 {
@@ -793,7 +928,7 @@ func runBuildifier(opts *Options, f *build.File) ([]byte, error) {
return build.Format(f), nil
}
cmd := exec.Command(opts.Buildifier)
cmd := exec.Command(opts.Buildifier, "--type=build")
data := build.Format(f)
cmd.Stdin = bytes.NewBuffer(data)
stdout := bytes.NewBuffer(nil)
@@ -881,18 +1016,29 @@ func appendCommandsFromFile(opts *Options, commandsByFile map[string][]commandsF
reader = rc
defer rc.Close()
}
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
line := scanner.Text()
appendCommandsFromReader(opts, reader, commandsByFile)
}
func appendCommandsFromReader(opts *Options, reader io.Reader, commandsByFile map[string][]commandsForTarget) {
r := bufio.NewReader(reader)
atEof := false
for !atEof {
line, err := r.ReadString('\n')
if err == io.EOF {
atEof = true
err = nil
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error while reading commands file: %v", err)
return
}
line = strings.TrimSuffix(line, "\n")
if line == "" {
continue
}
args := strings.Split(line, "|")
appendCommands(opts, commandsByFile, args)
}
if err := scanner.Err(); err != nil {
fmt.Fprintf(os.Stderr, "Error while reading commands file: %v", scanner.Err())
}
}
func printRecord(writer io.Writer, record *apipb.Output_Record) {

View File

@@ -75,7 +75,7 @@ func ShortenLabel(label string, pkg string) string {
if !ShortenLabelsFlag {
return label
}
if !strings.HasPrefix(label, "//") {
if !strings.Contains(label, "//") {
// It doesn't look like a long label, so we preserve it.
return label
}
@@ -85,7 +85,13 @@ func ShortenLabel(label string, pkg string) string {
}
slash := strings.LastIndex(labelPkg, "/")
if (slash >= 0 && labelPkg[slash+1:] == rule) || labelPkg == rule {
return "//" + labelPkg
if repo == "" {
return "//" + labelPkg
}
return "@" + repo + "//" + labelPkg
}
if strings.HasPrefix(label, "@") && repo == rule && labelPkg == "" {
return "@" + repo
}
return label
}
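
The new branches above extend label shortening to labels in external repositories. A small sketch, not part of this commit, of the expected behavior; it assumes edit.ShortenLabelsFlag is enabled (buildozer sets it from a flag elsewhere in this change).

```go
package main

import (
	"fmt"

	"github.com/bazelbuild/buildtools/edit"
)

func main() {
	edit.ShortenLabelsFlag = true

	// The target name repeats the last package component, so ":name" is dropped.
	fmt.Println(edit.ShortenLabel("//foo/bar:bar", "other/pkg")) // //foo/bar

	// New in this change: the repository prefix is kept while shortening.
	fmt.Println(edit.ShortenLabel("@repo//foo/bar:bar", "other/pkg")) // @repo//foo/bar
	fmt.Println(edit.ShortenLabel("@repo//:repo", "other/pkg"))       // @repo
}
```
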
@@ -162,8 +168,8 @@ func ExprToRule(expr build.Expr, kind string) (*build.Rule, bool) {
if !ok {
return nil, false
}
k, ok := call.X.(*build.LiteralExpr)
if !ok || k.Token != kind {
k, ok := call.X.(*build.Ident)
if !ok || k.Name != kind {
return nil, false
}
return &build.Rule{call, ""}, true
@@ -180,21 +186,25 @@ func ExistingPackageDeclaration(f *build.File) *build.Rule {
}
// PackageDeclaration returns the package declaration. If it doesn't
// exist, it is created at the top of the BUILD file, after leading
// comments.
// exist, it is created at the top of the BUILD file, after optional
// docstring, comments, and load statements.
func PackageDeclaration(f *build.File) *build.Rule {
if pkg := ExistingPackageDeclaration(f); pkg != nil {
return pkg
}
all := []build.Expr{}
added := false
call := &build.CallExpr{X: &build.LiteralExpr{Token: "package"}}
// Skip CommentBlocks and find a place to insert the package declaration.
call := &build.CallExpr{X: &build.Ident{Name: "package"}}
for _, stmt := range f.Stmt {
_, ok := stmt.(*build.CommentBlock)
if !ok && !added {
all = append(all, call)
added = true
switch stmt.(type) {
case *build.CommentBlock, *build.LoadStmt, *build.StringExpr:
// Skip docstring, comments, and load statements to
// find a place to insert the package declaration.
default:
if !added {
all = append(all, call)
added = true
}
}
all = append(all, stmt)
}
@@ -213,14 +223,14 @@ func RemoveEmptyPackage(f *build.File) *build.File {
var all []build.Expr
for _, stmt := range f.Stmt {
if call, ok := stmt.(*build.CallExpr); ok {
functionName, ok := call.X.(*build.LiteralExpr)
if ok && functionName.Token == "package" && len(call.List) == 0 {
functionName, ok := call.X.(*build.Ident)
if ok && functionName.Name == "package" && len(call.List) == 0 {
continue
}
}
all = append(all, stmt)
}
return &build.File{Path: f.Path, Comments: f.Comments, Stmt: all}
return &build.File{Path: f.Path, Comments: f.Comments, Stmt: all, Type: build.TypeBuild}
}
// InsertAfter inserts an expression after index i.
@@ -241,8 +251,8 @@ func IndexOfLast(stmt []build.Expr, Kind string) int {
if !ok {
continue
}
literal, ok := sAsCallExpr.X.(*build.LiteralExpr)
if ok && literal.Token == Kind {
literal, ok := sAsCallExpr.X.(*build.Ident)
if ok && literal.Name == Kind {
lastIndex = i
}
}
@@ -251,7 +261,7 @@ func IndexOfLast(stmt []build.Expr, Kind string) int {
// InsertAfterLastOfSameKind inserts an expression after the last expression of the same kind.
func InsertAfterLastOfSameKind(stmt []build.Expr, expr *build.CallExpr) []build.Expr {
index := IndexOfLast(stmt, expr.X.(*build.LiteralExpr).Token)
index := IndexOfLast(stmt, expr.X.(*build.Ident).Name)
if index == -1 {
return InsertAtEnd(stmt, expr)
}
@@ -329,7 +339,7 @@ func DeleteRule(f *build.File, rule *build.Rule) *build.File {
}
all = append(all, stmt)
}
return &build.File{Path: f.Path, Comments: f.Comments, Stmt: all}
return &build.File{Path: f.Path, Comments: f.Comments, Stmt: all, Type: build.TypeBuild}
}
// DeleteRuleByName returns the AST without the rules that have the
@@ -347,7 +357,7 @@ func DeleteRuleByName(f *build.File, name string) *build.File {
all = append(all, stmt)
}
}
return &build.File{Path: f.Path, Comments: f.Comments, Stmt: all}
return &build.File{Path: f.Path, Comments: f.Comments, Stmt: all, Type: build.TypeBuild}
}
// DeleteRuleByKind removes the rules of the specified kind from the AST.
@@ -360,12 +370,12 @@ func DeleteRuleByKind(f *build.File, kind string) *build.File {
all = append(all, stmt)
continue
}
k, ok := call.X.(*build.LiteralExpr)
if !ok || k.Token != kind {
k, ok := call.X.(*build.Ident)
if !ok || k.Name != kind {
all = append(all, stmt)
}
}
return &build.File{Path: f.Path, Comments: f.Comments, Stmt: all}
return &build.File{Path: f.Path, Comments: f.Comments, Stmt: all, Type: build.TypeBuild}
}
// AllLists returns all the lists concatenated in an expression.
@@ -383,6 +393,21 @@ func AllLists(e build.Expr) []*build.ListExpr {
return nil
}
// AllSelects returns all the selects concatenated in an expression.
func AllSelects(e build.Expr) []*build.CallExpr {
switch e := e.(type) {
case *build.BinaryExpr:
if e.Op == "+" {
return append(AllSelects(e.X), AllSelects(e.Y)...)
}
case *build.CallExpr:
if x, ok := e.X.(*build.Ident); ok && x.Name == "select" {
return []*build.CallExpr{e}
}
}
return nil
}
// FirstList works in the same way as AllLists, except that it
// returns only one list, or nil.
func FirstList(e build.Expr) *build.ListExpr {
@@ -451,24 +476,208 @@ func ContainsComments(expr build.Expr, str string) bool {
return false
}
// RemoveEmptySelectsAndConcatLists iterates the tree in order to turn
// empty selects into empty lists and to concatenate adjacent lists.
func RemoveEmptySelectsAndConcatLists(e build.Expr) build.Expr {
switch e := e.(type) {
case *build.BinaryExpr:
if e.Op == "+" {
e.X = RemoveEmptySelectsAndConcatLists(e.X)
e.Y = RemoveEmptySelectsAndConcatLists(e.Y)
x, xIsList := e.X.(*build.ListExpr)
y, yIsList := e.Y.(*build.ListExpr)
if xIsList && yIsList {
return &build.ListExpr{List: append(x.List, y.List...)}
}
if xIsList && len(x.List) == 0 {
return e.Y
}
if yIsList && len(y.List) == 0 {
return e.X
}
}
case *build.CallExpr:
if x, ok := e.X.(*build.Ident); ok && x.Name == "select" {
if len(e.List) == 0 {
return &build.ListExpr{List: []build.Expr{}}
}
if dict, ok := e.List[0].(*build.DictExpr); ok {
for _, keyVal := range dict.List {
if keyVal, ok := keyVal.(*build.KeyValueExpr); ok {
val, ok := keyVal.Value.(*build.ListExpr)
if !ok || len(val.List) > 0 {
return e
}
} else {
return e
}
}
return &build.ListExpr{List: []build.Expr{}}
}
}
}
return e
}
// ComputeIntersection returns the intersection of the two lists given as parameters;
// if the elements they contain are not build.StringExpr, the result will be nil.
func ComputeIntersection(list1, list2 []build.Expr) []build.Expr {
if list1 == nil || list2 == nil {
return nil
}
if len(list2) == 0 {
return []build.Expr{}
}
i := 0
for j, common := range list1 {
if common, ok := common.(*build.StringExpr); ok {
found := false
for _, elem := range list2 {
if str, ok := elem.(*build.StringExpr); ok {
if str.Value == common.Value {
found = true
break
}
} else {
return nil
}
}
if found {
list1[i] = list1[j]
i++
}
} else {
return nil
}
}
return list1[:i]
}
// SelectListsIntersection returns the intersection of the lists of strings inside
// the dictionary argument of the select expression given as a parameter
func SelectListsIntersection(sel *build.CallExpr, pkg string) (intersection []build.Expr) {
if len(sel.List) == 0 || len(sel.List) > 1 {
return nil
}
dict, ok := sel.List[0].(*build.DictExpr)
if !ok || len(dict.List) == 0 {
return nil
}
if keyVal, ok := dict.List[0].(*build.KeyValueExpr); ok {
if val, ok := keyVal.Value.(*build.ListExpr); ok {
intersection = make([]build.Expr, len(val.List))
copy(intersection, val.List)
}
}
for _, keyVal := range dict.List[1:] {
if keyVal, ok := keyVal.(*build.KeyValueExpr); ok {
if val, ok := keyVal.Value.(*build.ListExpr); ok {
intersection = ComputeIntersection(intersection, val.List)
if len(intersection) == 0 {
return intersection
}
} else {
return nil
}
} else {
return nil
}
}
return intersection
}
// ResolveAttr extracts common elements of the lists inside select dictionaries
// and adds them at attribute level rather than select level, as well as turns
// empty selects into empty lists and concatenates adjacent lists
func ResolveAttr(r *build.Rule, attr, pkg string) {
var toExtract []build.Expr
e := r.Attr(attr)
if e == nil {
return
}
for _, sel := range AllSelects(e) {
intersection := SelectListsIntersection(sel, pkg)
if intersection != nil {
toExtract = append(toExtract, intersection...)
}
}
for _, common := range toExtract {
e = AddValueToList(e, pkg, common, false) // this will also remove them from selects
}
r.SetAttr(attr, RemoveEmptySelectsAndConcatLists(e))
}
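
Together, SelectListsIntersection, AddValueToList and RemoveEmptySelectsAndConcatLists let ResolveAttr hoist values shared by every select branch into the plain list. The sketch below is not part of this commit and only illustrates the intended effect; the exact formatted output is an assumption.

```go
package main

import (
	"fmt"
	"log"

	"github.com/bazelbuild/buildtools/build"
	"github.com/bazelbuild/buildtools/edit"
)

func main() {
	src := []byte(`cc_library(
    name = "lib",
    deps = [":base"] + select({
        ":linux": [":common", ":linux_only"],
        ":osx": [":common"],
    }),
)
`)
	f, err := build.ParseBuild("BUILD", src)
	if err != nil {
		log.Fatal(err)
	}
	rule := edit.FindRuleByName(f, "lib")
	// ":common" appears in every select branch, so it is moved to the plain list.
	edit.ResolveAttr(rule, "deps", "")
	fmt.Print(string(build.Format(f)))
	// Roughly expected:
	//   deps = [":base", ":common"] + select({
	//       ":linux": [":linux_only"],
	//       ":osx": [],
	//   }),
}
```
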
// SelectDelete removes the item from all the lists which are values
// in the dictionary of every select
func SelectDelete(e build.Expr, item, pkg string, deleted **build.StringExpr) {
for _, sel := range AllSelects(e) {
if len(sel.List) == 0 {
continue
}
if dict, ok := sel.List[0].(*build.DictExpr); ok {
for _, keyVal := range dict.List {
if keyVal, ok := keyVal.(*build.KeyValueExpr); ok {
if val, ok := keyVal.Value.(*build.ListExpr); ok {
RemoveFromList(val, item, pkg, deleted)
}
}
}
}
}
}
// RemoveFromList removes one element from a ListExpr and stores
// the deleted StringExpr at the address pointed to by the last parameter.
func RemoveFromList(li *build.ListExpr, item, pkg string, deleted **build.StringExpr) {
var all []build.Expr
for _, elem := range li.List {
if str, ok := elem.(*build.StringExpr); ok {
if LabelsEqual(str.Value, item, pkg) && (DeleteWithComments || !hasComments(str)) {
if deleted != nil {
*deleted = str
}
continue
}
}
all = append(all, elem)
}
li.List = all
}
// ListDelete deletes the item from a list expression in e and returns
// the StringExpr deleted, or nil otherwise.
func ListDelete(e build.Expr, item, pkg string) (deleted *build.StringExpr) {
if unquoted, _, err := build.Unquote(item); err == nil {
item = unquoted
}
deleted = nil
item = ShortenLabel(item, pkg)
for _, li := range AllLists(e) {
var all []build.Expr
for _, elem := range li.List {
if str, ok := elem.(*build.StringExpr); ok {
if LabelsEqual(str.Value, item, pkg) && (DeleteWithComments || !hasComments(str)) {
deleted = str
continue
}
}
all = append(all, elem)
}
li.List = all
RemoveFromList(li, item, pkg, &deleted)
}
SelectDelete(e, item, pkg, &deleted)
return deleted
}
@@ -582,13 +791,13 @@ func attributeMustNotBeSorted(rule, attr string) bool {
// getVariable returns the assignment expression that assigns a value to expr, if expr is
// an identifier of a variable that vars contains a mapping for.
func getVariable(expr build.Expr, vars *map[string]*build.BinaryExpr) (varAssignment *build.BinaryExpr) {
func getVariable(expr build.Expr, vars *map[string]*build.AssignExpr) (varAssignment *build.AssignExpr) {
if vars == nil {
return nil
}
if literal, ok := expr.(*build.LiteralExpr); ok {
if varAssignment = (*vars)[literal.Token]; varAssignment != nil {
if literal, ok := expr.(*build.Ident); ok {
if varAssignment = (*vars)[literal.Name]; varAssignment != nil {
return varAssignment
}
}
@@ -604,10 +813,14 @@ func AddValueToList(oldList build.Expr, pkg string, item build.Expr, sorted bool
}
str, ok := item.(*build.StringExpr)
if ok && ListFind(oldList, str.Value, pkg) != nil {
// The value is already in the list.
return oldList
if ok {
if ListFind(oldList, str.Value, pkg) != nil {
// The value is already in the list.
return oldList
}
SelectDelete(oldList, str.Value, pkg, nil)
}
li := FirstList(oldList)
if li != nil {
if sorted {
@@ -623,11 +836,11 @@ func AddValueToList(oldList build.Expr, pkg string, item build.Expr, sorted bool
}
// AddValueToListAttribute adds the given item to the list attribute identified by name and pkg.
func AddValueToListAttribute(r *build.Rule, name string, pkg string, item build.Expr, vars *map[string]*build.BinaryExpr) {
func AddValueToListAttribute(r *build.Rule, name string, pkg string, item build.Expr, vars *map[string]*build.AssignExpr) {
old := r.Attr(name)
sorted := !attributeMustNotBeSorted(r.Kind(), name)
if varAssignment := getVariable(old, vars); varAssignment != nil {
varAssignment.Y = AddValueToList(varAssignment.Y, pkg, item, sorted)
varAssignment.RHS = AddValueToList(varAssignment.RHS, pkg, item, sorted)
} else {
r.SetAttr(name, AddValueToList(old, pkg, item, sorted))
}
@@ -635,7 +848,7 @@ func AddValueToListAttribute(r *build.Rule, name string, pkg string, item build.
// MoveAllListAttributeValues moves all values from list attribute oldAttr to newAttr,
// and deletes oldAttr.
func MoveAllListAttributeValues(rule *build.Rule, oldAttr, newAttr, pkg string, vars *map[string]*build.BinaryExpr) error {
func MoveAllListAttributeValues(rule *build.Rule, oldAttr, newAttr, pkg string, vars *map[string]*build.AssignExpr) error {
if rule.Attr(oldAttr) == nil {
return fmt.Errorf("no attribute %s found in %s", oldAttr, rule.Name())
}
@@ -672,21 +885,58 @@ func DictionarySet(dict *build.DictExpr, key string, value build.Expr) build.Exp
return nil
}
// DictionaryGet looks for the key in the dictionary expression, and returns the
// current value. If it is unset, it returns nil.
func DictionaryGet(dict *build.DictExpr, key string) build.Expr {
for _, e := range dict.List {
kv, ok := e.(*build.KeyValueExpr)
if !ok {
continue
}
if k, ok := kv.Key.(*build.StringExpr); ok && k.Value == key {
return kv.Value
}
}
return nil
}
// DictionaryDelete looks for the key in the dictionary expression. If the key exists,
// it removes the key-value pair and returns it. Otherwise it returns nil.
func DictionaryDelete(dict *build.DictExpr, key string) (deleted build.Expr) {
if unquoted, _, err := build.Unquote(key); err == nil {
key = unquoted
}
deleted = nil
var all []build.Expr
for _, e := range dict.List {
kv, _ := e.(*build.KeyValueExpr)
if k, ok := kv.Key.(*build.StringExpr); ok {
if k.Value == key {
deleted = kv
} else {
all = append(all, e)
}
}
}
dict.List = all
return deleted
}
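
DictionaryGet and DictionaryDelete complement the existing DictionarySet and back the new dict_add, dict_set and dict_remove commands. A small usage sketch, not part of this commit:

```go
package main

import (
	"fmt"

	"github.com/bazelbuild/buildtools/build"
	"github.com/bazelbuild/buildtools/edit"
)

func main() {
	dict := &build.DictExpr{}

	// dict_add and dict_set both go through DictionarySet; dict_set overwrites.
	edit.DictionarySet(dict, "FOO", &build.StringExpr{Value: "1"})
	edit.DictionarySet(dict, "BAR", &build.StringExpr{Value: "2"})
	edit.DictionarySet(dict, "FOO", &build.StringExpr{Value: "3"})

	if v := edit.DictionaryGet(dict, "FOO"); v != nil {
		fmt.Println(build.FormatString(v)) // "3"
	}

	// dict_remove deletes the key-value pair entirely.
	edit.DictionaryDelete(dict, "BAR")
	fmt.Println(edit.DictionaryGet(dict, "BAR") == nil) // true
}
```
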
// RenameAttribute renames an attribute in a rule.
func RenameAttribute(r *build.Rule, oldName, newName string) error {
if r.Attr(newName) != nil {
return fmt.Errorf("attribute %s already exists in rule %s", newName, r.Name())
}
for _, kv := range r.Call.List {
as, ok := kv.(*build.BinaryExpr)
if !ok || as.Op != "=" {
as, ok := kv.(*build.AssignExpr)
if !ok {
continue
}
k, ok := as.X.(*build.LiteralExpr)
if !ok || k.Token != oldName {
k, ok := as.LHS.(*build.Ident)
if !ok || k.Name != oldName {
continue
}
k.Token = newName
k.Name = newName
return nil
}
return fmt.Errorf("no attribute %s found in rule %s", oldName, r.Name())
@@ -700,8 +950,8 @@ func EditFunction(v build.Expr, name string, f func(x *build.CallExpr, stk []bui
if !ok {
return nil
}
fct, ok := call.X.(*build.LiteralExpr)
if !ok || fct.Token != name {
fct, ok := call.X.(*build.Ident)
if !ok || fct.Name != name {
return nil
}
return f(call, stk)
@@ -709,107 +959,137 @@ func EditFunction(v build.Expr, name string, f func(x *build.CallExpr, stk []bui
}
// UsedSymbols returns the set of symbols used in the BUILD file (variables, function names).
func UsedSymbols(f *build.File) map[string]bool {
func UsedSymbols(stmt build.Expr) map[string]bool {
symbols := make(map[string]bool)
build.Walk(f, func(expr build.Expr, stack []build.Expr) {
literal, ok := expr.(*build.LiteralExpr)
build.Walk(stmt, func(expr build.Expr, stack []build.Expr) {
// Don't traverse inside load statements
if len(stack) > 0 {
if _, ok := stack[len(stack)-1].(*build.LoadStmt); ok {
return
}
}
literal, ok := expr.(*build.Ident)
if !ok {
return
}
// Check if we are on the left-side of an assignment
for _, e := range stack {
if as, ok := e.(*build.BinaryExpr); ok {
if as.Op == "=" && as.X == expr {
if as, ok := e.(*build.AssignExpr); ok {
if as.LHS == expr {
return
}
}
}
symbols[literal.Token] = true
symbols[literal.Name] = true
})
return symbols
}
func newLoad(args []string) *build.CallExpr {
load := &build.CallExpr{
X: &build.LiteralExpr{
Token: "load",
// NewLoad creates a new LoadStmt node
func NewLoad(location string, from, to []string) *build.LoadStmt {
load := &build.LoadStmt{
Module: &build.StringExpr{
Value: location,
},
List: []build.Expr{},
ForceCompact: true,
}
for _, a := range args {
load.List = append(load.List, &build.StringExpr{Value: a})
for i := range from {
load.From = append(load.From, &build.Ident{Name: from[i]})
load.To = append(load.To, &build.Ident{Name: to[i]})
}
return load
}
// appendLoad tries to find an existing load location and append symbols to it.
func appendLoad(stmts []build.Expr, args []string) bool {
if len(args) == 0 {
return false
// AppendToLoad appends symbols to an existing load statement
// Returns true if the statement was actually edited (if the required symbols haven't been
// loaded yet)
func AppendToLoad(load *build.LoadStmt, from, to []string) bool {
symbolsToLoad := make(map[string]string)
for i, s := range to {
symbolsToLoad[s] = from[i]
}
location := args[0]
symbolsToLoad := make(map[string]bool)
for _, s := range args[1:] {
symbolsToLoad[s] = true
}
var lastLoad *build.CallExpr
for _, s := range stmts {
call, ok := s.(*build.CallExpr)
if !ok {
continue
}
if l, ok := call.X.(*build.LiteralExpr); !ok || l.Token != "load" {
continue
}
if len(call.List) < 2 {
continue
}
if s, ok := call.List[0].(*build.StringExpr); !ok || s.Value != location {
continue // Loads a different file.
}
for _, arg := range call.List[1:] {
if s, ok := arg.(*build.StringExpr); ok {
delete(symbolsToLoad, s.Value) // Already loaded.
}
}
// Remember the last insert location, but potentially remove more symbols
// that are already loaded in other subsequent calls.
lastLoad = call
for _, ident := range load.To {
delete(symbolsToLoad, ident.Name) // Already loaded.
}
if lastLoad == nil {
if len(symbolsToLoad) == 0 {
return false
}
// Append the remaining loads to the last load location.
// Append the remaining loads to the load statement.
sortedSymbols := []string{}
for s := range symbolsToLoad {
sortedSymbols = append(sortedSymbols, s)
}
sort.Strings(sortedSymbols)
for _, s := range sortedSymbols {
lastLoad.List = append(lastLoad.List, &build.StringExpr{Value: s})
load.From = append(load.From, &build.Ident{Name: symbolsToLoad[s]})
load.To = append(load.To, &build.Ident{Name: s})
}
return true
}
// appendLoad tries to find an existing load location and append symbols to it.
func appendLoad(stmts []build.Expr, location string, from, to []string) bool {
symbolsToLoad := make(map[string]string)
for i, s := range to {
symbolsToLoad[s] = from[i]
}
var lastLoad *build.LoadStmt
for _, s := range stmts {
load, ok := s.(*build.LoadStmt)
if !ok {
continue
}
if load.Module.Value != location {
continue // Loads a different file.
}
for _, ident := range load.To {
delete(symbolsToLoad, ident.Name) // Already loaded.
}
// Remember the last insert location, but potentially remove more symbols
// that are already loaded in other subsequent calls.
lastLoad = load
}
if lastLoad == nil {
return false
}
// Append the remaining loads to the last load location.
from = []string{}
to = []string{}
for t, f := range symbolsToLoad {
from = append(from, f)
to = append(to, t)
}
AppendToLoad(lastLoad, from, to)
return true
}
// InsertLoad inserts a load statement at the top of the list of statements.
// The load statement is constructed using args. Symbols that are already loaded
// The load statement is constructed using a string location and two slices of from- and to-symbols.
// The function panics if the slices aren't of the same length. Symbols that are already loaded
// from the given filepath are ignored. If stmts already contains a load for the
// location in arguments, appends the symbols to load to it.
func InsertLoad(stmts []build.Expr, args []string) []build.Expr {
if appendLoad(stmts, args) {
func InsertLoad(stmts []build.Expr, location string, from, to []string) []build.Expr {
if len(from) != len(to) {
panic(fmt.Errorf("length mismatch: %v (from) and %v (to)", len(from), len(to)))
}
if appendLoad(stmts, location, from, to) {
return stmts
}
load := newLoad(args)
load := NewLoad(location, from, to)
var all []build.Expr
added := false
for _, stmt := range stmts {
for i, stmt := range stmts {
_, isComment := stmt.(*build.CommentBlock)
if isComment || added {
_, isString := stmt.(*build.StringExpr)
isDocString := isString && i == 0
if isComment || isDocString || added {
all = append(all, stmt)
continue
}
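
A short sketch, not part of this commit, of how InsertLoad now behaves: symbols already loaded from the same .bzl file are skipped, and any remaining ones are appended to the existing load statement instead of creating a duplicate.

```go
package main

import (
	"fmt"
	"log"

	"github.com/bazelbuild/buildtools/build"
	"github.com/bazelbuild/buildtools/edit"
)

func main() {
	f, err := build.ParseBuild("BUILD", []byte(`load("//tools:defs.bzl", "my_rule")

my_rule(name = "x")
`))
	if err != nil {
		log.Fatal(err)
	}
	// "my_rule" is already loaded from //tools:defs.bzl, so only "my_macro" is
	// appended to the existing load statement.
	f.Stmt = edit.InsertLoad(f.Stmt,
		"//tools:defs.bzl",
		[]string{"my_rule", "my_macro"}, // from
		[]string{"my_rule", "my_macro"}) // to
	fmt.Print(string(build.Format(f)))
	// Expected: a single load of //tools:defs.bzl naming both "my_rule" and "my_macro".
}
```
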

View File

@@ -315,17 +315,17 @@ func usePlusEqual(f *build.File) bool {
if !ok || len(call.List) != 1 {
continue
}
obj, ok := dot.X.(*build.LiteralExpr)
obj, ok := dot.X.(*build.Ident)
if !ok {
continue
}
var fix *build.BinaryExpr
var fix *build.AssignExpr
if dot.Name == "extend" {
fix = &build.BinaryExpr{X: obj, Op: "+=", Y: call.List[0]}
fix = &build.AssignExpr{LHS: obj, Op: "+=", RHS: call.List[0]}
} else if dot.Name == "append" {
list := &build.ListExpr{List: []build.Expr{call.List[0]}}
fix = &build.BinaryExpr{X: obj, Op: "+=", Y: list}
fix = &build.AssignExpr{LHS: obj, Op: "+=", RHS: list}
} else {
continue
}
@@ -340,13 +340,17 @@ func isNonemptyComment(comment *build.Comments) bool {
return len(comment.Before)+len(comment.Suffix)+len(comment.After) > 0
}
// Checks whether a call or any of its arguments have a comment
func hasComment(call *build.CallExpr) bool {
if isNonemptyComment(call.Comment()) {
// Checks whether a load statement or any of its arguments have a comment
func hasComment(load *build.LoadStmt) bool {
if isNonemptyComment(load.Comment()) {
return true
}
for _, arg := range call.List {
if isNonemptyComment(arg.Comment()) {
if isNonemptyComment(load.Module.Comment()) {
return true
}
for i := range load.From {
if isNonemptyComment(load.From[i].Comment()) || isNonemptyComment(load.To[i].Comment()) {
return true
}
}
@@ -357,40 +361,35 @@ func hasComment(call *build.CallExpr) bool {
// It also cleans symbols loaded multiple times, sorts the symbol list, and removes load
// statements when the list is empty.
func cleanUnusedLoads(f *build.File) bool {
// If the file needs preprocessing, leave it alone.
for _, stmt := range f.Stmt {
if _, ok := stmt.(*build.PythonBlock); ok {
return false
}
}
symbols := UsedSymbols(f)
fixed := false
var all []build.Expr
for _, stmt := range f.Stmt {
rule, ok := ExprToRule(stmt, "load")
if !ok || len(rule.Call.List) == 0 || hasComment(rule.Call) {
load, ok := stmt.(*build.LoadStmt)
if !ok || hasComment(load) {
all = append(all, stmt)
continue
}
var args []build.Expr
for _, arg := range rule.Call.List[1:] { // first argument is the path, we keep it
symbol, ok := loadedSymbol(arg)
if !ok || symbols[symbol] {
args = append(args, arg)
if ok {
// If the same symbol is loaded twice, we'll remove it.
delete(symbols, symbol)
}
var fromSymbols, toSymbols []*build.Ident
for i := range load.From {
fromSymbol := load.From[i]
toSymbol := load.To[i]
if symbols[toSymbol.Name] {
// The symbol is actually used
fromSymbols = append(fromSymbols, fromSymbol)
toSymbols = append(toSymbols, toSymbol)
// If the same symbol is loaded twice, we'll remove it.
delete(symbols, toSymbol.Name)
} else {
fixed = true
}
}
if len(args) > 0 { // Keep the load statement if it loads at least one symbol.
li := &build.ListExpr{List: args}
build.SortStringList(li)
rule.Call.List = append(rule.Call.List[:1], li.List...)
all = append(all, rule.Call)
if len(toSymbols) > 0 { // Keep the load statement if it loads at least one symbol.
sort.Sort(loadArgs{fromSymbols, toSymbols})
load.From = fromSymbols
load.To = toSymbols
all = append(all, load)
} else {
fixed = true
}
@@ -399,22 +398,6 @@ func cleanUnusedLoads(f *build.File) bool {
return fixed
}
// loadedSymbol parses the symbol token from a load statement argument,
// supporting aliases.
func loadedSymbol(arg build.Expr) (string, bool) {
symbol, ok := arg.(*build.StringExpr)
if ok {
return symbol.Value, ok
}
// try an aliased symbol
if binExpr, ok := arg.(*build.BinaryExpr); ok && binExpr.Op == "=" {
if keyExpr, ok := binExpr.X.(*build.LiteralExpr); ok {
return keyExpr.Token, ok
}
}
return "", false
}
// movePackageDeclarationToTheTop ensures that the call to package() is done
// before everything else (except comments).
func movePackageDeclarationToTheTop(f *build.File) bool {
@@ -426,9 +409,10 @@ func movePackageDeclarationToTheTop(f *build.File) bool {
inserted := false // true when the package declaration has been inserted
for _, stmt := range f.Stmt {
_, isComment := stmt.(*build.CommentBlock)
_, isBinaryExpr := stmt.(*build.BinaryExpr) // e.g. variable declaration
_, isLoad := ExprToRule(stmt, "load")
if isComment || isBinaryExpr || isLoad {
_, isString := stmt.(*build.StringExpr) // typically a docstring
_, isAssignExpr := stmt.(*build.AssignExpr) // e.g. variable declaration
_, isLoad := stmt.(*build.LoadStmt)
if isComment || isString || isAssignExpr || isLoad {
all = append(all, stmt)
continue
}
@@ -567,3 +551,24 @@ func FixFile(f *build.File, pkg string, fixes []string) *build.File {
}
return f
}
// A wrapper for a LoadStmt's From and To slices for consistent sorting of their contents.
// It's assumed that the two slices have the same length; the contents are sorted by
// the `To` attribute, and the items of `From` are swapped exactly the same way as the items of `To`.
type loadArgs struct {
From []*build.Ident
To []*build.Ident
}
func (args loadArgs) Len() int {
return len(args.From)
}
func (args loadArgs) Swap(i, j int) {
args.From[i], args.From[j] = args.From[j], args.From[i]
args.To[i], args.To[j] = args.To[j], args.To[i]
}
func (args loadArgs) Less(i, j int) bool {
return args.To[i].Name < args.To[j].Name
}

Binary file not shown.

View File

@@ -5,64 +5,63 @@ import buildpb "github.com/bazelbuild/buildtools/build_proto"
var TypeOf = map[string]buildpb.Attribute_Discriminator{
"aar": buildpb.Attribute_LABEL,
"absolute_path_profile": buildpb.Attribute_STRING,
"actual": buildpb.Attribute_LABEL,
"aliases": buildpb.Attribute_STRING_LIST,
"all_files": buildpb.Attribute_LABEL,
"alwayslink": buildpb.Attribute_BOOLEAN,
"app_asset_catalogs": buildpb.Attribute_LABEL_LIST,
"app_bundle_id": buildpb.Attribute_STRING,
"app_deps": buildpb.Attribute_LABEL_LIST,
"app_entitlements": buildpb.Attribute_LABEL,
"app_icon": buildpb.Attribute_STRING,
"app_infoplists": buildpb.Attribute_LABEL_LIST,
"app_name": buildpb.Attribute_STRING,
"app_provisioning_profile": buildpb.Attribute_LABEL,
"app_resources": buildpb.Attribute_LABEL_LIST,
"app_storyboards": buildpb.Attribute_LABEL_LIST,
"app_strings": buildpb.Attribute_LABEL_LIST,
"app_structured_resources": buildpb.Attribute_LABEL_LIST,
"api_level": buildpb.Attribute_INTEGER,
"ar_files": buildpb.Attribute_LABEL,
"archives": buildpb.Attribute_LABEL_LIST,
"args": buildpb.Attribute_STRING_LIST,
"artifact": buildpb.Attribute_STRING,
"as_files": buildpb.Attribute_LABEL,
"asset_catalogs": buildpb.Attribute_LABEL_LIST,
"assets": buildpb.Attribute_LABEL_LIST,
"assets_dir": buildpb.Attribute_STRING,
"avoid_deps": buildpb.Attribute_LABEL_LIST,
"binary": buildpb.Attribute_LABEL,
"binary_type": buildpb.Attribute_STRING,
"blacklisted_protos": buildpb.Attribute_LABEL_LIST,
"bootclasspath": buildpb.Attribute_LABEL_LIST,
"build_file": buildpb.Attribute_STRING,
"build_file_content": buildpb.Attribute_STRING,
"bundle_id": buildpb.Attribute_STRING,
"build_tools_version": buildpb.Attribute_STRING,
"bundle_imports": buildpb.Attribute_LABEL_LIST,
"bundle_loader": buildpb.Attribute_LABEL,
"bundles": buildpb.Attribute_LABEL_LIST,
"cache": buildpb.Attribute_INTEGER,
"classpath_resources": buildpb.Attribute_LABEL_LIST,
"cmd": buildpb.Attribute_STRING,
"command_line": buildpb.Attribute_STRING,
"commit": buildpb.Attribute_STRING,
"compatible_with": buildpb.Attribute_LABEL_LIST,
"compiler": buildpb.Attribute_STRING,
"compiler_files": buildpb.Attribute_LABEL,
"constraint_setting": buildpb.Attribute_LABEL,
"constraint_values": buildpb.Attribute_LABEL_LIST,
"constraints": buildpb.Attribute_STRING_LIST,
"copts": buildpb.Attribute_STRING_LIST,
"coverage_files": buildpb.Attribute_LABEL,
"cpu": buildpb.Attribute_STRING,
"create_executable": buildpb.Attribute_BOOLEAN,
"crunch_png": buildpb.Attribute_BOOLEAN,
"custom_package": buildpb.Attribute_STRING,
"data": buildpb.Attribute_LABEL_LIST,
"datamodels": buildpb.Attribute_LABEL_LIST,
"debug_key": buildpb.Attribute_LABEL,
"default": buildpb.Attribute_LABEL,
"default_copts": buildpb.Attribute_STRING_LIST,
"default_deprecation": buildpb.Attribute_STRING,
"default_hdrs_check": buildpb.Attribute_STRING,
"default_ios_sdk_version": buildpb.Attribute_STRING,
"default_macosx_sdk_version": buildpb.Attribute_STRING,
"default_macos_sdk_version": buildpb.Attribute_STRING,
"default_properties": buildpb.Attribute_LABEL,
"default_python_version": buildpb.Attribute_STRING,
"default_testonly": buildpb.Attribute_BOOLEAN,
"default_tvos_sdk_version": buildpb.Attribute_STRING,
"default_visibility": buildpb.Attribute_STRING_LIST,
"default_watchos_sdk_version": buildpb.Attribute_STRING,
"define_values": buildpb.Attribute_STRING_DICT,
"defines": buildpb.Attribute_STRING_LIST,
"densities": buildpb.Attribute_STRING_LIST,
"deploy_manifest_lines": buildpb.Attribute_STRING_LIST,
@@ -74,52 +73,51 @@ var TypeOf = map[string]buildpb.Attribute_Discriminator{
"dwp_files": buildpb.Attribute_LABEL,
"dylibs": buildpb.Attribute_LABEL_LIST,
"dynamic_runtime_libs": buildpb.Attribute_LABEL_LIST,
"enable_data_binding": buildpb.Attribute_BOOLEAN,
"enable_modules": buildpb.Attribute_BOOLEAN,
"encoding": buildpb.Attribute_STRING,
"entitlements": buildpb.Attribute_LABEL,
"entry_classes": buildpb.Attribute_STRING_LIST,
"exec_compatible_with": buildpb.Attribute_LABEL_LIST,
"executable": buildpb.Attribute_BOOLEAN,
"exported_plugins": buildpb.Attribute_LABEL_LIST,
"exports": buildpb.Attribute_LABEL_LIST,
"exports_manifest": buildpb.Attribute_BOOLEAN,
"exports_manifest": buildpb.Attribute_TRISTATE,
"expression": buildpb.Attribute_STRING,
"ext_bundle_id": buildpb.Attribute_STRING,
"ext_entitlements": buildpb.Attribute_LABEL,
"ext_families": buildpb.Attribute_STRING_LIST,
"ext_infoplists": buildpb.Attribute_LABEL_LIST,
"ext_provisioning_profile": buildpb.Attribute_LABEL,
"ext_resources": buildpb.Attribute_LABEL_LIST,
"ext_strings": buildpb.Attribute_LABEL_LIST,
"ext_structured_resources": buildpb.Attribute_LABEL_LIST,
"extclasspath": buildpb.Attribute_LABEL_LIST,
"extensions": buildpb.Attribute_LABEL_LIST,
"extension_safe": buildpb.Attribute_BOOLEAN,
"extra_actions": buildpb.Attribute_LABEL_LIST,
"extra_srcs": buildpb.Attribute_LABEL_LIST,
"families": buildpb.Attribute_STRING_LIST,
"features": buildpb.Attribute_STRING_LIST,
"files": buildpb.Attribute_LABEL_LIST,
"flaky": buildpb.Attribute_BOOLEAN,
"forcibly_disable_header_compilation": buildpb.Attribute_BOOLEAN,
"framework_imports": buildpb.Attribute_LABEL_LIST,
"genclass": buildpb.Attribute_LABEL_LIST,
"generates_api": buildpb.Attribute_BOOLEAN,
"hdrs": buildpb.Attribute_LABEL_LIST,
"header_compiler": buildpb.Attribute_LABEL_LIST,
"heuristic_label_expansion": buildpb.Attribute_BOOLEAN,
"horizontal_resolution": buildpb.Attribute_INTEGER,
"idl_import_root": buildpb.Attribute_STRING,
"idl_parcelables": buildpb.Attribute_LABEL_LIST,
"idl_preprocessed": buildpb.Attribute_LABEL_LIST,
"idl_srcs": buildpb.Attribute_LABEL_LIST,
"ijar": buildpb.Attribute_LABEL_LIST,
"imports": buildpb.Attribute_STRING_LIST,
"include_prefix": buildpb.Attribute_STRING,
"includes": buildpb.Attribute_STRING_LIST,
"incremental_dexing": buildpb.Attribute_TRISTATE,
"infoplist": buildpb.Attribute_LABEL,
"infoplists": buildpb.Attribute_LABEL_LIST,
"init_submodules": buildpb.Attribute_BOOLEAN,
"ios_device_arg": buildpb.Attribute_STRING_LIST,
"ios_test_target_device": buildpb.Attribute_LABEL,
"ios_version": buildpb.Attribute_STRING,
"ipa_post_processor": buildpb.Attribute_LABEL,
"instruments": buildpb.Attribute_LABEL,
"interface_library": buildpb.Attribute_LABEL,
"interpreter": buildpb.Attribute_LABEL,
"interpreter_path": buildpb.Attribute_STRING,
"is_dynamic": buildpb.Attribute_BOOLEAN,
"jars": buildpb.Attribute_LABEL_LIST,
"java_home": buildpb.Attribute_STRING,
"javabuilder": buildpb.Attribute_LABEL_LIST,
"javac": buildpb.Attribute_LABEL_LIST,
"javac_supports_workers": buildpb.Attribute_BOOLEAN,
@@ -127,9 +125,9 @@ var TypeOf = map[string]buildpb.Attribute_Discriminator{
"jre_deps": buildpb.Attribute_LABEL_LIST,
"jvm_flags": buildpb.Attribute_STRING_LIST,
"jvm_opts": buildpb.Attribute_STRING_LIST,
"launch_image": buildpb.Attribute_STRING,
"launch_storyboard": buildpb.Attribute_LABEL,
"launcher": buildpb.Attribute_LABEL,
"legacy_create_init": buildpb.Attribute_BOOLEAN,
"libc": buildpb.Attribute_STRING,
"licenses": buildpb.Attribute_LICENSE,
"linker_files": buildpb.Attribute_LABEL,
"linkopts": buildpb.Attribute_STRING_LIST,
@@ -144,10 +142,9 @@ var TypeOf = map[string]buildpb.Attribute_Discriminator{
"main_dex_proguard_specs": buildpb.Attribute_LABEL_LIST,
"malloc": buildpb.Attribute_LABEL,
"manifest": buildpb.Attribute_LABEL,
"manifest_merger": buildpb.Attribute_STRING,
"manifest_values": buildpb.Attribute_STRING_DICT,
"message": buildpb.Attribute_STRING,
"misc": buildpb.Attribute_STRING_LIST,
"minimum_os_version": buildpb.Attribute_STRING,
"mnemonics": buildpb.Attribute_STRING_LIST,
"module_map": buildpb.Attribute_LABEL,
"multidex": buildpb.Attribute_STRING,
@@ -158,7 +155,8 @@ var TypeOf = map[string]buildpb.Attribute_Discriminator{
"non_arc_srcs": buildpb.Attribute_LABEL_LIST,
"non_propagated_deps": buildpb.Attribute_LABEL_LIST,
"objcopy_files": buildpb.Attribute_LABEL,
"options_file": buildpb.Attribute_LABEL,
"oneversion": buildpb.Attribute_LABEL,
"oneversion_whitelist": buildpb.Attribute_LABEL,
"opts": buildpb.Attribute_STRING_LIST,
"out": buildpb.Attribute_STRING,
"out_templates": buildpb.Attribute_STRING_LIST,
@@ -166,32 +164,44 @@ var TypeOf = map[string]buildpb.Attribute_Discriminator{
"output_licenses": buildpb.Attribute_LICENSE,
"output_to_bindir": buildpb.Attribute_BOOLEAN,
"outs": buildpb.Attribute_STRING_LIST,
"package_configuration": buildpb.Attribute_LABEL_LIST,
"packages": buildpb.Attribute_LABEL_LIST,
"path": buildpb.Attribute_STRING,
"pch": buildpb.Attribute_LABEL,
"per_proto_includes": buildpb.Attribute_BOOLEAN,
"platform_apks": buildpb.Attribute_LABEL_LIST,
"platform_type": buildpb.Attribute_STRING,
"plugin": buildpb.Attribute_LABEL,
"plugins": buildpb.Attribute_LABEL_LIST,
"portable_proto_filters": buildpb.Attribute_LABEL_LIST,
"prefix": buildpb.Attribute_STRING,
"pregenerate_oat_files_for_tests": buildpb.Attribute_BOOLEAN,
"processor_class": buildpb.Attribute_STRING,
"profile": buildpb.Attribute_LABEL,
"proguard_apply_dictionary": buildpb.Attribute_LABEL,
"proguard_apply_mapping": buildpb.Attribute_LABEL,
"proguard_generate_mapping": buildpb.Attribute_BOOLEAN,
"proguard_specs": buildpb.Attribute_LABEL_LIST,
"provisioning_profile": buildpb.Attribute_LABEL,
"proto": buildpb.Attribute_STRING,
"proto_source_root": buildpb.Attribute_STRING,
"pytype_deps": buildpb.Attribute_LABEL_LIST,
"ram": buildpb.Attribute_INTEGER,
"reexport_deps": buildpb.Attribute_LABEL_LIST,
"remote": buildpb.Attribute_STRING,
"remote_execution_properties": buildpb.Attribute_STRING,
"repository": buildpb.Attribute_STRING,
"require_defined_version": buildpb.Attribute_BOOLEAN,
"requires_action_output": buildpb.Attribute_BOOLEAN,
"resource_configuration_filters": buildpb.Attribute_STRING_LIST,
"resource_files": buildpb.Attribute_LABEL_LIST,
"resource_jars": buildpb.Attribute_LABEL_LIST,
"resource_strip_prefix": buildpb.Attribute_STRING,
"resourcejar": buildpb.Attribute_LABEL_LIST,
"resources": buildpb.Attribute_LABEL_LIST,
"restricted_to": buildpb.Attribute_LABEL_LIST,
"runtime": buildpb.Attribute_LABEL,
"runtime_deps": buildpb.Attribute_LABEL_LIST,
"runtimes": buildpb.Attribute_LABEL_DICT_UNARY,
"scope": buildpb.Attribute_LABEL_LIST,
"screen_density": buildpb.Attribute_INTEGER,
"sdk_dylibs": buildpb.Attribute_STRING_LIST,
"sdk_frameworks": buildpb.Attribute_STRING_LIST,
"sdk_includes": buildpb.Attribute_STRING_LIST,
@@ -200,6 +210,8 @@ var TypeOf = map[string]buildpb.Attribute_Discriminator{
"sha1": buildpb.Attribute_STRING,
"sha256": buildpb.Attribute_STRING,
"shard_count": buildpb.Attribute_INTEGER,
"shared_library": buildpb.Attribute_LABEL,
"shrink_resources": buildpb.Attribute_TRISTATE,
"singlejar": buildpb.Attribute_LABEL_LIST,
"size": buildpb.Attribute_STRING,
"source_version": buildpb.Attribute_STRING,
@@ -207,39 +219,52 @@ var TypeOf = map[string]buildpb.Attribute_Discriminator{
"srcs": buildpb.Attribute_LABEL_LIST,
"srcs_version": buildpb.Attribute_STRING,
"stamp": buildpb.Attribute_TRISTATE,
"static_library": buildpb.Attribute_LABEL,
"static_runtime_libs": buildpb.Attribute_LABEL_LIST,
"storyboards": buildpb.Attribute_LABEL_LIST,
"strict": buildpb.Attribute_BOOLEAN,
"strings": buildpb.Attribute_LABEL_LIST,
"strip": buildpb.Attribute_BOOLEAN,
"strip_files": buildpb.Attribute_LABEL,
"strip_include_prefix": buildpb.Attribute_STRING,
"strip_prefix": buildpb.Attribute_STRING,
"structured_resources": buildpb.Attribute_LABEL_LIST,
"support_apks": buildpb.Attribute_LABEL_LIST,
"supports_header_parsing": buildpb.Attribute_BOOLEAN,
"supports_param_files": buildpb.Attribute_BOOLEAN,
"system_image": buildpb.Attribute_LABEL,
"system_provided": buildpb.Attribute_BOOLEAN,
"tag": buildpb.Attribute_STRING,
"tags": buildpb.Attribute_STRING_LIST,
"target_compatible_with": buildpb.Attribute_LABEL_LIST,
"target_device": buildpb.Attribute_LABEL,
"target_version": buildpb.Attribute_STRING,
"test_app": buildpb.Attribute_LABEL,
"test_class": buildpb.Attribute_STRING,
"testonly": buildpb.Attribute_BOOLEAN,
"tests": buildpb.Attribute_LABEL_LIST,
"textual_hdrs": buildpb.Attribute_LABEL_LIST,
"timeout": buildpb.Attribute_STRING,
"timezone_data": buildpb.Attribute_LABEL,
"toolchain": buildpb.Attribute_STRING,
"toolchain_type": buildpb.Attribute_STRING,
"toolchains": buildpb.Attribute_LABEL_LIST,
"tools": buildpb.Attribute_LABEL_LIST,
"type": buildpb.Attribute_STRING,
"url": buildpb.Attribute_STRING,
"use_objc_header_names": buildpb.Attribute_BOOLEAN,
"urls": buildpb.Attribute_STRING_LIST,
"use_testrunner": buildpb.Attribute_BOOLEAN,
"values": buildpb.Attribute_STRING_DICT,
"version": buildpb.Attribute_STRING,
"versions": buildpb.Attribute_LABEL_LIST,
"vertical_resolution": buildpb.Attribute_INTEGER,
"visibility": buildpb.Attribute_STRING_LIST,
"vm_heap": buildpb.Attribute_INTEGER,
"weak_sdk_frameworks": buildpb.Attribute_STRING_LIST,
"xcode": buildpb.Attribute_LABEL,
"xctest": buildpb.Attribute_BOOLEAN,
"xctest_app": buildpb.Attribute_LABEL,
"win_def_file": buildpb.Attribute_LABEL,
"workspace_file": buildpb.Attribute_STRING,
"workspace_file_content": buildpb.Attribute_STRING,
"xcenv_based_path": buildpb.Attribute_STRING,
"xibs": buildpb.Attribute_LABEL_LIST,
"xlint": buildpb.Attribute_STRING_LIST,
}
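The TypeOf table above maps Bazel attribute names to their buildpb discriminator, which is what lets an editing tool decide how to parse or rewrite a value (string, label list, tristate, ...) without consulting the rule definition itself. Below is a minimal, hedged sketch of such a lookup; the helper isLabelList is an illustrative assumption, and the import paths assume the upstream bazelbuild/buildtools layout rather than this vendored copy.

// Illustrative sketch only: decide whether an attribute is known to hold a
// list of labels, so a tool knows it may append to it rather than overwrite it.
package main

import (
	"fmt"

	buildpb "github.com/bazelbuild/buildtools/build_proto" // assumed upstream import path
	"github.com/bazelbuild/buildtools/tables"              // assumed upstream import path
)

// isLabelList reports whether attr is recorded as a label list in TypeOf.
// Attributes missing from the table fall back to false in this sketch.
func isLabelList(attr string) bool {
	typ, ok := tables.TypeOf[attr]
	if !ok {
		return false
	}
	return typ == buildpb.Attribute_LABEL_LIST
}

func main() {
	fmt.Println(isLabelList("runtime_deps")) // true: recorded as LABEL_LIST above
	fmt.Println(isLabelList("shard_count"))  // false: recorded as INTEGER above
}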

View File

@@ -199,13 +199,90 @@ var NamePriority = map[string]int{
"implementation": 5,
"implements": 6,
"alwayslink": 7,
// default condition in a dictionary literal passed to select should be
// the last one by convention.
"//conditions:default": 50,
}
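NamePriority assigns a sort rank to well-known attribute names, and the tail shown here pins "//conditions:default" to the end of dictionaries passed to select(), per the convention noted in the comment. The sketch below shows how a priority table like this can drive ordering, with alphabetical order as the tie-breaker; the comparator and the locally declared map are illustrative assumptions, not the formatter's actual sort routine, and only the values visible above are reused.

// Illustrative sketch: order attribute names by a priority table, falling
// back to alphabetical order for equal (or missing) priorities.
package main

import (
	"fmt"
	"sort"
)

var namePriority = map[string]int{
	// Values copied from the visible tail of the table; unknown names get 0.
	"implementation":       5,
	"implements":           6,
	"alwayslink":           7,
	"//conditions:default": 50,
}

func sortAttrs(attrs []string) {
	sort.SliceStable(attrs, func(i, j int) bool {
		pi, pj := namePriority[attrs[i]], namePriority[attrs[j]]
		if pi != pj {
			return pi < pj
		}
		return attrs[i] < attrs[j]
	})
}

func main() {
	attrs := []string{"alwayslink", "deps", "implements", "srcs"}
	sortAttrs(attrs)
	fmt.Println(attrs) // [deps srcs implements alwayslink]
}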
var StripLabelLeadingSlashes = false
var ShortenAbsoluteLabelsToRelative = false
var FormatBzlFiles = false
// AndroidNativeRules lists all Android rules that are being migrated from Native to Starlark.
var AndroidNativeRules = []string{
"aar_import",
"android_binary",
"android_device",
"android_instrumentation_test",
"android_library",
"android_local_test",
"android_ndk_respository",
"android_sdk_repository",
}
// AndroidLoadPath is the load path for the Starlark Android Rules.
var AndroidLoadPath = "@rules_android//android:rules.bzl"
// CcNativeRules lists all C++ rules that are being migrated from Native to Starlark.
var CcNativeRules = []string{
"cc_binary",
"cc_test",
"cc_library",
"cc_import",
"cc_proto_library",
"fdo_prefetch_hints",
"fdo_profile",
"cc_toolchain",
"cc_toolchain_suite",
"objc_library",
"objc_import",
}
// CcLoadPath is the load path for the Starlark C++ Rules.
var CcLoadPath = "@rules_cc//cc:defs.bzl"
// JavaNativeRules lists all Java rules that are being migrated from Native to Starlark.
var JavaNativeRules = []string{
"java_binary",
"java_import",
"java_library",
"java_lite_proto_library",
"java_proto_library",
"java_test",
"java_package_configuration",
"java_plugin",
"java_runtime",
"java_toolchain",
}
// JavaLoadPath is the load path for the Starlark Java Rules.
var JavaLoadPath = "@rules_java//java:defs.bzl"
// PyNativeRules lists all Python rules that are being migrated from Native to Starlark.
var PyNativeRules = []string{
"py_library",
"py_binary",
"py_test",
"py_runtime",
}
// PyLoadPath is the load path for the Starlark Python Rules.
var PyLoadPath = "@rules_python//python:defs.bzl"
// ProtoNativeRules lists all Proto rules that are being migrated from Native to Starlark.
var ProtoNativeRules = []string{
"proto_lang_toolchain",
"proto_library",
}
// ProtoNativeSymbols lists all Proto symbols that are being migrated from Native to Starlark.
var ProtoNativeSymbols = []string{
"ProtoInfo",
"proto_common",
}
// ProtoLoadPath is the load path for the Starlark Proto Rules.
var ProtoLoadPath = "@rules_proto//proto:defs.bzl"
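Each *NativeRules slice above pairs a family of native rules with the *LoadPath .bzl file that now provides them, which is what lets a fixer know which load() statement to add when it encounters, say, a cc_library defined without one. The following is a hedged, self-contained sketch of that lookup; the loadPathForRule map and loadFor helper are assumptions for illustration (they reuse only rule names and load paths listed above), not the tool's actual API.

// Hypothetical lookup: given a rule kind, return the Starlark load path that
// should provide it once migration away from native rules is complete.
package main

import "fmt"

var loadPathForRule = map[string]string{}

func init() {
	register := func(rules []string, loadPath string) {
		for _, r := range rules {
			loadPathForRule[r] = loadPath
		}
	}
	// Subsets of the vendored lists above, paired with their load paths.
	register([]string{"cc_binary", "cc_test", "cc_library"}, "@rules_cc//cc:defs.bzl")
	register([]string{"py_library", "py_binary", "py_test"}, "@rules_python//python:defs.bzl")
	register([]string{"java_binary", "java_library"}, "@rules_java//java:defs.bzl")
}

// loadFor returns the load path for kind and whether one is known.
func loadFor(kind string) (string, bool) {
	p, ok := loadPathForRule[kind]
	return p, ok
}

func main() {
	if p, ok := loadFor("cc_library"); ok {
		fmt.Printf("load(%q, %q)\n", p, "cc_library") // load("@rules_cc//cc:defs.bzl", "cc_library")
	}
}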
// OverrideTables allows a user of the build package to override the special-case rules. The user-provided tables replace the built-in tables.
func OverrideTables(labelArg, blacklist, listArg, sortableListArg, sortBlacklist, sortWhitelist map[string]bool, namePriority map[string]int, stripLabelLeadingSlashes, shortenAbsoluteLabelsToRelative bool) {

View File

@@ -68,7 +68,7 @@ func FindWorkspaceRoot(rootDir string) (root string, rest string) {
// Find searches from the given dir and up for the WORKSPACE file
// returning the directory containing it, or an error if none found in the tree.
func Find(dir string) (string, error) {
if dir == "" || dir == "/" || dir == "." {
if dir == "" || dir == "/" || dir == "." || (len(dir) == 3 && strings.HasSuffix(dir, ":\\")) {
return "", os.ErrNotExist
}
for repoRootFile, fiFunc := range repoRootFiles {
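The clause added to Find's termination check stops the upward walk at a Windows drive root such as C:\, where filepath.Dir returns its argument unchanged, so a loop that only tested for "", "/", and "." could otherwise walk forever. The sketch below shows the same walk-upward pattern with that guard; it is a simplified stand-in under stated assumptions (a plain os.Stat probe for a single WORKSPACE file), not the vendored function, whose probes and package name are not reproduced here.

// Sketch: search upward for a WORKSPACE file, terminating at "", "/", ".",
// or a Windows drive root (e.g. C:\), where filepath.Dir(dir) == dir.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func atRoot(dir string) bool {
	return dir == "" || dir == "/" || dir == "." ||
		(len(dir) == 3 && strings.HasSuffix(dir, ":\\"))
}

// findWorkspace walks from dir toward the filesystem root and returns the
// first directory containing a WORKSPACE file.
func findWorkspace(dir string) (string, error) {
	for !atRoot(dir) {
		if _, err := os.Stat(filepath.Join(dir, "WORKSPACE")); err == nil {
			return dir, nil
		}
		parent := filepath.Dir(dir)
		if parent == dir { // extra safety: no progress means we hit a root
			break
		}
		dir = parent
	}
	return "", os.ErrNotExist
}

func main() {
	cwd, err := os.Getwd()
	if err != nil {
		return
	}
	if root, err := findWorkspace(cwd); err == nil {
		fmt.Println("workspace root:", root)
	} else {
		fmt.Println("no WORKSPACE found above", cwd)
	}
}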