2005-04-28 16:46:33 +02:00
|
|
|
#include "cache.h"
|
2006-01-07 10:33:54 +01:00
|
|
|
#include "tag.h"
|
2018-05-16 01:42:15 +02:00
|
|
|
#include "object-store.h"
|
2007-02-26 20:56:00 +01:00
|
|
|
#include "commit.h"
|
|
|
|
#include "tree.h"
|
|
|
|
#include "blob.h"
|
2018-05-15 23:48:42 +02:00
|
|
|
#include "alloc.h"
|
2017-01-18 00:37:18 +01:00
|
|
|
#include "gpg-interface.h"
|
2023-02-24 01:09:27 +01:00
|
|
|
#include "hex.h"
|
2018-07-13 02:03:07 +02:00
|
|
|
#include "packfile.h"
|
2005-04-28 16:46:33 +02:00
|
|
|
|
|
|
|
const char *tag_type = "tag";
|
|
|
|
|
2016-04-22 16:52:04 +02:00
|
|
|
static int run_gpg_verify(const char *buf, unsigned long size, unsigned flags)
|
|
|
|
{
|
|
|
|
struct signature_check sigc;
|
2021-02-11 03:08:03 +01:00
|
|
|
struct strbuf payload = STRBUF_INIT;
|
|
|
|
struct strbuf signature = STRBUF_INIT;
|
2016-04-22 16:52:04 +02:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
memset(&sigc, 0, sizeof(sigc));
|
|
|
|
|
2021-02-11 03:08:03 +01:00
|
|
|
if (!parse_signature(buf, size, &payload, &signature)) {
|
2016-04-22 16:52:04 +02:00
|
|
|
if (flags & GPG_VERIFY_VERBOSE)
|
2021-02-11 03:08:03 +01:00
|
|
|
write_in_full(1, buf, size);
|
2016-04-22 16:52:04 +02:00
|
|
|
return error("no signature found");
|
|
|
|
}
|
|
|
|
|
2021-12-09 09:52:47 +01:00
|
|
|
sigc.payload_type = SIGNATURE_PAYLOAD_TAG;
|
2021-12-09 09:52:43 +01:00
|
|
|
sigc.payload = strbuf_detach(&payload, &sigc.payload_len);
|
|
|
|
ret = check_signature(&sigc, signature.buf, signature.len);
|
2017-01-18 00:37:18 +01:00
|
|
|
|
|
|
|
if (!(flags & GPG_VERIFY_OMIT_STATUS))
|
|
|
|
print_signature_buffer(&sigc, flags);
|
2016-04-22 16:52:04 +02:00
|
|
|
|
|
|
|
signature_check_clear(&sigc);
|
2021-02-11 03:08:03 +01:00
|
|
|
strbuf_release(&payload);
|
|
|
|
strbuf_release(&signature);
|
2016-04-22 16:52:04 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-07-13 02:44:15 +02:00
|
|
|
int gpg_verify_tag(const struct object_id *oid, const char *name_to_report,
|
2016-04-22 16:52:04 +02:00
|
|
|
unsigned flags)
|
|
|
|
{
|
|
|
|
enum object_type type;
|
|
|
|
char *buf;
|
|
|
|
unsigned long size;
|
|
|
|
int ret;
|
|
|
|
|
2018-04-25 20:20:59 +02:00
|
|
|
type = oid_object_info(the_repository, oid, NULL);
|
2016-04-22 16:52:04 +02:00
|
|
|
if (type != OBJ_TAG)
|
|
|
|
return error("%s: cannot verify a non-tag object of type %s.",
|
|
|
|
name_to_report ?
|
|
|
|
name_to_report :
|
2018-03-12 03:27:30 +01:00
|
|
|
find_unique_abbrev(oid, DEFAULT_ABBREV),
|
2018-02-14 19:59:24 +01:00
|
|
|
type_name(type));
|
2016-04-22 16:52:04 +02:00
|
|
|
|
sha1_file: convert read_sha1_file to struct object_id
Convert read_sha1_file to take a pointer to struct object_id and rename
it read_object_file. Do the same for read_sha1_file_extended.
Convert one use in grep.c to use the new function without any other code
change, since the pointer being passed is a void pointer that is already
initialized with a pointer to struct object_id. Update the declaration
and definitions of the modified functions, and apply the following
semantic patch to convert the remaining callers:
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1.hash, E2, E3)
+ read_object_file(&E1, E2, E3)
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1->hash, E2, E3)
+ read_object_file(E1, E2, E3)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1.hash, E2, E3, E4)
+ read_object_file_extended(&E1, E2, E3, E4)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1->hash, E2, E3, E4)
+ read_object_file_extended(E1, E2, E3, E4)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 03:27:53 +01:00
|
|
|
buf = read_object_file(oid, &type, &size);
|
2016-04-22 16:52:04 +02:00
|
|
|
if (!buf)
|
|
|
|
return error("%s: unable to read file.",
|
|
|
|
name_to_report ?
|
|
|
|
name_to_report :
|
2018-03-12 03:27:30 +01:00
|
|
|
find_unique_abbrev(oid, DEFAULT_ABBREV));
|
2016-04-22 16:52:04 +02:00
|
|
|
|
|
|
|
ret = run_gpg_verify(buf, size, flags);
|
|
|
|
|
|
|
|
free(buf);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2018-06-29 03:22:20 +02:00
|
|
|
struct object *deref_tag(struct repository *r, struct object *o, const char *warn, int warnlen)
|
2005-08-05 09:47:56 +02:00
|
|
|
{
|
2018-07-13 02:03:07 +02:00
|
|
|
struct object_id *last_oid = NULL;
|
2006-07-12 05:45:31 +02:00
|
|
|
while (o && o->type == OBJ_TAG)
|
2018-07-13 02:03:07 +02:00
|
|
|
if (((struct tag *)o)->tagged) {
|
|
|
|
last_oid = &((struct tag *)o)->tagged->oid;
|
2018-08-03 00:30:46 +02:00
|
|
|
o = parse_object(r, last_oid);
|
2018-07-13 02:03:07 +02:00
|
|
|
} else {
|
|
|
|
last_oid = NULL;
|
2008-02-18 08:31:55 +01:00
|
|
|
o = NULL;
|
2018-07-13 02:03:07 +02:00
|
|
|
}
|
2005-11-03 00:19:13 +01:00
|
|
|
if (!o && warn) {
|
2018-07-13 02:03:07 +02:00
|
|
|
if (last_oid && is_promisor_object(last_oid))
|
|
|
|
return NULL;
|
2005-11-03 00:19:13 +01:00
|
|
|
if (!warnlen)
|
|
|
|
warnlen = strlen(warn);
|
|
|
|
error("missing object referenced by '%.*s'", warnlen, warn);
|
|
|
|
}
|
2005-08-05 09:47:56 +02:00
|
|
|
return o;
|
|
|
|
}
|
|
|
|
|
upload-pack: avoid parsing tag destinations
When upload-pack advertises refs, it dereferences any tags
it sees, and shows the resulting sha1 to the client. It does
this by calling deref_tag. That function must load and parse
each tag object to find the sha1 of the tagged object.
However, it also ends up parsing the tagged object itself,
which is not strictly necessary for upload-pack's use.
Each tag produces two object loads (assuming it is not a
recursive tag), when it could get away with only a single
one. Dropping the second load halves the effort we spend.
The downside is that we are no longer verifying the
resulting object by loading it. In particular:
1. We never cross-check the "type" field given in the tag
object with the type of the pointed-to object. If the
tag says it points to a tag but doesn't, then we will
keep peeling and realize the error. If the tag says it
points to a non-tag but actually points to a tag, we
will stop peeling and just advertise the pointed-to
tag.
2. If we are missing the pointed-to object, we will not
realize (because we never even look it up in the object
db).
However, both of these are errors in the object database,
and both will be detected if a client actually requests the
broken objects in question. So we are simply pushing the
verification away from the advertising stage, and down to
the actual fetching stage.
On my test repo with 120K refs, this drops the time to
advertise the refs from ~3.2s to ~2.0s.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2012-01-06 20:18:01 +01:00
|
|
|
struct object *deref_tag_noverify(struct object *o)
|
|
|
|
{
|
|
|
|
while (o && o->type == OBJ_TAG) {
|
2018-06-29 03:21:51 +02:00
|
|
|
o = parse_object(the_repository, &o->oid);
|
upload-pack: avoid parsing tag destinations
When upload-pack advertises refs, it dereferences any tags
it sees, and shows the resulting sha1 to the client. It does
this by calling deref_tag. That function must load and parse
each tag object to find the sha1 of the tagged object.
However, it also ends up parsing the tagged object itself,
which is not strictly necessary for upload-pack's use.
Each tag produces two object loads (assuming it is not a
recursive tag), when it could get away with only a single
one. Dropping the second load halves the effort we spend.
The downside is that we are no longer verifying the
resulting object by loading it. In particular:
1. We never cross-check the "type" field given in the tag
object with the type of the pointed-to object. If the
tag says it points to a tag but doesn't, then we will
keep peeling and realize the error. If the tag says it
points to a non-tag but actually points to a tag, we
will stop peeling and just advertise the pointed-to
tag.
2. If we are missing the pointed-to object, we will not
realize (because we never even look it up in the object
db).
However, both of these are errors in the object database,
and both will be detected if a client actually requests the
broken objects in question. So we are simply pushing the
verification away from the advertising stage, and down to
the actual fetching stage.
On my test repo with 120K refs, this drops the time to
advertise the refs from ~3.2s to ~2.0s.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2012-01-06 20:18:01 +01:00
|
|
|
if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged)
|
|
|
|
o = ((struct tag *)o)->tagged;
|
|
|
|
else
|
|
|
|
o = NULL;
|
|
|
|
}
|
|
|
|
return o;
|
|
|
|
}
|
|
|
|
|
2018-06-29 03:22:11 +02:00
|
|
|
struct tag *lookup_tag(struct repository *r, const struct object_id *oid)
|
2005-04-28 16:46:33 +02:00
|
|
|
{
|
2019-06-20 09:41:14 +02:00
|
|
|
struct object *obj = lookup_object(r, oid);
|
2007-04-17 07:11:43 +02:00
|
|
|
if (!obj)
|
2019-06-20 09:41:21 +02:00
|
|
|
return create_object(r, oid, alloc_tag_node(r));
|
2020-06-17 11:14:08 +02:00
|
|
|
return object_as_type(obj, OBJ_TAG, 0);
|
2005-04-28 16:46:33 +02:00
|
|
|
}
|
|
|
|
|
2017-04-26 21:29:31 +02:00
|
|
|
static timestamp_t parse_tag_date(const char *buf, const char *tail)
|
2010-04-13 01:25:28 +02:00
|
|
|
{
|
|
|
|
const char *dateptr;
|
|
|
|
|
|
|
|
while (buf < tail && *buf++ != '>')
|
|
|
|
/* nada */;
|
|
|
|
if (buf >= tail)
|
|
|
|
return 0;
|
|
|
|
dateptr = buf;
|
|
|
|
while (buf < tail && *buf++ != '\n')
|
|
|
|
/* nada */;
|
|
|
|
if (buf >= tail)
|
|
|
|
return 0;
|
2017-04-21 12:45:44 +02:00
|
|
|
/* dateptr < buf && buf[-1] == '\n', so parsing will stop at buf-1 */
|
|
|
|
return parse_timestamp(dateptr, NULL, 10);
|
2010-04-13 01:25:28 +02:00
|
|
|
}
|
|
|
|
|
2018-05-15 23:48:42 +02:00
|
|
|
void release_tag_memory(struct tag *t)
|
|
|
|
{
|
|
|
|
free(t->tag);
|
|
|
|
t->tagged = NULL;
|
|
|
|
t->object.parsed = 0;
|
|
|
|
t->date = 0;
|
|
|
|
}
|
|
|
|
|
2018-06-29 03:22:12 +02:00
|
|
|
/*
 * Parse the raw bytes of a tag object into "item".  The expected layout
 * is an "object <oid>" line, a "type <name>" line, a "tag <name>" line,
 * and optionally a "tagger <ident> <date> <tz>" line.  Returns 0 on
 * success, -1 on a malformed header, or an error() for an unknown or
 * mismatched pointee type.  The parsed bit is only set on success, so
 * a failed parse can be retried.
 */
int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, unsigned long size)
{
	struct object_id oid;
	char type[20];
	const char *bufptr = data;
	const char *tail = bufptr + size;
	const char *nl;

	if (item->object.parsed)
		return 0;

	if (item->tag) {
		/*
		 * Presumably left over from a previous failed parse;
		 * clear it out in preparation for re-parsing (we'll probably
		 * hit the same error, which lets us tell our current caller
		 * about the problem).
		 */
		FREE_AND_NULL(item->tag);
	}

	/* Too short to even hold the "object <oid>\ntype " prefix. */
	if (size < the_hash_algo->hexsz + 24)
		return -1;
	if (memcmp("object ", bufptr, 7) || parse_oid_hex(bufptr + 7, &oid, &bufptr) || *bufptr++ != '\n')
		return -1;

	/* "type <name>\n" — copy the name into a NUL-terminated buffer. */
	if (!starts_with(bufptr, "type "))
		return -1;
	bufptr += 5;
	nl = memchr(bufptr, '\n', tail - bufptr);
	if (!nl || sizeof(type) <= (nl - bufptr))
		return -1;
	memcpy(type, bufptr, nl - bufptr);
	type[nl - bufptr] = '\0';
	bufptr = nl + 1;

	/* Look up the pointee under the type the tag claims for it. */
	if (!strcmp(type, blob_type)) {
		item->tagged = (struct object *)lookup_blob(r, &oid);
	} else if (!strcmp(type, tree_type)) {
		item->tagged = (struct object *)lookup_tree(r, &oid);
	} else if (!strcmp(type, commit_type)) {
		item->tagged = (struct object *)lookup_commit(r, &oid);
	} else if (!strcmp(type, tag_type)) {
		item->tagged = (struct object *)lookup_tag(r, &oid);
	} else {
		return error("unknown tag type '%s' in %s",
			type, oid_to_hex(&item->object.oid));
	}

	/*
	 * lookup_*() returns NULL when the oid was already seen as a
	 * different type in this process; treat that as a parse error.
	 */
	if (!item->tagged)
		return error("bad tag pointer to %s in %s",
			oid_to_hex(&oid),
			oid_to_hex(&item->object.oid));

	/* "tag <name>\n" — mandatory. */
	if (bufptr + 4 < tail && starts_with(bufptr, "tag "))
		; /* good */
	else
		return -1;
	bufptr += 4;
	nl = memchr(bufptr, '\n', tail - bufptr);
	if (!nl)
		return -1;
	item->tag = xmemdupz(bufptr, nl - bufptr);
	bufptr = nl + 1;

	/* "tagger" line is optional; without it the date is 0. */
	if (bufptr + 7 < tail && starts_with(bufptr, "tagger "))
		item->date = parse_tag_date(bufptr, tail);
	else
		item->date = 0;

	/* Only mark as parsed on success, so failures can be retried. */
	item->object.parsed = 1;
	return 0;
}
|
2005-05-04 19:44:15 +02:00
|
|
|
|
2005-05-06 19:48:34 +02:00
|
|
|
int parse_tag(struct tag *item)
|
|
|
|
{
|
2007-02-26 20:55:59 +01:00
|
|
|
enum object_type type;
|
2005-05-06 19:48:34 +02:00
|
|
|
void *data;
|
|
|
|
unsigned long size;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (item->object.parsed)
|
|
|
|
return 0;
|
sha1_file: convert read_sha1_file to struct object_id
Convert read_sha1_file to take a pointer to struct object_id and rename
it read_object_file. Do the same for read_sha1_file_extended.
Convert one use in grep.c to use the new function without any other code
change, since the pointer being passed is a void pointer that is already
initialized with a pointer to struct object_id. Update the declaration
and definitions of the modified functions, and apply the following
semantic patch to convert the remaining callers:
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1.hash, E2, E3)
+ read_object_file(&E1, E2, E3)
@@
expression E1, E2, E3;
@@
- read_sha1_file(E1->hash, E2, E3)
+ read_object_file(E1, E2, E3)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1.hash, E2, E3, E4)
+ read_object_file_extended(&E1, E2, E3, E4)
@@
expression E1, E2, E3, E4;
@@
- read_sha1_file_extended(E1->hash, E2, E3, E4)
+ read_object_file_extended(E1, E2, E3, E4)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-03-12 03:27:53 +01:00
|
|
|
data = read_object_file(&item->object.oid, &type, &size);
|
2005-05-06 19:48:34 +02:00
|
|
|
if (!data)
|
|
|
|
return error("Could not read %s",
|
2015-11-10 03:22:28 +01:00
|
|
|
oid_to_hex(&item->object.oid));
|
2007-02-26 20:55:59 +01:00
|
|
|
if (type != OBJ_TAG) {
|
2005-05-06 19:48:34 +02:00
|
|
|
free(data);
|
|
|
|
return error("Object %s not a tag",
|
2015-11-10 03:22:28 +01:00
|
|
|
oid_to_hex(&item->object.oid));
|
2005-05-06 19:48:34 +02:00
|
|
|
}
|
2018-06-29 03:22:04 +02:00
|
|
|
ret = parse_tag_buffer(the_repository, item, data, size);
|
2005-05-04 19:44:15 +02:00
|
|
|
free(data);
|
2005-05-06 19:48:34 +02:00
|
|
|
return ret;
|
2005-04-28 16:46:33 +02:00
|
|
|
}
|
2019-09-05 21:55:55 +02:00
|
|
|
|
|
|
|
struct object_id *get_tagged_oid(struct tag *tag)
|
|
|
|
{
|
|
|
|
if (!tag->tagged)
|
|
|
|
die("bad tag");
|
|
|
|
return &tag->tagged->oid;
|
|
|
|
}
|